Commit · 1adc797
1 Parent(s): 1b864a8
Update parquet files (step 4 of 476)
This view is limited to 50 files because it contains too many changes.
- spaces/101-5/gpt4free/testing/wewordle/testing.py +0 -30
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bus Simulator 2012 English Patch 1.2.4 Experience the Realistic and Fun Bus Driving Game.md +0 -157
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Call of Duty 4 Modern Warfare 11 English Language Pack - Where to Find and How to Use It.md +0 -82
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Embird 2017 Registration Password The Secret to Creating Amazing Embroidery Designs.md +0 -33
- spaces/1gistliPinn/ChatGPT4/Examples/Buku Zoologi Vertebrata.pdf Extra Quality.md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/Delock Usb Sound Adapter 7.1 Driver Download.md +0 -6
- spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion_safe/safety_checker.py +0 -113
- spaces/2023Liu2023/bingo/src/components/tone-selector.tsx +0 -43
- spaces/A00001/bingothoo/src/components/ui/textarea.tsx +0 -24
- spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vqperceptual.py +0 -136
- spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/conformer/conformer.py +0 -72
- spaces/AIxPha/QSign/unidbg-fetch-qsign/bin/unidbg-fetch-qsign.bat +0 -89
- spaces/AchyuthGamer/ImMagician-Image-Generator/share_btn.py +0 -78
- spaces/AchyuthGamer/OpenGPT-Chat-UI/src/app.html +0 -32
- spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/types/SharedConversation.ts +0 -12
- spaces/AchyuthGamer/OpenGPT/server/babel.py +0 -48
- spaces/Adapter/CoAdapter/ldm/modules/diffusionmodules/model.py +0 -852
- spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/describer/pokemon.py +0 -51
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/methods/WaitEventMethods.js +0 -13
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/utils/CreateAnyLabel.js +0 -18
- spaces/AkitoP/umamusume_bert_vits2/text/english_bert_mock.py +0 -5
- spaces/AliSaria/MilitarEye/app.py +0 -45
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/util.py +0 -472
- spaces/Amrrs/fashion-aggregator-duplicated/app.py +0 -217
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py +0 -1398
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py +0 -599
- spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py +0 -4
- spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes.py +0 -4
- spaces/AnishKumbhar/ChatBot/text-generation-webui-main/.github/pull_request_template.md +0 -3
- spaces/Anonymous-sub/Rerender/ControlNet/config.py +0 -1
- spaces/Ariharasudhan/YoloV5/models/tf.py +0 -608
- spaces/ArtyomKhyan/Detection/utils/datasets.py +0 -887
- spaces/AtomdffAI/wechatgpt4atom/bot/bot_factory.py +0 -26
- spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/signers.py +0 -832
- spaces/Bready11/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/app.py +0 -3
- spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/classifier.py +0 -18
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/PointRend/README.md +0 -115
- spaces/CVPR/LIVE/thrust/cmake/AppendOptionIfAvailable.cmake +0 -14
- spaces/CVPR/LIVE/thrust/thrust/detail/cstdint.h +0 -79
- spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/async/reduce.h +0 -350
- spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/gather.h +0 -107
- spaces/CVPR/VizWiz-CLIP-VQA/README.md +0 -10
- spaces/Choisuren/AnimeGANv3/README.md +0 -12
- spaces/CikeyQI/meme-api/meme_generator/memes/hug_leg/__init__.py +0 -32
- spaces/CofAI/chat/client/css/conversation.css +0 -158
- spaces/CofAI/chat/client/js/sidebar-toggler.js +0 -34
- spaces/DKDohare/Chat-GPT4-MAX/app.py +0 -141
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_async/http_proxy.py +0 -350
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/keras_mixin.py +0 -481
- spaces/DVLH/nlpconnect-vit-gpt2-image-captioning/app.py +0 -3
spaces/101-5/gpt4free/testing/wewordle/testing.py
DELETED
@@ -1,30 +0,0 @@
-from Wewordle import ChatCompletion
-
-# Test 1
-response = ChatCompletion.create(model="gpt-3.5-turbo",
-                                 provider="Wewordle",
-                                 stream=False,
-                                 messages=[{'role': 'user', 'content': 'who are you?'}])
-
-print(response)
-
-# Test 2
-response = ChatCompletion.create(model="gpt-3.5-turbo",
-                                 provider="Wewordle",
-                                 stream=False,
-                                 messages=[{'role': 'user', 'content': 'what you can do?'}])
-
-print(response)
-
-
-# Test 3
-response = ChatCompletion.create(model="gpt-3.5-turbo",
-                                 provider="Wewordle",
-                                 stream=False,
-                                 messages=[
-                                     {'role': 'user', 'content': 'now your name is Bob'},
-                                     {'role': 'assistant', 'content': 'Hello Im Bob, you asistant'},
-                                     {'role': 'user', 'content': 'what your name again?'},
-                                 ])
-
-print(response)

spaces/1acneusushi/gradio-2dmoleculeeditor/data/Bus Simulator 2012 English Patch 1.2.4 Experience the Realistic and Fun Bus Driving Game.md
DELETED
@@ -1,157 +0,0 @@
-<br />
-<h1>Bus Simulator 2012: A Realistic and Fun Driving Simulation Game</h1>
-<p>Have you ever wondered what it's like to drive a bus in a busy city? Do you want to experience the challenges and rewards of being a bus driver? If you answered yes to any of these questions, then you should try <strong>Bus Simulator 2012</strong>, a simulation game developed by TML Studios and published by astragon Entertainment in 2012.</p>
-<p>In this game, you can explore a detailed and virtual world based on a picturesque German city behind the wheel of a realistically modeled and freely accessible bus. You can choose from different types of buses, routes, and scenarios, and interact with your passengers and traffic. You can also customize your vehicles and share them with the game community.</p>
-<h2>Bus Simulator 2012 English Patch 1.2.4</h2><br /><p><b><b>DOWNLOAD</b> ►►► <a href="https://byltly.com/2uKzu6">https://byltly.com/2uKzu6</a></b></p><br /><br />
-<p>In this article, we will tell you everything you need to know about Bus Simulator 2012, including what it is, how to install it, what is the English patch 1.2.4, how to play it, and some tips and tricks to make your gameplay more enjoyable.</p>
-<h2>What is Bus Simulator 2012?</h2>
-<p>Bus Simulator 2012 is a simulation game that lets you experience the life of a bus driver in a realistic and immersive way. You can drive various buses with different features and physics, such as city buses, articulated buses, double-decker buses, school buses, etc. You can also organize your own routes and service more than 450 bus stops in a huge and open 3D-world.</p>
-<h3>Features of Bus Simulator 2012</h3>
-<p>Some of the features that make Bus Simulator 2012 stand out from other simulation games are:</p>
-<ul>
-<li>You can switch on the air-conditioning, monitor your engine's temperature, check the charging level of the cooling liquid, oil, and gasoline, etc.</li>
-<li>You can interact with your passengers by selling tickets, greeting them, announcing stops, etc. They will react accordingly to your behavior and service quality.</li>
-<li>You can enjoy the realistic AI of both pedestrians and traffic, which will influence your driving style and schedule.</li>
-<li>You can create your own vehicles using the integrated bus editor and share them online with other players.</li>
-<li>You can use partial controller support or keyboard and mouse controls.</li>
-<li>You can choose from different languages for the game interface and audio.</li>
-</ul>
-<h3>System Requirements for Bus Simulator 2012</h3>
-<p>To play Bus Simulator 2012 on your PC, you need to meet the following minimum system requirements:</p>
-<table>
-<tr><td>OS</td><td>Windows XP/Vista/7/8/10</td></tr>
-<tr><td>Processor</td><td>Dual core processor with 2.6 GHz</td></tr>
-<tr><td>Memory</td><td>4 GB RAM</td></tr>
-<tr><td>Graphics</td><td>NVIDIA GeForce® or AMD Radeon™ with at least 512 MB VRAM</td></tr>
-<tr><td>DirectX</td><td>Version 9.0c</td></tr>
-<tr><td>Storage</td><td>5 GB available space</td></tr>
-<tr><td>Sound Card</td><td>DirectX compatible sound card</td></tr>
-</table>
-<h2>How to Install Bus Simulator 2012?</h2>
-<p>If you want to play Bus Simulator 2012 on your PC, you need to follow these steps:</p>
-<h3>Downloading and Extracting the Game Files</h3>
-<ol>
-<li>You need to download the game files from a reliable source. You can buy it from Steam or other online platforms for $9.99.</li>
-<li>You need to extract the game files using a software like WinRAR or 7-Zip. You will get a folder named "Bus-Simulator_2012" with several subfolders inside.</li>
-<li>You need to open the folder "Bus-Simulator_2012" and find the file named "setup.exe". You need to double-click on it to start the installation process.</li>
-</ol>
-<h3>Running the Setup and Choosing the Language</h3>
-<ol start="4">
-<li>You need to follow the instructions on the screen to complete the installation process. You will be asked to choose a destination folder for the game files.</li>
-<li>You will also be asked to choose a language for the game interface and audio. You can choose from English, German, French, Italian, Spanish, Turkish, Polish, Czech, Hungarian, Russian, Dutch, Portuguese (Brazil), or Chinese (Simplified).</li>
-<li>You will see a message that says "Installation complete" when the process is finished. You can click on "Finish" to exit the setup.</li>
-<li>You will find a shortcut icon for Bus Simulator 2012 on your desktop or start menu. You can click on it to launch the game.</li>
-</ol>
-<h2>What is Bus Simulator 2012 English Patch 1.2.4?</h2>
-<p>If you have installed Bus Simulator 2012 in a language other than English, you might encounter some problems with the game interface or audio. For example, some texts might be missing or unreadable, some sounds might be distorted or muted, etc.</p>
-<h3>Why Do You Need the English Patch?</h3>
-<p>To fix these problems, you need to download and apply an English patch for Bus Simulator 2012. This patch will update your game files to match the English language and fix any bugs or errors. The latest version of the English patch is 1.2.4, which was released on March 13, 2012.</p>
-<h3>How to Download and Apply the English Patch?</h3>
-<p>To download and apply the English patch for Bus Simulator 2012, you need to follow these steps:</p>
-<p>Bus Simulator 2012 patch 1.2.4 download<br />
-How to install Bus Simulator 2012 English Patch<br />
-Bus Simulator 2012 gameplay with English Patch<br />
-Bus Simulator 2012 patch 1.2.4 changelog<br />
-Bus Simulator 2012 mods compatible with English Patch<br />
-Bus Simulator 2012 system requirements for patch 1.2.4<br />
-Bus Simulator 2012 patch 1.2.4 error fix<br />
-Bus Simulator 2012 review with English Patch<br />
-Bus Simulator 2012 patch 1.2.4 free download<br />
-Bus Simulator 2012 cheats and tips with English Patch<br />
-Bus Simulator 2012 update to patch 1.2.4<br />
-Bus Simulator 2012 patch 1.2.4 crack<br />
-Bus Simulator 2012 best routes with English Patch<br />
-Bus Simulator 2012 patch 1.2.4 multiplayer<br />
-Bus Simulator 2012 patch notes for English Patch<br />
-Bus Simulator 2012 comparison with other bus simulators<br />
-Bus Simulator 2012 patch 1.2.4 features and improvements<br />
-Bus Simulator 2012 English Patch tutorial<br />
-Bus Simulator 2012 patch 1.2.4 bugs and issues<br />
-Bus Simulator 2012 patch 1.2.4 trailer and screenshots<br />
-Bus Simulator 2012 patch history and versions<br />
-Bus Simulator 2012 English Patch compatibility and performance<br />
-Bus Simulator 2012 patch 1.2.4 release date and news<br />
-Bus Simulator 2012 patch 1.2.4 size and download speed<br />
-Bus Simulator 2012 English Patch feedback and ratings<br />
-Bus Simulator 2012 realistic mode with English Patch<br />
-Bus Simulator 2012 patch 1.2.4 steam key<br />
-Bus Simulator 2012 custom buses with English Patch<br />
-Bus Simulator 2012 patch 1.2.4 sound and graphics quality<br />
-Bus Simulator 2012 English Patch installation guide and troubleshooting<br />
-Bus Simulator 2012 patch 1.2.4 achievements and rewards<br />
-Bus Simulator 2012 fun and challenging scenarios with English Patch<br />
-Bus Simulator 2012 patch 1.2.4 offline mode and save data<br />
-Bus Simulator 2012 English Patch requirements and recommendations<br />
-Bus Simulator 2012 patch 1.2.4 support and contact information<br />
-Bus Simulator 2012 sandbox mode with English Patch<br />
-Bus Simulator 2012 patch 1.2.4 license key and activation code<br />
-Bus Simulator 2012 realistic physics and weather with English Patch<br />
-Bus Simulator 2012 patch 1.2.4 optimization and settings<br />
-Bus Simulator 2012 English Patch pros and cons<br />
-Bus Simulator 2012 patch alternative download links and sources<br />
-Bus Simulator 2012 different bus models and types with English Patch<br />
-Bus Simulator 2012 patch verification and validation process<br />
-Bus Simulator 2012 dynamic traffic and pedestrians with English Patch <br />
-Bus Simulator 2012 patch backup and restore options <br />
-Bus Simulator 2012 map editor and custom maps with English Patch <br />
-Bus Simulator 2012 patch uninstallation and removal instructions <br />
-Bus Simulator 2012 voice commands and controls with English Patch <br />
-Bus Simulator 2012 patch compatibility with other patches and updates <br />
-Bus Simulator 2012 online community and forums with English Patch</p>
-<ol>
-<li>You need to go to the official website of TML Studios and find the page for Bus Simulator 2012. You can also use this link: <a href="http://www.tml-studios.de/index.php?option=com_content&view=article&id=30&Itemid=40&lang=en">http://www.tml-studios.de/index.php?option=com_content&view=article&id=30&Itemid=40&lang=en</a></li>
-<li>You need to scroll down to the section "Patches" and click on the link for "Patch 1.3.2 (ENGLISH)". You will be redirected to a download page.</li>
-<li>You need to click on the button "Download" and save the file "BusSimulator2012_Update_1_3_2_EN.exe" on your PC.</li>
-<li>You need to run the file "BusSimulator2012_Update_1_3_2_EN.exe" and follow the instructions on the screen to install the patch. You will be asked to choose a destination folder for the patch files.</li>
-<li>You will see a message that says "Installation complete" when the process is finished. You can click on "Finish" to exit the setup.</li>
-<li>You can now launch Bus Simulator 2012 and enjoy the game in English.</li>
-</ol>
-<h2>How to Play Bus Simulator 2012?</h2>
-<p>Now that you have installed Bus Simulator 2012 and applied the English patch, you are ready to play the game. Here are some basic steps to get you started:</p>
-<h3>Choosing a Bus and a Route</h3>
-<ol>
-<li>When you launch the game, you will see a main menu with several options. You can click on "Start Game" to begin a new game or continue a saved game.</li>
-<li>You will be taken to a screen where you can choose your bus and your route. You can use the arrows on the left and right sides of the screen to browse through different buses and routes. You can also click on the icons at the bottom of the screen to access more options, such as changing your name, your company name, your difficulty level, etc.</li>
-<li>When you have selected your bus and your route, you can click on "Start" to begin your journey.</li>
-</ol>
-<h3>Driving and Interacting with Passengers</h3>
-<ol start="4">
-<li>You will see a cockpit view of your bus with various controls and indicators. You can use your mouse or keyboard to steer, accelerate, brake, etc. You can also use the number keys (1-9) to switch between different camera views, such as outside view, passenger view, mirror view, etc.</li>
-<li>You will also see a map on the bottom right corner of the screen that shows your current location, your destination, your route, and other points of interest. You can use the M key to toggle between different map modes, such as zoom in, zoom out, rotate, etc.</li>
-<li>You will have to follow your schedule and drive safely and responsibly. You will have to stop at bus stops, open and close doors, sell tickets, greet passengers, announce stops, etc. You will also have to obey traffic rules and avoid collisions with other vehicles or pedestrians.</li>
-<li>You will earn money and reputation points based on your performance and service quality. You can use your money to buy new buses or upgrade your existing ones. You can use your reputation points to unlock new routes or scenarios.</li>
-</ol>
-<h3>Customizing and Sharing Your Vehicles</h3>
-<ol start="8">
-<li>If you want to customize your vehicles or create new ones, you can use the integrated bus editor that is accessible from the main menu. You can change various aspects of your buses, such as color, design, logo, interior, etc.</li>
-<li>If you want to share your vehicles with other players online, you can use the integrated upload function that is accessible from the bus editor. You can also download vehicles created by other players from the official website of TML Studios or other online platforms.</li>
-</ol>
-<h2>Tips and Tricks for Bus Simulator 2012</h2>
-<p>To make your gameplay more enjoyable and successful, here are some tips and tricks that you can use:</p>
-<h3>How to Use the Keyboard Shortcuts</h3>
-<p>There are many keyboard shortcuts that you can use in Bus Simulator 2012 to access different functions or features quickly. Here are some of them:</p>
-<ul>
-<li>F1: Help menu</li>
-<li>F5: Save game</li>
-<li>F6: Load game</li>
-<li>F7: Pause game</li>
-<li>F8: Screenshot</li>
-<li>F9: Toggle HUD</li>
-<li>F10: Toggle FPS counter</li>
-<li>F11: Toggle free camera mode</li>
-<li>F12: Toggle windowed mode</li>
-<li>Tab: Toggle bus stop list</li>
-<li>Space: Handbrake</li>
-<li>Enter: Start/stop engine</li>
-<li>E: Open/close doors</li>
-<li>T: Sell ticket</li>
-<li>G: Greet passenger</li>
-<li>A: Announce stop</li>
-<li>L: Toggle lights</li>
-<li>K: Toggle wipers</li>
-<li>H: Horn</li>
-<li>I: Toggle indicators</li>
-<li>O: Toggle hazard lights</li>
-<li>P: Toggle parking brake</li></p> 0a6ba089eb<br />
-<br />
-<br />

spaces/1acneusushi/gradio-2dmoleculeeditor/data/Call of Duty 4 Modern Warfare 11 English Language Pack - Where to Find and How to Use It.md
DELETED
@@ -1,82 +0,0 @@
-
-<h1>Call of Duty 4: Modern Warfare 11 - English Language Pack</h1>
-<p>If you are a fan of first-person shooter games, you have probably heard of <strong>Call of Duty 4: Modern Warfare 11</strong>, one of the most popular and acclaimed titles in the franchise. This game offers an immersive and cinematic action experience that takes you to various hotspots around the world, where you can use advanced and powerful weapons and gear to fight against enemies and complete missions. However, if you are not a native speaker of English, you might have some difficulties in enjoying the game fully, as it might not be available in your preferred language. That's why you need an <strong>English language pack</strong> for Call of Duty 4: Modern Warfare 11, which will allow you to play the game in English and enhance your gaming experience. In this article, we will tell you everything you need to know about this language pack, including what it is, why you need it, how to download and install it, how to uninstall or restore it, and some tips and tricks for playing the game in English. Let's get started!</p>
-<h2>What is Call of Duty 4: Modern Warfare 11?</h2>
-<p>Call of Duty 4: Modern Warfare 11 is a first-person shooter video game developed by Infinity Ward and published by Activision in November 2007. It is the fourth installment in the Call of Duty series and the first one to be set in modern times, rather than World War II. The game follows the story of a British SAS officer, a US Marine, and a Russian informant who are involved in a conflict that spans from Russia to the Middle East. The game features both a single-player campaign mode and a multiplayer mode, where players can compete with or against each other in various modes and maps. The game also introduces new features such as killstreaks, perks, challenges, and customization options for weapons and characters.</p>
-<h2>call-of-duty-4-modern-warfare-11-english-language-pack</h2><br /><p><b><b>Download Zip</b> ⭐ <a href="https://byltly.com/2uKvOH">https://byltly.com/2uKvOH</a></b></p><br /><br />
-<p>Call of Duty 4: Modern Warfare 11 received critical acclaim from critics and players alike, who praised its graphics, sound, gameplay, story, and multiplayer mode. It won several awards and became one of the best-selling games of all time, selling over 18 million copies worldwide. It also spawned two sequels, Call of Duty: Modern Warfare 2 (2009) and Call of Duty: Modern Warfare 3 (2011), which continued the story arc of the original game.</p>
-<h2>Why do you need an English language pack for Call of Duty 4: Modern Warfare 11?</h2>
-<p>If you are not a native speaker of English, you might wonder why you need an English language pack for Call of Duty 4: Modern Warfare 11. After all, you can still play the game in your own language, right? Well, not exactly. Depending on where you bought or downloaded the game from, it might not have an option to change the language settings or it might only have a limited number of languages available. For example, if you bought or downloaded the game from Steam, you can only choose between English, French, German, Italian, Spanish - Spain (not Latin America), Polish (not Brazilian), Russian (not Ukrainian), or Chinese (not Japanese). If you want to play in any other language than these ones, you are out of luck.</p>
-<p>However, even if your preferred language is among these ones, you might still want to play in English for several reasons. First of all, playing in English can help you improve your listening comprehension and vocabulary skills in this language. You can learn new words and expressions related to military terms, weapons names, locations names, commands orders etc. You can also practice your pronunciation by repeating what you hear from the characters or other players. Secondly, playing in English can enhance your immersion and enjoyment of the game. You can appreciate better the voice acting quality ,the dialogue writing ,the sound effects ,and the atmosphere of the game in its original language . You can also communicate more effectively with other players who speak English , especially if you play online . Thirdly, playing in English can help you understand the gameplay and the storyline better. You can follow more easily what is happening on screen ,what are your objectives ,what are your allies or enemies saying ,and what are the consequences of your actions .You can also avoid missing any important details or clues that might be lost in translation or localization .</p>
-<h2>How to download and install the English language pack for Call of Duty 4: Modern Warfare 11?</h2>
-<p>Now that you know why you need an English language pack for Call of Duty 4: Modern Warfare 11 ,you might wonder how to get it .Fortunately ,there are several ways to download and install this language pack ,depending on where you got your game from .Here are some options :</p>
-<h3>Downloading the language pack from Steam</h3>
-<p>If you bought or downloaded your game from Steam ,you can easily change its language settings by following these steps :</p>
-<ol>
-<li>Open Steam and go to your Library .</li>
-<li>Right-click on Call of Duty 4: Modern Warfare (2007) and select Properties .</li>
-<li>Go to Language tab .</li>
-<li>Select English from the drop-down menu .</li>
-<li>Click OK .</li>
-<li>Steam will automatically download and install any necessary files for changing your game's language .This might take some time depending on your internet speed .</li>
-<li>Once done ,launch your game and enjoy playing it in English .</li>
-</ol>
-<h3>Downloading the language pack from noName.zone</h3>
-<p>If you don't have Steam or prefer another source ,you can also download an English language pack from noName.zone ,a website that offers various gaming tutorials ,tools ,and mods .Here is how :</p>
-<p>How to change language in Call of Duty 4: Modern Warfare[^3^]<br />
-Call of Duty 4: Modern Warfare English patch download<br />
-Call of Duty 4 Language Pack - Gaming Tutorials - noName.zone[^1^]<br />
-Call of Duty 4: Modern Warfare German language pack<br />
-Call of Duty 4: Modern Warfare Russian language pack<br />
-Call of Duty 4: Modern Warfare French language pack<br />
-Call of Duty 4: Modern Warfare Italian language pack<br />
-Call of Duty 4: Modern Warfare Polish language pack<br />
-Call of Duty 4: Modern Warfare Spanish language pack<br />
-Call of Duty 4: Modern Warfare Chinese language pack<br />
-Call of Duty 4: Modern Warfare Lite version language pack[^1^]<br />
-Call of Duty 4: Modern Warfare Full version language pack[^1^]<br />
-Call of Duty 4: Modern Warfare Language Pack install guide[^1^]<br />
-Call of Duty 4: Modern Warfare Language Pack uninstall guide[^1^]<br />
-Call of Duty 4: Modern Warfare Language Pack tool download[^1^]<br />
-Call of Duty 4: Modern Warfare English languagepack Steam discussion[^2^]<br />
-Call of Duty 4: Modern Warfare English version keys<br />
-Call of Duty 4: Modern Warfare Russian version keys<br />
-Call of Duty 4: Modern Warfare German version keys<br />
-Call of Duty 4: Modern Warfare English version backup profile<br />
-Call of Duty 4: Modern Warfare English version SoundCloud stream[^4^]<br />
-Call of Duty 4: Modern Warfare English version free download<br />
-Call of Duty 4: Modern Warfare English version crack<br />
-Call of Duty 4: Modern Warfare English version torrent<br />
-Call of Duty 4: Modern Warfare English version gameplay<br />
-Call of Duty 4: Modern Warfare English version review<br />
-Call of Duty 4: Modern Warfare English version trailer<br />
-Call of Duty 4: Modern Warfare English version system requirements<br />
-Call of Duty 4: Modern Warfare English version cheats<br />
-Call of Duty 4: Modern Warfare English version mods<br />
-Call of Duty 4: Modern Warfare English version multiplayer<br />
-Call of Duty 4: Modern Warfare English version singleplayer<br />
-Call of Duty 4: Modern Warfare English version campaign<br />
-Call of Duty 4: Modern Warfare English version missions<br />
-Call of Duty 4: Modern Warfare English version weapons<br />
-Call of Duty 4: Modern Warfare English version maps<br />
-Call of Duty 4: Modern Warfare English version graphics<br />
-Call of Duty 4: Modern Warfare English version soundtrack<br />
-Call of Duty 4: Modern Warfare English version voice actors<br />
-Call of Duty 4: Modern Warfare English version subtitles<br />
-Call of Duty 4: Modern Warfare English version settings<br />
-Call of Duty 4: Modern Warfare English version patch notes<br />
-Call of Duty 4: Modern Warfare English version bugs and fixes<br />
-Call of Duty 4: Modern Warfare English version tips and tricks<br />
-Call of Duty 4: Modern Warfare English version best loadout<br />
-Call of Duty 4: Modern Warfare English version ranking system<br />
-Call of Duty 4: Modern Warfare English version achievements and trophies<br />
-Call of Duty 4: Modern Warfare English version comparison with other versions<br />
-Call of Duty 4: Modern Warfare English version history and development</p>
-<ol>
-<li>Go to https://noname.zone/index.php?/tutorials/article/8-call-of-duty-4-language-pack/ .</li>
-<li>Scroll down until you see two links :Full version (~443MB) - Patch entire multiplayer Lite version (~8MB) - Patch almost everything (more details in spoiler) .</li>
-<li>Select which version you want depending on how much data you want to download .The full version will patch everything related to multiplayer mode ,while lite version will patch most things except some minor text elements .Both versions will patch single-player mode as well .</li>
-<li>Click on either link and download LanguagePack.zip or LanguagePack (Lite).zip file .</li>
-<li>Extract LanguagePack folder to CoD4 root directory .This is usually located at C:\Program Files (x86)\Steam\steamapps\common\Call Of Duty\Modern Warfare\Call Of Duty\Modern Warfare\Call Of Duty\Modern Warfare</p> 0a6ba089eb<br />
-<br />
-<br />

spaces/1acneusushi/gradio-2dmoleculeeditor/data/Embird 2017 Registration Password The Secret to Creating Amazing Embroidery Designs.md
DELETED
@@ -1,33 +0,0 @@
-
-<h1>How to Get Embird 2017 Registration Password for Free</h1>
-<p>Embird 2017 is a popular embroidery software that allows you to create, edit, and digitize embroidery designs. It also supports various embroidery formats and machines. However, to use the full features of Embird 2017, you need to register it with a password that you can purchase from the official website.</p>
-<h2>embird 2017 registration password crack</h2><br /><p><b><b>Download</b> ✑ ✑ ✑ <a href="https://byltly.com/2uKwnS">https://byltly.com/2uKwnS</a></b></p><br /><br />
-<p>But what if you don't want to spend money on the registration password? Is there a way to get Embird 2017 registration password for free? The answer is yes, but you need to be careful. There are many websites and programs that claim to offer free Embird 2017 registration passwords, but most of them are scams or viruses that can harm your computer or steal your personal information.</p>
-<p>In this article, we will show you how to get Embird 2017 registration password for free safely and legally. We will also share some tips on how to use Embird 2017 effectively and avoid common problems.</p>
-<h2>How to Get Embird 2017 Registration Password for Free</h2>
-<p>The best way to get Embird 2017 registration password for free is to use the trial version of the software. The trial version allows you to use Embird 2017 for 30 days without any limitations. You can download the trial version from the official website <a href="https://www.embird.net/">here</a>.</p>
-<p>To use the trial version, you need to enter your name and email address when you install the software. You will then receive an email with a link to activate the trial version. Once you activate it, you can use Embird 2017 for 30 days without any restrictions.</p>
-<p></p>
-<p>However, after 30 days, the trial version will expire and you will need to purchase the registration password to continue using the software. If you want to extend the trial period, you can try uninstalling and reinstalling the software with a different name and email address. However, this may not work for some computers or versions of Embird 2017.</p>
-<p>Another way to get Embird 2017 registration password for free is to use a crack or a keygen program. These are programs that generate fake registration passwords that can bypass the security of Embird 2017. However, we do not recommend using these programs for several reasons:</p>
-<ul>
-<li>They are illegal and violate the terms of service of Embird 2017.</li>
-<li>They may contain viruses or malware that can damage your computer or steal your data.</li>
-<li>They may not work properly or cause errors or crashes in Embird 2017.</li>
-<li>They may not be compatible with the latest updates or features of Embird 2017.</li>
-<li>They may not support all embroidery formats or machines.</li>
-</ul>
-<p>Therefore, we advise you to avoid using crack or keygen programs and stick to the trial version or purchase the registration password from the official website.</p>
-<h2>How to Use Embird 2017 Effectively</h2>
-<p>Now that you know how to get Embird 2017 registration password for free, let's see how to use the software effectively. Here are some tips and tricks that can help you create beautiful embroidery designs with Embird 2017:</p>
-<ul>
-<li>Use the tutorials and manuals that come with the software. They will teach you how to use the basic and advanced features of Embird 2017 and how to solve common problems.</li>
-<li>Use the online support and forums that are available on the official website. They will answer your questions and provide tips and advice from other users and experts.</li>
-<li>Use the built-in design library that contains thousands of ready-made embroidery designs that you can edit or combine with your own designs.</li>
-<li>Use the design manager that allows you to organize, view, convert, print, and export your embroidery designs in various formats and sizes.</li>
-<li>Use the editor that allows you to modify, resize, rotate, mirror, split, merge, align, and optimize your embroidery designs.</li>
-<li>Use the digitizer that allows you to create your own embroidery designs from scratch or from images or vector graphics.</li>
-<li>Use the simulator that allows you to preview how your embroidery designs will look on different fabrics and colors before stitching them.</li>
-<li>Use the manager that allows you to control your embroidery machine and send your designs directly from your</p> ddb901b051<br />
-<br />
-<br />

spaces/1gistliPinn/ChatGPT4/Examples/Buku Zoologi Vertebrata.pdf Extra Quality.md
DELETED
@@ -1,6 +0,0 @@
-<h2>Buku Zoologi Vertebrata.pdf</h2><br /><p><b><b>Download</b> ⏩ <a href="https://imgfil.com/2uxYsB">https://imgfil.com/2uxYsB</a></b></p><br /><br />
-
-899543212b<br />
-<br />
-<br />
-<p></p>

spaces/1gistliPinn/ChatGPT4/Examples/Delock Usb Sound Adapter 7.1 Driver Download.md
DELETED
@@ -1,6 +0,0 @@
-<h2>Delock Usb Sound Adapter 7.1 Driver Download</h2><br /><p><b><b>Download File</b> ✔ <a href="https://imgfil.com/2uy20S">https://imgfil.com/2uy20S</a></b></p><br /><br />
-
-Download and Print Say You Love Me sheet music for Piano & Vocal by Fleetwood ... Delock Adapter USB 2.0 Sound 7.1 extern. frutiger roman font free mac; ... address allows us to send you informative newsletters and driver information, and ... 1fdad05405<br />
-<br />
-<br />
-<p></p>

spaces/1toTree/lora_test/ppdiffusers/pipelines/stable_diffusion_safe/safety_checker.py
DELETED
@@ -1,113 +0,0 @@
-# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-# Copyright 2022 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import paddle
-import paddle.nn.functional as F
-
-from paddlenlp.transformers import (
-    CLIPPretrainedModel,
-    CLIPVisionConfig,
-    CLIPVisionModel,
-)
-
-from ...utils import logging
-
-logger = logging.get_logger(__name__)
-
-
-def cosine_distance(image_embeds, text_embeds):
-    normalized_image_embeds = F.normalize(image_embeds)
-    normalized_text_embeds = F.normalize(text_embeds)
-    return paddle.matmul(normalized_image_embeds, normalized_text_embeds, transpose_y=True)
-
-
-class SafeStableDiffusionSafetyChecker(CLIPPretrainedModel):
-    config_class = CLIPVisionConfig
-
-    def __init__(self, config: CLIPVisionConfig):
-        super().__init__(config)
-        self.clip = CLIPVisionModel(config)
-
-        self.vision_projection = paddle.create_parameter(
-            (config.hidden_size, config.projection_dim), dtype=paddle.get_default_dtype()
-        )
-
-        self.register_buffer("concept_embeds", paddle.ones([17, config.projection_dim]))
-        self.register_buffer("special_care_embeds", paddle.ones([3, config.projection_dim]))
-
-        self.register_buffer("concept_embeds_weights", paddle.ones([17]))
-        self.register_buffer("special_care_embeds_weights", paddle.ones([3]))
-
-    @paddle.no_grad()
-    def forward(self, clip_input, images):
-        pooled_output = self.clip(clip_input)[1]  # pooled_output
-        image_embeds = paddle.matmul(pooled_output, self.vision_projection)
-
-        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16
-        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).astype("float32").numpy()
-        cos_dist = cosine_distance(image_embeds, self.concept_embeds).astype("float32").numpy()
-
-        result = []
-        batch_size = image_embeds.shape[0]
-        for i in range(batch_size):
-            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
-
-            # increase this value to create a stronger `nfsw` filter
-            # at the cost of increasing the possibility of filtering benign images
-            adjustment = 0.0
-
-            for concept_idx in range(len(special_cos_dist[0])):
-                concept_cos = special_cos_dist[i][concept_idx]
-                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
-                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
-                if result_img["special_scores"][concept_idx] > 0:
-                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
-                    adjustment = 0.01
-
-            for concept_idx in range(len(cos_dist[0])):
-                concept_cos = cos_dist[i][concept_idx]
-                concept_threshold = self.concept_embeds_weights[concept_idx].item()
-                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
-                if result_img["concept_scores"][concept_idx] > 0:
-                    result_img["bad_concepts"].append(concept_idx)
-
-            result.append(result_img)
-
-        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
-
-        return images, has_nsfw_concepts
-
-    def forward_fastdeploy(self, clip_input: paddle.Tensor, images: paddle.Tensor):
-        pooled_output = self.clip(clip_input)[1]  # pooled_output
-        image_embeds = paddle.matmul(pooled_output, self.vision_projection)
-
-        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
-        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
-
-        # increase this value to create a stronger `nsfw` filter
-        # at the cost of increasing the possibility of filtering benign images
-        adjustment = 0.0
-
-        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
-        # special_scores = special_scores.round(decimals=3)
-        special_care = paddle.any(special_scores > 0, axis=1)
-        special_adjustment = special_care * 0.01
-        special_adjustment = special_adjustment.unsqueeze(1).expand([-1, cos_dist.shape[1]])
-
-        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
-        # concept_scores = concept_scores.round(decimals=3)
-        has_nsfw_concepts = paddle.any(concept_scores > 0, axis=1)
-
-        return images, has_nsfw_concepts

spaces/2023Liu2023/bingo/src/components/tone-selector.tsx
DELETED
@@ -1,43 +0,0 @@
-import React from 'react'
-import { BingConversationStyle } from '@/lib/bots/bing/types'
-import { cn } from '@/lib/utils'
-
-type ToneItem = {
-  type: BingConversationStyle,
-  name: string
-}
-
-const ToneList: ToneItem[] = [
-  { name: '有创造力', type: BingConversationStyle.Creative },
-  { name: '更平衡', type: BingConversationStyle.Balanced },
-  { name: '更精确', type: BingConversationStyle.Precise }
-]
-
-interface ToneSelectorProps {
-  type: BingConversationStyle | ''
-  onChange?: (type: BingConversationStyle) => void
-}
-
-export function ToneSelector({ type, onChange }: ToneSelectorProps) {
-  return (
-    <div className="fieldset">
-      <div className="legend">
-        选择对话样式
-      </div>
-      <div className="options-list-container">
-        <ul id="tone-options" className="options">
-          {
-            ToneList.map(tone => (
-              <li className="option" key={tone.name} onClick={() => onChange?.(tone.type)}>
-                <button className={cn(`tone-${type.toLowerCase()}`, { selected: tone.type === type}) } aria-pressed="true" >
-                  <span className="caption-2-strong label-modifier">更</span>
-                  <span className="body-1-strong label">{tone.name}</span>
-                </button>
-              </li>
-            ))
-          }
-        </ul>
-      </div>
-    </div>
-  )
-}

spaces/A00001/bingothoo/src/components/ui/textarea.tsx
DELETED
@@ -1,24 +0,0 @@
-import * as React from 'react'
-
-import { cn } from '@/lib/utils'
-
-export interface TextareaProps
-  extends React.TextareaHTMLAttributes<HTMLTextAreaElement> {}
-
-const Textarea = React.forwardRef<HTMLTextAreaElement, TextareaProps>(
-  ({ className, ...props }, ref) => {
-    return (
-      <textarea
-        className={cn(
-          'flex min-h-[80px] w-full rounded-md border border-input bg-transparent px-3 py-2 text-sm ring-offset-background placeholder:text-muted-foreground focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-ring focus-visible:ring-offset-2 disabled:cursor-not-allowed disabled:opacity-50',
-          className
-        )}
-        ref={ref}
-        {...props}
-      />
-    )
-  }
-)
-Textarea.displayName = 'Textarea'
-
-export { Textarea }

spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vqperceptual.py
DELETED
@@ -1,136 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import sys
-from ldm.util import exists
-sys.path.insert(0, '.')  # nopep8
-from ldm.modules.discriminator.model import (NLayerDiscriminator, NLayerDiscriminator1dFeats,
-                                             NLayerDiscriminator1dSpecs,
-                                             weights_init)
-from ldm.modules.losses_audio.lpaps import LPAPS
-from ldm.modules.losses.vqperceptual import l1, l2, measure_perplexity, hinge_d_loss, vanilla_d_loss, adopt_weight
-
-
-
-class DummyLoss(nn.Module):
-    def __init__(self):
-        super().__init__()
-
-class VQLPAPSWithDiscriminator(nn.Module):
-    def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0,
-                 disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
-                 perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
-                 disc_ndf=64, disc_loss="hinge", n_classes=None, pixel_loss="l1"):
-        super().__init__()
-        assert disc_loss in ["hinge", "vanilla"]
-        self.codebook_weight = codebook_weight
-        self.pixel_weight = pixelloss_weight
-        self.perceptual_loss = LPAPS().eval()
-        self.perceptual_weight = perceptual_weight
-
-        if pixel_loss == "l1":
-            self.pixel_loss = l1
-        else:
-            self.pixel_loss = l2
-
-        self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
-                                                 n_layers=disc_num_layers,
-                                                 use_actnorm=use_actnorm,
-                                                 ndf=disc_ndf
-                                                 ).apply(weights_init)
-        self.discriminator_iter_start = disc_start
-        if disc_loss == "hinge":
-            self.disc_loss = hinge_d_loss
-        elif disc_loss == "vanilla":
-            self.disc_loss = vanilla_d_loss
-        else:
-            raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
-        print(f"VQLPAPSWithDiscriminator running with {disc_loss} loss.")
-        self.disc_factor = disc_factor
-        self.discriminator_weight = disc_weight
-        self.disc_conditional = disc_conditional
-        self.n_classes = n_classes
-
-    def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
-        if last_layer is not None:
-            nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
-            g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
-        else:
-            nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
-            g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
-
-        d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
-        d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
-        d_weight = d_weight * self.discriminator_weight
-        return d_weight
-
-    def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx,
-                global_step, last_layer=None, cond=None, split="train", predicted_indices=None):
-        if not exists(codebook_loss):
-            codebook_loss = torch.tensor([0.]).to(inputs.device)
-        rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
-        if self.perceptual_weight > 0:
-            p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
-            rec_loss = rec_loss + self.perceptual_weight * p_loss
-        else:
-            p_loss = torch.tensor([0.0])
-
-        nll_loss = rec_loss
-        # nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
-        nll_loss = torch.mean(nll_loss)
-
-        # now the GAN part
-        if optimizer_idx == 0:
-            # generator update
-            if cond is None:
-                assert not self.disc_conditional
-                logits_fake = self.discriminator(reconstructions.contiguous())
-            else:
-                assert self.disc_conditional
-                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
-            g_loss = -torch.mean(logits_fake)
-
-            try:
-                d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
-            except RuntimeError:
-                assert not self.training
-                d_weight = torch.tensor(0.0)
-
-            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
-            loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean()
-
-            log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
-                   "{}/quant_loss".format(split): codebook_loss.detach().mean(),
-                   "{}/nll_loss".format(split): nll_loss.detach().mean(),
-                   "{}/rec_loss".format(split): rec_loss.detach().mean(),
-                   "{}/p_loss".format(split): p_loss.detach().mean(),
-                   "{}/d_weight".format(split): d_weight.detach(),
-                   "{}/disc_factor".format(split): torch.tensor(disc_factor),
-                   "{}/g_loss".format(split): g_loss.detach().mean(),
-                   }
-            # if predicted_indices is not None:
-            #     assert self.n_classes is not None
-            #     with torch.no_grad():
-            #         perplexity, cluster_usage = measure_perplexity(predicted_indices, self.n_classes)
-            #     log[f"{split}/perplexity"] = perplexity
-            #     log[f"{split}/cluster_usage"] = cluster_usage
-            return loss, log
-
-        if optimizer_idx == 1:
-            # second pass for discriminator update
-            if cond is None:
-                logits_real = self.discriminator(inputs.contiguous().detach())
-                logits_fake = self.discriminator(reconstructions.contiguous().detach())
-            else:
-                logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
-                logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
-
-            disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
-            d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
-
-            log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
-                   "{}/logits_real".format(split): logits_real.detach().mean(),
-                   "{}/logits_fake".format(split): logits_fake.detach().mean()
-                   }
-            return d_loss, log
-

spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/conformer/conformer.py
DELETED
@@ -1,72 +0,0 @@
from torch import nn
from .espnet_positional_embedding import RelPositionalEncoding
from .espnet_transformer_attn import RelPositionMultiHeadedAttention
from .layers import Swish, ConvolutionModule, EncoderLayer, MultiLayeredConv1d
from ..layers import Embedding


class ConformerLayers(nn.Module):
    def __init__(self, hidden_size, num_layers, kernel_size=9, dropout=0.0, num_heads=4,
                 use_last_norm=True, save_hidden=False):
        super().__init__()
        self.use_last_norm = use_last_norm
        self.layers = nn.ModuleList()
        positionwise_layer = MultiLayeredConv1d
        positionwise_layer_args = (hidden_size, hidden_size * 4, 1, dropout)
        self.pos_embed = RelPositionalEncoding(hidden_size, dropout)
        self.encoder_layers = nn.ModuleList([EncoderLayer(
            hidden_size,
            RelPositionMultiHeadedAttention(num_heads, hidden_size, 0.0),
            positionwise_layer(*positionwise_layer_args),
            positionwise_layer(*positionwise_layer_args),
            ConvolutionModule(hidden_size, kernel_size, Swish()),
            dropout,
        ) for _ in range(num_layers)])
        if self.use_last_norm:
            self.layer_norm = nn.LayerNorm(hidden_size)
        else:
            self.layer_norm = nn.Linear(hidden_size, hidden_size)
        self.save_hidden = save_hidden
        if save_hidden:
            self.hiddens = []

    def forward(self, x, padding_mask=None):
        """

        :param x: [B, T, H]
        :param padding_mask: [B, T]
        :return: [B, T, H]
        """
        self.hiddens = []
        nonpadding_mask = x.abs().sum(-1) > 0
        x = self.pos_embed(x)
        for l in self.encoder_layers:
            x, mask = l(x, nonpadding_mask[:, None, :])
            if self.save_hidden:
                self.hiddens.append(x[0])
            x = x[0]
        x = self.layer_norm(x) * nonpadding_mask.float()[:, :, None]
        return x


class ConformerEncoder(ConformerLayers):
    def __init__(self, hidden_size, dict_size, num_layers=None):
        conformer_enc_kernel_size = 9
        super().__init__(hidden_size, num_layers, conformer_enc_kernel_size)
        self.embed = Embedding(dict_size, hidden_size, padding_idx=0)

    def forward(self, x):
        """

        :param x: token ids [B, T]
        :return: [B x T x C]
        """
        x = self.embed(x)  # [B, T, H]
        x = super(ConformerEncoder, self).forward(x)
        return x


class ConformerDecoder(ConformerLayers):
    def __init__(self, hidden_size, num_layers):
        conformer_dec_kernel_size = 9
        super().__init__(hidden_size, num_layers, conformer_dec_kernel_size)
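For reference, a minimal smoke-test sketch of the deleted encoder. This is not from the repo: it assumes the sibling modules it imports are still importable from this package path, and that hidden_size is divisible by the default 4 attention heads.

import torch
from text_to_speech.modules.commons.conformer.conformer import ConformerEncoder

# dict_size is the vocabulary size; index 0 is reserved for padding
encoder = ConformerEncoder(hidden_size=256, dict_size=1000, num_layers=4)
tokens = torch.randint(1, 1000, (2, 50))  # [B, T]
out = encoder(tokens)                     # [B, T, H]
print(out.shape)                          # torch.Size([2, 50, 256])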
spaces/AIxPha/QSign/unidbg-fetch-qsign/bin/unidbg-fetch-qsign.bat
DELETED
@@ -1,89 +0,0 @@
@rem
@rem Copyright 2015 the original author or authors.
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem      https://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem

@if "%DEBUG%" == "" @echo off
@rem ##########################################################################
@rem
@rem  unidbg-fetch-qsign startup script for Windows
@rem
@rem ##########################################################################

@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal

set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%..

@rem Resolve any "." and ".." in APP_HOME to make it shorter.
for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi

@rem Add default JVM options here. You can also use JAVA_OPTS and UNIDBG_FETCH_QSIGN_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS=

@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome

set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto execute

echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.

goto fail

:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe

if exist "%JAVA_EXE%" goto execute

echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.

goto fail

:execute
@rem Setup the command line

set CLASSPATH=%APP_HOME%\lib\unidbg-fetch-qsign-1.1.0.jar;%APP_HOME%\lib\unidbg-fix.jar;%APP_HOME%\lib\ktor-server-content-negotiation-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-kotlinx-json-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-netty-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-host-common-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-server-core-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-kotlinx-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-serialization-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-events-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-websockets-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-http-cio-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-http-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-network-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-utils-jvm-2.3.1.jar;%APP_HOME%\lib\ktor-io-jvm-2.3.1.jar;%APP_HOME%\lib\kotlin-stdlib-jdk8-1.8.22.jar;%APP_HOME%\lib\kotlinx-serialization-json-jvm-1.5.1.jar;%APP_HOME%\lib\kotlinx-serialization-protobuf-jvm-1.5.1.jar;%APP_HOME%\lib\kotlinx-serialization-core-jvm-1.5.1.jar;%APP_HOME%\lib\logback-classic-1.2.11.jar;%APP_HOME%\lib\kotlinx-coroutines-jdk8-1.7.1.jar;%APP_HOME%\lib\kotlinx-coroutines-core-jvm-1.7.1.jar;%APP_HOME%\lib\kotlin-stdlib-jdk7-1.8.22.jar;%APP_HOME%\lib\kotlin-reflect-1.8.10.jar;%APP_HOME%\lib\kotlin-stdlib-1.8.22.jar;%APP_HOME%\lib\slf4j-api-1.7.36.jar;%APP_HOME%\lib\kotlin-stdlib-common-1.8.22.jar;%APP_HOME%\lib\config-1.4.2.jar;%APP_HOME%\lib\jansi-2.4.0.jar;%APP_HOME%\lib\netty-codec-http2-4.1.92.Final.jar;%APP_HOME%\lib\alpn-api-1.1.3.v20160715.jar;%APP_HOME%\lib\netty-transport-native-kqueue-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-native-epoll-4.1.92.Final.jar;%APP_HOME%\lib\logback-core-1.2.11.jar;%APP_HOME%\lib\annotations-23.0.0.jar;%APP_HOME%\lib\netty-codec-http-4.1.92.Final.jar;%APP_HOME%\lib\netty-handler-4.1.92.Final.jar;%APP_HOME%\lib\netty-codec-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-classes-kqueue-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-classes-epoll-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-native-unix-common-4.1.92.Final.jar;%APP_HOME%\lib\netty-transport-4.1.92.Final.jar;%APP_HOME%\lib\netty-buffer-4.1.92.Final.jar;%APP_HOME%\lib\netty-resolver-4.1.92.Final.jar;%APP_HOME%\lib\netty-common-4.1.92.Final.jar


@rem Execute unidbg-fetch-qsign
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %UNIDBG_FETCH_QSIGN_OPTS% -classpath "%CLASSPATH%" MainKt %*

:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd

:fail
rem Set variable UNIDBG_FETCH_QSIGN_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%UNIDBG_FETCH_QSIGN_EXIT_CONSOLE%" exit 1
exit /b 1

:mainEnd
if "%OS%"=="Windows_NT" endlocal

:omega
spaces/AchyuthGamer/ImMagician-Image-Generator/share_btn.py
DELETED
@@ -1,78 +0,0 @@
community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
<path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
<path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
</svg>"""

loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
style="color: #ffffff;
"
xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""

share_js = """async () => {
    async function uploadFile(file){
        const UPLOAD_URL = 'https://huggingface.co/uploads';
        const response = await fetch(UPLOAD_URL, {
            method: 'POST',
            headers: {
                'Content-Type': file.type,
                'X-Requested-With': 'XMLHttpRequest',
            },
            body: file, /// <- File inherits from Blob
        });
        const url = await response.text();
        return url;
    }

    async function getInputImgFile(imgEl){
        const res = await fetch(imgEl.src);
        const blob = await res.blob();
        const imgId = Date.now() % 200;
        const isPng = imgEl.src.startsWith(`data:image/png`);
        if(isPng){
            const fileName = `sd-perception-${{imgId}}.png`;
            return new File([blob], fileName, { type: 'image/png' });
        }else{
            const fileName = `sd-perception-${{imgId}}.jpg`;
            return new File([blob], fileName, { type: 'image/jpeg' });
        }
    }
    const gradioEl = document.querySelector("gradio-app").shadowRoot || document.querySelector('body > gradio-app');
    const generatedImages = gradioEl.querySelectorAll(".grid-wrap img")
    const prompt = gradioEl.querySelector("#component-3 textarea").value

    const shareBtnEl = gradioEl.querySelector('#share-btn');
    const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
    const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');

    shareBtnEl.style.pointerEvents = 'none';
    shareIconEl.style.display = 'none';
    loadingIconEl.style.removeProperty('display');

    let urlOutputs = [];

    for (let i = 0; i < generatedImages.length; i++) {
        let imgEl = generatedImages[i];
        let outputFile = await getInputImgFile(imgEl);
        let urlOutputImg = await uploadFile(outputFile);
        urlOutputs.push(urlOutputImg);
    }
    const imgTags = urlOutputs.map(url => ``).join('\n');

    const descriptionMd = `### Prompt
${prompt}

#### Generated Images:
{imgTags}
`;
    console.log(descriptionMd)
    const params = new URLSearchParams({
        title: prompt,
        description: descriptionMd,
        preview: true
    });
    const paramsStr = params.toString();
    window.open(`https://huggingface.co/spaces/warp-ai/Wuerstchen/discussions/new?${paramsStr}`, '_blank');
    shareBtnEl.style.removeProperty('pointer-events');
    shareIconEl.style.removeProperty('display');
    loadingIconEl.style.display = 'none';
}"""
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/app.html
DELETED
@@ -1,32 +0,0 @@
<!DOCTYPE html>
<html lang="en" class="h-full">
    <link rel="stylesheet" href="https://www.w3schools.com/w3css/4/w3.css" />
    <head>
        <!-- Google Tag Manager -->
        <script>
            var _paq = window._paq || [];
            window._paq=_paq;
            (function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
            new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
            j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
            'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
            })(window,document,'script','dataLayer','GTM-TVD93MF');
        </script>
        <!-- End Google Tag Manager -->
        <meta charset="utf-8" />
        <meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=no" />
        <meta property="og:image" content="/chatui/thumbnail.jpg" />
        <script>
            if (
                localStorage.theme === "dark" ||
                (!("theme" in localStorage) && window.matchMedia("(prefers-color-scheme: dark)").matches)
            ) {
                document.documentElement.classList.add("dark");
            }
        </script>
        %sveltekit.head%
    </head>
    <body data-sveltekit-preload-data="hover" class="h-full dark:bg-gray-900">
        <div id="app" class="contents h-full">%sveltekit.body%</div>
    </body>
</html>
spaces/AchyuthGamer/OpenGPT-Chat-UI/src/lib/types/SharedConversation.ts
DELETED
@@ -1,12 +0,0 @@
import type { Message } from "./Message";
import type { Timestamps } from "./Timestamps";

export interface SharedConversation extends Timestamps {
    _id: string;

    hash: string;

    model: string;
    title: string;
    messages: Message[];
}
spaces/AchyuthGamer/OpenGPT/server/babel.py
DELETED
@@ -1,48 +0,0 @@
import os
import subprocess
from flask import request, session, jsonify
from flask_babel import Babel


def get_languages_from_dir(directory):
    """Return a list of directory names in the given directory."""
    return [name for name in os.listdir(directory)
            if os.path.isdir(os.path.join(directory, name))]


BABEL_DEFAULT_LOCALE = 'en_US'
BABEL_LANGUAGES = get_languages_from_dir('translations')


def create_babel(app):
    """Create and initialize a Babel instance with the given Flask app."""
    babel = Babel(app)
    app.config['BABEL_DEFAULT_LOCALE'] = BABEL_DEFAULT_LOCALE
    app.config['BABEL_LANGUAGES'] = BABEL_LANGUAGES

    babel.init_app(app, locale_selector=get_locale)
    compile_translations()


def get_locale():
    """Get the user's locale from the session or the request's accepted languages."""
    return session.get('language') or request.accept_languages.best_match(BABEL_LANGUAGES)


def get_languages():
    """Return a list of available languages in JSON format."""
    return jsonify(BABEL_LANGUAGES)


def compile_translations():
    """Compile the translation files."""
    result = subprocess.run(
        ['pybabel', 'compile', '-d', 'translations'],
        stdout=subprocess.PIPE,
    )

    if result.returncode != 0:
        raise Exception(
            f'Compiling translations failed:\n{result.stdout.decode()}')

    print('Translations compiled successfully')
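A hedged sketch of how this helper was presumably wired into the Flask app. The app module layout and the translations/ directory with per-locale subfolders are assumptions, and pybabel must be installed since create_babel shells out to it:

from flask import Flask
from server.babel import create_babel, get_languages

app = Flask(__name__)
create_babel(app)  # sets BABEL_* config, registers get_locale, compiles .po files

@app.route('/languages')
def languages():
    return get_languages()  # JSON list of the locale directory names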
spaces/Adapter/CoAdapter/ldm/modules/diffusionmodules/model.py
DELETED
@@ -1,852 +0,0 @@
# pytorch_diffusion + derived encoder decoder
import math
import torch
import torch.nn as nn
import numpy as np
from einops import rearrange
from typing import Optional, Any

from ldm.modules.attention import MemoryEfficientCrossAttention

try:
    import xformers
    import xformers.ops
    XFORMERS_IS_AVAILBLE = True
except:
    XFORMERS_IS_AVAILBLE = False
    print("No module 'xformers'. Proceeding without it.")


def get_timestep_embedding(timesteps, embedding_dim):
    """
    This matches the implementation in Denoising Diffusion Probabilistic Models:
    From Fairseq.
    Build sinusoidal embeddings.
    This matches the implementation in tensor2tensor, but differs slightly
    from the description in Section 3.5 of "Attention Is All You Need".
    """
    assert len(timesteps.shape) == 1

    half_dim = embedding_dim // 2
    emb = math.log(10000) / (half_dim - 1)
    emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
    emb = emb.to(device=timesteps.device)
    emb = timesteps.float()[:, None] * emb[None, :]
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:  # zero pad
        emb = torch.nn.functional.pad(emb, (0,1,0,0))
    return emb


def nonlinearity(x):
    # swish
    return x*torch.sigmoid(x)


def Normalize(in_channels, num_groups=32):
    return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)


class Upsample(nn.Module):
    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        if self.with_conv:
            x = self.conv(x)
        return x


class Downsample(nn.Module):
    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=3,
                                        stride=2,
                                        padding=0)

    def forward(self, x):
        if self.with_conv:
            pad = (0,1,0,1)
            x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
            x = self.conv(x)
        else:
            x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        return x


class ResnetBlock(nn.Module):
    def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
                 dropout, temb_channels=512):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
        self.use_conv_shortcut = conv_shortcut

        self.norm1 = Normalize(in_channels)
        self.conv1 = torch.nn.Conv2d(in_channels,
                                     out_channels,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1)
        if temb_channels > 0:
            self.temb_proj = torch.nn.Linear(temb_channels,
                                             out_channels)
        self.norm2 = Normalize(out_channels)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = torch.nn.Conv2d(out_channels,
                                     out_channels,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1)
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                self.conv_shortcut = torch.nn.Conv2d(in_channels,
                                                     out_channels,
                                                     kernel_size=3,
                                                     stride=1,
                                                     padding=1)
            else:
                self.nin_shortcut = torch.nn.Conv2d(in_channels,
                                                    out_channels,
                                                    kernel_size=1,
                                                    stride=1,
                                                    padding=0)

    def forward(self, x, temb):
        h = x
        h = self.norm1(h)
        h = nonlinearity(h)
        h = self.conv1(h)

        if temb is not None:
            h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]

        h = self.norm2(h)
        h = nonlinearity(h)
        h = self.dropout(h)
        h = self.conv2(h)

        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                x = self.conv_shortcut(x)
            else:
                x = self.nin_shortcut(x)

        return x+h


class AttnBlock(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        b,c,h,w = q.shape
        q = q.reshape(b,c,h*w)
        q = q.permute(0,2,1)   # b,hw,c
        k = k.reshape(b,c,h*w)  # b,c,hw
        w_ = torch.bmm(q,k)     # b,hw,hw    w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
        w_ = w_ * (int(c)**(-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        v = v.reshape(b,c,h*w)
        w_ = w_.permute(0,2,1)   # b,hw,hw (first hw of k, second of q)
        h_ = torch.bmm(v,w_)     # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
        h_ = h_.reshape(b,c,h,w)

        h_ = self.proj_out(h_)

        return x+h_

class MemoryEfficientAttnBlock(nn.Module):
    """
    Uses xformers efficient implementation,
    see https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
    Note: this is a single-head self-attention operation
    """
    #
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)
        self.attention_op: Optional[Any] = None

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        B, C, H, W = q.shape
        q, k, v = map(lambda x: rearrange(x, 'b c h w -> b (h w) c'), (q, k, v))

        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(B, t.shape[1], 1, C)
            .permute(0, 2, 1, 3)
            .reshape(B * 1, t.shape[1], C)
            .contiguous(),
            (q, k, v),
        )
        out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)

        out = (
            out.unsqueeze(0)
            .reshape(B, 1, out.shape[1], C)
            .permute(0, 2, 1, 3)
            .reshape(B, out.shape[1], C)
        )
        out = rearrange(out, 'b (h w) c -> b c h w', b=B, h=H, w=W, c=C)
        out = self.proj_out(out)
        return x+out


class MemoryEfficientCrossAttentionWrapper(MemoryEfficientCrossAttention):
    def forward(self, x, context=None, mask=None):
        b, c, h, w = x.shape
        x = rearrange(x, 'b c h w -> b (h w) c')
        out = super().forward(x, context=context, mask=mask)
        out = rearrange(out, 'b (h w) c -> b c h w', h=h, w=w, c=c)
        return x + out


def make_attn(in_channels, attn_type="vanilla", attn_kwargs=None):
    assert attn_type in ["vanilla", "vanilla-xformers", "memory-efficient-cross-attn", "linear", "none"], f'attn_type {attn_type} unknown'
    if XFORMERS_IS_AVAILBLE and attn_type == "vanilla":
        attn_type = "vanilla-xformers"
    print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
    if attn_type == "vanilla":
        assert attn_kwargs is None
        return AttnBlock(in_channels)
    elif attn_type == "vanilla-xformers":
        print(f"building MemoryEfficientAttnBlock with {in_channels} in_channels...")
        return MemoryEfficientAttnBlock(in_channels)
    elif attn_type == "memory-efficient-cross-attn":
        attn_kwargs["query_dim"] = in_channels
        return MemoryEfficientCrossAttentionWrapper(**attn_kwargs)
    elif attn_type == "none":
        return nn.Identity(in_channels)
    else:
        raise NotImplementedError()


class Model(nn.Module):
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"):
        super().__init__()
        if use_linear_attn: attn_type = "linear"
        self.ch = ch
        self.temb_ch = self.ch*4
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels

        self.use_timestep = use_timestep
        if self.use_timestep:
            # timestep embedding
            self.temb = nn.Module()
            self.temb.dense = nn.ModuleList([
                torch.nn.Linear(self.ch,
                                self.temb_ch),
                torch.nn.Linear(self.temb_ch,
                                self.temb_ch),
            ])

        # downsampling
        self.conv_in = torch.nn.Conv2d(in_channels,
                                       self.ch,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)

        curr_res = resolution
        in_ch_mult = (1,)+tuple(ch_mult)
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            skip_in = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks+1):
                if i_block == self.num_res_blocks:
                    skip_in = ch*in_ch_mult[i_level]
                block.append(ResnetBlock(in_channels=block_in+skip_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_ch,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x, t=None, context=None):
        #assert x.shape[2] == x.shape[3] == self.resolution
        if context is not None:
            # assume aligned context, cat along channel axis
            x = torch.cat((x, context), dim=1)
        if self.use_timestep:
            # timestep embedding
            assert t is not None
            temb = get_timestep_embedding(t, self.ch)
            temb = self.temb.dense[0](temb)
            temb = nonlinearity(temb)
            temb = self.temb.dense[1](temb)
        else:
            temb = None

        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](
                    torch.cat([h, hs.pop()], dim=1), temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h

    def get_last_layer(self):
        return self.conv_out.weight


class Encoder(nn.Module):
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
                 **ignore_kwargs):
        super().__init__()
        if use_linear_attn: attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels

        # downsampling
        self.conv_in = torch.nn.Conv2d(in_channels,
                                       self.ch,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)

        curr_res = resolution
        in_ch_mult = (1,)+tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch*in_ch_mult[i_level]
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions-1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        2*z_channels if double_z else z_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        # timestep embedding
        temb = None

        # downsampling
        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions-1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        # middle
        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # end
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h


class Decoder(nn.Module):
    def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
                 attn_type="vanilla", **ignorekwargs):
        super().__init__()
        if use_linear_attn: attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.give_pre_end = give_pre_end
        self.tanh_out = tanh_out

        # compute in_ch_mult, block_in and curr_res at lowest res
        in_ch_mult = (1,)+tuple(ch_mult)
        block_in = ch*ch_mult[self.num_resolutions-1]
        curr_res = resolution // 2**(self.num_resolutions-1)
        self.z_shape = (1,z_channels,curr_res,curr_res)
        print("Working with z of shape {} = {} dimensions.".format(
            self.z_shape, np.prod(self.z_shape)))

        # z to block_in
        self.conv_in = torch.nn.Conv2d(z_channels,
                                       block_in,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)

        # middle
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)

        # upsampling
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch*ch_mult[i_level]
            for i_block in range(self.num_res_blocks+1):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up)  # prepend to get consistent order

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_ch,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, z):
        #assert z.shape[1:] == self.z_shape[1:]
        self.last_z_shape = z.shape

        # timestep embedding
        temb = None

        # z to block_in
        h = self.conv_in(z)

        # middle
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        # upsampling
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks+1):
                h = self.up[i_level].block[i_block](h, temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        # end
        if self.give_pre_end:
            return h

        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        if self.tanh_out:
            h = torch.tanh(h)
        return h


class SimpleDecoder(nn.Module):
    def __init__(self, in_channels, out_channels, *args, **kwargs):
        super().__init__()
        self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
                                    ResnetBlock(in_channels=in_channels,
                                                out_channels=2 * in_channels,
                                                temb_channels=0, dropout=0.0),
                                    ResnetBlock(in_channels=2 * in_channels,
                                                out_channels=4 * in_channels,
                                                temb_channels=0, dropout=0.0),
                                    ResnetBlock(in_channels=4 * in_channels,
                                                out_channels=2 * in_channels,
                                                temb_channels=0, dropout=0.0),
                                    nn.Conv2d(2*in_channels, in_channels, 1),
                                    Upsample(in_channels, with_conv=True)])
        # end
        self.norm_out = Normalize(in_channels)
        self.conv_out = torch.nn.Conv2d(in_channels,
                                        out_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        for i, layer in enumerate(self.model):
            if i in [1,2,3]:
                x = layer(x, None)
            else:
                x = layer(x)

        h = self.norm_out(x)
        h = nonlinearity(h)
        x = self.conv_out(h)
        return x


class UpsampleDecoder(nn.Module):
    def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
                 ch_mult=(2,2), dropout=0.0):
        super().__init__()
        # upsampling
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        block_in = in_channels
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.res_blocks = nn.ModuleList()
        self.upsample_blocks = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            res_block = []
            block_out = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks + 1):
                res_block.append(ResnetBlock(in_channels=block_in,
                                             out_channels=block_out,
                                             temb_channels=self.temb_ch,
                                             dropout=dropout))
                block_in = block_out
            self.res_blocks.append(nn.ModuleList(res_block))
            if i_level != self.num_resolutions - 1:
                self.upsample_blocks.append(Upsample(block_in, True))
                curr_res = curr_res * 2

        # end
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        # upsampling
        h = x
        for k, i_level in enumerate(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.res_blocks[i_level][i_block](h, None)
            if i_level != self.num_resolutions - 1:
                h = self.upsample_blocks[k](h)
        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h


class LatentRescaler(nn.Module):
    def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
        super().__init__()
        # residual block, interpolate, residual block
        self.factor = factor
        self.conv_in = nn.Conv2d(in_channels,
                                 mid_channels,
                                 kernel_size=3,
                                 stride=1,
                                 padding=1)
        self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
                                                     out_channels=mid_channels,
                                                     temb_channels=0,
                                                     dropout=0.0) for _ in range(depth)])
        self.attn = AttnBlock(mid_channels)
        self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
                                                     out_channels=mid_channels,
                                                     temb_channels=0,
                                                     dropout=0.0) for _ in range(depth)])

        self.conv_out = nn.Conv2d(mid_channels,
                                  out_channels,
                                  kernel_size=1,
                                  )

    def forward(self, x):
        x = self.conv_in(x)
        for block in self.res_block1:
            x = block(x, None)
        x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor))))
        x = self.attn(x)
        for block in self.res_block2:
            x = block(x, None)
        x = self.conv_out(x)
        return x


class MergedRescaleEncoder(nn.Module):
    def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True,
                 ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
        super().__init__()
        intermediate_chn = ch * ch_mult[-1]
        self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult,
                               z_channels=intermediate_chn, double_z=False, resolution=resolution,
                               attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
                               out_ch=None)
        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
                                       mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)

    def forward(self, x):
        x = self.encoder(x)
        x = self.rescaler(x)
        return x


class MergedRescaleDecoder(nn.Module):
    def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
                 dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
        super().__init__()
        tmp_chn = z_channels*ch_mult[-1]
        self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
                               resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
                               ch_mult=ch_mult, resolution=resolution, ch=ch)
        self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
                                       out_channels=tmp_chn, depth=rescale_module_depth)

    def forward(self, x):
        x = self.rescaler(x)
        x = self.decoder(x)
        return x


class Upsampler(nn.Module):
    def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
        super().__init__()
        assert out_size >= in_size
        num_blocks = int(np.log2(out_size//in_size))+1
        factor_up = 1.+ (out_size % in_size)
        print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
        self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels,
                                       out_channels=in_channels)
        self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
                               attn_resolutions=[], in_channels=None, ch=in_channels,
                               ch_mult=[ch_mult for _ in range(num_blocks)])

    def forward(self, x):
        x = self.rescaler(x)
        x = self.decoder(x)
        return x


class Resize(nn.Module):
    def __init__(self, in_channels=None, learned=False, mode="bilinear"):
        super().__init__()
        self.with_conv = learned
        self.mode = mode
        if self.with_conv:
            print(f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode")
            raise NotImplementedError()
            assert in_channels is not None
            # no asymmetric padding in torch conv, must do it ourselves
            self.conv = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1)

    def forward(self, x, scale_factor=1.0):
        if scale_factor==1.0:
            return x
        else:
            x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor)
        return x
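For orientation, a rough round-trip sketch of the deleted autoencoder blocks. The hyperparameters below are illustrative only, not those of any released checkpoint:

import torch
from ldm.modules.diffusionmodules.model import Encoder, Decoder

enc = Encoder(ch=64, out_ch=3, ch_mult=(1, 2, 4), num_res_blocks=2,
              attn_resolutions=[16], in_channels=3, resolution=64,
              z_channels=4, double_z=False)
dec = Decoder(ch=64, out_ch=3, ch_mult=(1, 2, 4), num_res_blocks=2,
              attn_resolutions=[16], in_channels=3, resolution=64, z_channels=4)

x = torch.randn(1, 3, 64, 64)
z = enc(x)       # [1, 4, 16, 16] latent (resolution halved twice)
x_rec = dec(z)   # [1, 3, 64, 64] reconstruction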
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/describer/pokemon.py
DELETED
@@ -1,51 +0,0 @@
from __future__ import annotations

from typing import TYPE_CHECKING, Any, List, Optional, Dict
from copy import deepcopy

from . import describer_registry as DescriberRegistry
from .base import BaseDescriber

if TYPE_CHECKING:
    from agentverse.environments.pokemon import PokemonEnvironment


@DescriberRegistry.register("pokemon")
class PokemonDescriber(BaseDescriber):
    def get_env_description(
        self,
        environment: PokemonEnvironment,
        player_content: str = "",
    ) -> List[str]:
        time = environment.time
        if player_content == "":
            agent_to_location = environment.get_agent_to_location()
            descriptions = []
            for agent in environment.agents:
                description = ""
                if agent.name not in agent_to_location:
                    # Agent is on the way to a location
                    descriptions.append("")
                    continue
                location = agent_to_location[agent.name]
                agents_in_same_loc = deepcopy(environment.locations_to_agents[location])
                agents_in_same_loc.remove(agent.name)
                agents_in_same_loc = list(agents_in_same_loc)
                description += f"It is now {time}. You are at {location}."
                if len(agents_in_same_loc) == 0:
                    description += " There is no one else here."
                elif len(agents_in_same_loc) == 1:
                    description += f" {agents_in_same_loc[0]} is also here."
                else:
                    other_agents = ", ".join(agents_in_same_loc)
                    description += f" {other_agents} are also here."
                # description += " The locations you can go to include: \n"
                # for loc, dsec in environment.locations_descriptions.items():
                #     description += f"{loc}: {dsec}\n"
                descriptions.append(description)
            return descriptions
        else:
            description = ""
            description += f"It is now {time}. Brendan is talking to you.\n"
            description += f"[Brendan]: {player_content}\n"
            return [description for _ in range(len(environment.agents))]
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/bejeweled/methods/WaitEventMethods.js
DELETED
@@ -1,13 +0,0 @@
export default {
    waitEvent(eventEmitter, eventName) {
        if (eventName === undefined) {
            eventName = 'complete';
        }
        this.waitEvents.waitEvent(eventEmitter, eventName);
        return this;
    },

    isWaitingEvent() {
        return !this.waitEvents.noWaitEvent;
    },
}
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/utils/CreateAnyLabel.js
DELETED
@@ -1,18 +0,0 @@
import MergeStyle from './MergeStyle.js';
import CreateChild from './CreateChild.js';

var CreateAnyLabel = function (scene, data, view, styles, customBuilders, LabelClass) {
    data = MergeStyle(data, styles);

    // Replace data by child game object
    CreateChild(scene, data, 'background', view, styles, customBuilders);
    CreateChild(scene, data, 'icon', view, styles, customBuilders);
    CreateChild(scene, data, 'text', view, styles, customBuilders);
    CreateChild(scene, data, 'action', view, styles, customBuilders);

    var gameObject = new LabelClass(scene, data);
    scene.add.existing(gameObject);
    return gameObject;
}

export default CreateAnyLabel;
spaces/AkitoP/umamusume_bert_vits2/text/english_bert_mock.py
DELETED
@@ -1,5 +0,0 @@
import torch


def get_bert_feature(norm_text, word2ph):
    return torch.zeros(1024, sum(word2ph))
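The mock keeps the interface of the real BERT feature extractor but returns zeros; a quick illustration (the word2ph values here are made up, one phone count per word):

import torch
from text.english_bert_mock import get_bert_feature

word2ph = [2, 3, 1]                        # 3 words -> 6 phones in total
feats = get_bert_feature("hello world !", word2ph)
print(feats.shape)                         # torch.Size([1024, 6])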
spaces/AliSaria/MilitarEye/app.py
DELETED
@@ -1,45 +0,0 @@
-import gradio as gr
-from tensorflow.keras.models import load_model
-from PIL import Image
-import numpy as np
-import matplotlib.pyplot as plt
-from io import BytesIO
-
-# Load the trained model
-model = load_model('model1.h5')  # Make sure 'model1.h5' is the correct path to your model
-
-# Prediction function for the Gradio app
-def predict_and_visualize(img):
-    # Store the original image size
-    original_size = img.size
-
-    # Convert the input image to the target size expected by the model
-    img_resized = img.resize((256, 256))
-    img_array = np.array(img_resized) / 255.0  # Normalize the image
-    img_array = np.expand_dims(img_array, axis=0)  # Add batch dimension
-
-    # Make a prediction
-    prediction = model.predict(img_array)
-
-    # Assuming the model outputs a single-channel image, normalize to 0-255 range for display
-    predicted_mask = (prediction[0, :, :, 0] * 255).astype(np.uint8)
-
-    # Convert the prediction to a PIL image
-    prediction_image = Image.fromarray(predicted_mask, mode='L')  # 'L' mode is for grayscale
-
-    # Resize the predicted image back to the original image size
-    prediction_image = prediction_image.resize(original_size, Image.NEAREST)
-
-    return prediction_image
-
-# Create the Gradio interface
-iface = gr.Interface(
-    fn=predict_and_visualize,
-    inputs=gr.Image(type="pil"),  # We expect a PIL Image
-    outputs=gr.Image(type="pil"),  # We will return a PIL Image
-    title="MilitarEye: Military Stealth Camouflage Detector",
-    description="Please upload an image of a military personnel camouflaged in their surroundings. On the right, the model will attempt to predict the camouflage mask silhouette."
-)
-
-# Launch the Gradio app
-iface.launch()
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/dnnlib/util.py
DELETED
@@ -1,472 +0,0 @@
-# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
-#
-# NVIDIA CORPORATION and its licensors retain all intellectual property
-# and proprietary rights in and to this software, related documentation
-# and any modifications thereto. Any use, reproduction, disclosure or
-# distribution of this software and related documentation without an express
-# license agreement from NVIDIA CORPORATION is strictly prohibited.
-
-"""Miscellaneous utility classes and functions."""
-
-import ctypes
-import fnmatch
-import importlib
-import inspect
-import numpy as np
-import os
-import shutil
-import sys
-import types
-import io
-import pickle
-import re
-import requests
-import html
-import hashlib
-import glob
-import tempfile
-import urllib
-import urllib.request
-import uuid
-
-from distutils.util import strtobool
-from typing import Any, List, Tuple, Union
-
-
-# Util classes
-# ------------------------------------------------------------------------------------------
-
-
-class EasyDict(dict):
-    """Convenience class that behaves like a dict but allows access with the attribute syntax."""
-
-    def __getattr__(self, name: str) -> Any:
-        try:
-            return self[name]
-        except KeyError:
-            raise AttributeError(name)
-
-    def __setattr__(self, name: str, value: Any) -> None:
-        self[name] = value
-
-    def __delattr__(self, name: str) -> None:
-        del self[name]
-
-
-class Logger(object):
-    """Redirect stderr to stdout, optionally print stdout to a file, and optionally force flushing on both stdout and the file."""
-
-    def __init__(self, file_name: str = None, file_mode: str = "w", should_flush: bool = True):
-        self.file = None
-
-        if file_name is not None:
-            self.file = open(file_name, file_mode)
-
-        self.should_flush = should_flush
-        self.stdout = sys.stdout
-        self.stderr = sys.stderr
-
-        sys.stdout = self
-        sys.stderr = self
-
-    def __enter__(self) -> "Logger":
-        return self
-
-    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
-        self.close()
-
-    def write(self, text: str) -> None:
-        """Write text to stdout (and a file) and optionally flush."""
-        if len(text) == 0: # workaround for a bug in VSCode debugger: sys.stdout.write(''); sys.stdout.flush() => crash
-            return
-
-        if self.file is not None:
-            self.file.write(text)
-
-        self.stdout.write(text)
-
-        if self.should_flush:
-            self.flush()
-
-    def flush(self) -> None:
-        """Flush written text to both stdout and a file, if open."""
-        if self.file is not None:
-            self.file.flush()
-
-        self.stdout.flush()
-
-    def close(self) -> None:
-        """Flush, close possible files, and remove stdout/stderr mirroring."""
-        self.flush()
-
-        # if using multiple loggers, prevent closing in wrong order
-        if sys.stdout is self:
-            sys.stdout = self.stdout
-        if sys.stderr is self:
-            sys.stderr = self.stderr
-
-        if self.file is not None:
-            self.file.close()
-
-
-# Cache directories
-# ------------------------------------------------------------------------------------------
-
-_dnnlib_cache_dir = None
-
-def set_cache_dir(path: str) -> None:
-    global _dnnlib_cache_dir
-    _dnnlib_cache_dir = path
-
-def make_cache_dir_path(*paths: str) -> str:
-    if _dnnlib_cache_dir is not None:
-        return os.path.join(_dnnlib_cache_dir, *paths)
-    if 'DNNLIB_CACHE_DIR' in os.environ:
-        return os.path.join(os.environ['DNNLIB_CACHE_DIR'], *paths)
-    if 'HOME' in os.environ:
-        return os.path.join(os.environ['HOME'], '.cache', 'dnnlib', *paths)
-    if 'USERPROFILE' in os.environ:
-        return os.path.join(os.environ['USERPROFILE'], '.cache', 'dnnlib', *paths)
-    return os.path.join(tempfile.gettempdir(), '.cache', 'dnnlib', *paths)
-
-# Small util functions
-# ------------------------------------------------------------------------------------------
-
-
-def format_time(seconds: Union[int, float]) -> str:
-    """Convert the seconds to human readable string with days, hours, minutes and seconds."""
-    s = int(np.rint(seconds))
-
-    if s < 60:
-        return "{0}s".format(s)
-    elif s < 60 * 60:
-        return "{0}m {1:02}s".format(s // 60, s % 60)
-    elif s < 24 * 60 * 60:
-        return "{0}h {1:02}m {2:02}s".format(s // (60 * 60), (s // 60) % 60, s % 60)
-    else:
-        return "{0}d {1:02}h {2:02}m".format(s // (24 * 60 * 60), (s // (60 * 60)) % 24, (s // 60) % 60)
-
-
-def ask_yes_no(question: str) -> bool:
-    """Ask the user the question until the user inputs a valid answer."""
-    while True:
-        try:
-            print("{0} [y/n]".format(question))
-            return strtobool(input().lower())
-        except ValueError:
-            pass
-
-
-def tuple_product(t: Tuple) -> Any:
-    """Calculate the product of the tuple elements."""
-    result = 1
-
-    for v in t:
-        result *= v
-
-    return result
-
-
-_str_to_ctype = {
-    "uint8": ctypes.c_ubyte,
-    "uint16": ctypes.c_uint16,
-    "uint32": ctypes.c_uint32,
-    "uint64": ctypes.c_uint64,
-    "int8": ctypes.c_byte,
-    "int16": ctypes.c_int16,
-    "int32": ctypes.c_int32,
-    "int64": ctypes.c_int64,
-    "float32": ctypes.c_float,
-    "float64": ctypes.c_double
-}
-
-
-def get_dtype_and_ctype(type_obj: Any) -> Tuple[np.dtype, Any]:
-    """Given a type name string (or an object having a __name__ attribute), return matching Numpy and ctypes types that have the same size in bytes."""
-    type_str = None
-
-    if isinstance(type_obj, str):
-        type_str = type_obj
-    elif hasattr(type_obj, "__name__"):
-        type_str = type_obj.__name__
-    elif hasattr(type_obj, "name"):
-        type_str = type_obj.name
-    else:
-        raise RuntimeError("Cannot infer type name from input")
-
-    assert type_str in _str_to_ctype.keys()
-
-    my_dtype = np.dtype(type_str)
-    my_ctype = _str_to_ctype[type_str]
-
-    assert my_dtype.itemsize == ctypes.sizeof(my_ctype)
-
-    return my_dtype, my_ctype
-
-
-def is_pickleable(obj: Any) -> bool:
-    try:
-        with io.BytesIO() as stream:
-            pickle.dump(obj, stream)
-        return True
-    except:
-        return False
-
-
-# Functionality to import modules/objects by name, and call functions by name
-# ------------------------------------------------------------------------------------------
-
-def get_module_from_obj_name(obj_name: str) -> Tuple[types.ModuleType, str]:
-    """Searches for the underlying module behind the name to some python object.
-    Returns the module and the object name (original name with module part removed)."""
-
-    # allow convenience shorthands, substitute them by full names
-    obj_name = re.sub("^np.", "numpy.", obj_name)
-    obj_name = re.sub("^tf.", "tensorflow.", obj_name)
-
-    # list alternatives for (module_name, local_obj_name)
-    parts = obj_name.split(".")
-    name_pairs = [(".".join(parts[:i]), ".".join(parts[i:])) for i in range(len(parts), 0, -1)]
-
-    # try each alternative in turn
-    for module_name, local_obj_name in name_pairs:
-        try:
-            module = importlib.import_module(module_name) # may raise ImportError
-            get_obj_from_module(module, local_obj_name) # may raise AttributeError
-            return module, local_obj_name
-        except:
-            pass
-
-    # maybe some of the modules themselves contain errors?
-    for module_name, _local_obj_name in name_pairs:
-        try:
-            importlib.import_module(module_name) # may raise ImportError
-        except ImportError:
-            if not str(sys.exc_info()[1]).startswith("No module named '" + module_name + "'"):
-                raise
-
-    # maybe the requested attribute is missing?
-    for module_name, local_obj_name in name_pairs:
-        try:
-            module = importlib.import_module(module_name) # may raise ImportError
-            get_obj_from_module(module, local_obj_name) # may raise AttributeError
-        except ImportError:
-            pass
-
-    # we are out of luck, but we have no idea why
-    raise ImportError(obj_name)
-
-
-def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:
-    """Traverses the object name and returns the last (rightmost) python object."""
-    if obj_name == '':
-        return module
-    obj = module
-    for part in obj_name.split("."):
-        obj = getattr(obj, part)
-    return obj
-
-
-def get_obj_by_name(name: str) -> Any:
-    """Finds the python object with the given name."""
-    module, obj_name = get_module_from_obj_name(name)
-    return get_obj_from_module(module, obj_name)
-
-
-def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:
-    """Finds the python object with the given name and calls it as a function."""
-    assert func_name is not None
-    func_obj = get_obj_by_name(func_name)
-    assert callable(func_obj)
-    return func_obj(*args, **kwargs)
-
-
-def construct_class_by_name(*args, class_name: str = None, **kwargs) -> Any:
-    """Finds the python class with the given name and constructs it with the given arguments."""
-    return call_func_by_name(*args, func_name=class_name, **kwargs)
-
-
-def get_module_dir_by_obj_name(obj_name: str) -> str:
-    """Get the directory path of the module containing the given object name."""
-    module, _ = get_module_from_obj_name(obj_name)
-    return os.path.dirname(inspect.getfile(module))
-
-
-def is_top_level_function(obj: Any) -> bool:
-    """Determine whether the given object is a top-level function, i.e., defined at module scope using 'def'."""
-    return callable(obj) and obj.__name__ in sys.modules[obj.__module__].__dict__
-
-
-def get_top_level_function_name(obj: Any) -> str:
-    """Return the fully-qualified name of a top-level function."""
-    assert is_top_level_function(obj)
-    module = obj.__module__
-    if module == '__main__':
-        module = os.path.splitext(os.path.basename(sys.modules[module].__file__))[0]
-    return module + "." + obj.__name__
-
-
-# File system helpers
-# ------------------------------------------------------------------------------------------
-
-def list_dir_recursively_with_ignore(dir_path: str, ignores: List[str] = None, add_base_to_relative: bool = False) -> List[Tuple[str, str]]:
-    """List all files recursively in a given directory while ignoring given file and directory names.
-    Returns list of tuples containing both absolute and relative paths."""
-    assert os.path.isdir(dir_path)
-    base_name = os.path.basename(os.path.normpath(dir_path))
-
-    if ignores is None:
-        ignores = []
-
-    result = []
-
-    for root, dirs, files in os.walk(dir_path, topdown=True):
-        for ignore_ in ignores:
-            dirs_to_remove = [d for d in dirs if fnmatch.fnmatch(d, ignore_)]
-
-            # dirs need to be edited in-place
-            for d in dirs_to_remove:
-                dirs.remove(d)
-
-            files = [f for f in files if not fnmatch.fnmatch(f, ignore_)]
-
-        absolute_paths = [os.path.join(root, f) for f in files]
-        relative_paths = [os.path.relpath(p, dir_path) for p in absolute_paths]
-
-        if add_base_to_relative:
-            relative_paths = [os.path.join(base_name, p) for p in relative_paths]
-
-        assert len(absolute_paths) == len(relative_paths)
-        result += zip(absolute_paths, relative_paths)
-
-    return result
-
-
-def copy_files_and_create_dirs(files: List[Tuple[str, str]]) -> None:
-    """Takes in a list of tuples of (src, dst) paths and copies files.
-    Will create all necessary directories."""
-    for file in files:
-        target_dir_name = os.path.dirname(file[1])
-
-        # will create all intermediate-level directories
-        if not os.path.exists(target_dir_name):
-            os.makedirs(target_dir_name)
-
-        shutil.copyfile(file[0], file[1])
-
-
-# URL helpers
-# ------------------------------------------------------------------------------------------
-
-def is_url(obj: Any, allow_file_urls: bool = False) -> bool:
-    """Determine whether the given object is a valid URL string."""
-    if not isinstance(obj, str) or not "://" in obj:
-        return False
-    if allow_file_urls and obj.startswith('file://'):
-        return True
-    try:
-        res = requests.compat.urlparse(obj)
-        if not res.scheme or not res.netloc or not "." in res.netloc:
-            return False
-        res = requests.compat.urlparse(requests.compat.urljoin(obj, "/"))
-        if not res.scheme or not res.netloc or not "." in res.netloc:
-            return False
-    except:
-        return False
-    return True
-
-
-def open_url(url: str, cache_dir: str = None, num_attempts: int = 10, verbose: bool = True, return_filename: bool = False, cache: bool = True) -> Any:
-    """Download the given URL and return a binary-mode file object to access the data."""
-    assert num_attempts >= 1
-    assert not (return_filename and (not cache))
-
-    # Doesn't look like an URL scheme so interpret it as a local filename.
-    if not re.match('^[a-z]+://', url):
-        return url if return_filename else open(url, "rb")
-
-    # Handle file URLs. This code handles unusual file:// patterns that
-    # arise on Windows:
-    #
-    # file:///c:/foo.txt
-    #
-    # which would translate to a local '/c:/foo.txt' filename that's
-    # invalid. Drop the forward slash for such pathnames.
-    #
-    # If you touch this code path, you should test it on both Linux and
-    # Windows.
-    #
-    # Some internet resources suggest using urllib.request.url2pathname() but
-    # but that converts forward slashes to backslashes and this causes
-    # its own set of problems.
-    if url.startswith('file://'):
-        filename = urllib.parse.urlparse(url).path
-        if re.match(r'^/[a-zA-Z]:', filename):
-            filename = filename[1:]
-        return filename if return_filename else open(filename, "rb")
-
-    assert is_url(url)
-
-    # Lookup from cache.
-    if cache_dir is None:
-        cache_dir = make_cache_dir_path('downloads')
-
-    url_md5 = hashlib.md5(url.encode("utf-8")).hexdigest()
-    if cache:
-        cache_files = glob.glob(os.path.join(cache_dir, url_md5 + "_*"))
-        if len(cache_files) == 1:
-            filename = cache_files[0]
-            return filename if return_filename else open(filename, "rb")
-
-    # Download.
-    url_name = None
-    url_data = None
-    with requests.Session() as session:
-        if verbose:
-            print("Downloading %s ..." % url, end="", flush=True)
-        for attempts_left in reversed(range(num_attempts)):
-            try:
-                with session.get(url) as res:
-                    res.raise_for_status()
-                    if len(res.content) == 0:
-                        raise IOError("No data received")
-
-                    if len(res.content) < 8192:
-                        content_str = res.content.decode("utf-8")
-                        if "download_warning" in res.headers.get("Set-Cookie", ""):
-                            links = [html.unescape(link) for link in content_str.split('"') if "export=download" in link]
-                            if len(links) == 1:
-                                url = requests.compat.urljoin(url, links[0])
-                                raise IOError("Google Drive virus checker nag")
-                        if "Google Drive - Quota exceeded" in content_str:
-                            raise IOError("Google Drive download quota exceeded -- please try again later")
-
-                    match = re.search(r'filename="([^"]*)"', res.headers.get("Content-Disposition", ""))
-                    url_name = match[1] if match else url
-                    url_data = res.content
-                    if verbose:
-                        print(" done")
-                    break
-            except:
-                if not attempts_left:
-                    if verbose:
-                        print(" failed")
-                    raise
-                if verbose:
-                    print(".", end="", flush=True)
-
-    # Save to cache.
-    if cache:
-        safe_name = re.sub(r"[^0-9a-zA-Z-._]", "_", url_name)
-        cache_file = os.path.join(cache_dir, url_md5 + "_" + safe_name)
-        temp_file = os.path.join(cache_dir, "tmp_" + uuid.uuid4().hex + "_" + url_md5 + "_" + safe_name)
-        os.makedirs(cache_dir, exist_ok=True)
-        with open(temp_file, "wb") as f:
-            f.write(url_data)
-        os.replace(temp_file, cache_file) # atomic
-        if return_filename:
-            return cache_file
-
-    # Return data as file object.
-    assert not return_filename
-    return io.BytesIO(url_data)
spaces/Amrrs/fashion-aggregator-duplicated/app.py
DELETED
@@ -1,217 +0,0 @@
-"""Provide a text query describing what you are looking for and get back out images with links!"""
-"""This has been duplicated to show the new duplication feature demo"""
-import argparse
-import logging
-import os
-import wandb
-import gradio as gr
-
-import zipfile
-import pickle
-from pathlib import Path
-from typing import List, Any, Dict
-from PIL import Image
-from pathlib import Path
-
-from transformers import AutoTokenizer
-from sentence_transformers import SentenceTransformer, util
-from multilingual_clip import pt_multilingual_clip
-import torch
-
-from pathlib import Path
-from typing import Callable, Dict, List, Tuple
-from PIL.Image import Image
-
-print(__file__)
-
-os.environ["CUDA_VISIBLE_DEVICES"] = ""  # do not use GPU
-
-logging.basicConfig(level=logging.INFO)
-DEFAULT_APPLICATION_NAME = "fashion-aggregator"
-
-APP_DIR = Path(__file__).resolve().parent  # what is the directory for this application?
-FAVICON = APP_DIR / "t-shirt_1f455.png"  # path to a small image for display in browser tab and social media
-README = APP_DIR / "README.md"  # path to an app readme file in HTML/markdown
-
-DEFAULT_PORT = 11700
-
-EMBEDDINGS_DIR = "artifacts/img-embeddings"
-EMBEDDINGS_FILE = os.path.join(EMBEDDINGS_DIR, "embeddings.pkl")
-RAW_PHOTOS_DIR = "artifacts/raw-photos"
-
-# Download image embeddings and raw photos
-wandb.login(key="4b5a23a662b20fdd61f2aeb5032cf56fdce278a4")  # os.getenv('wandb')
-api = wandb.Api()
-artifact_embeddings = api.artifact("ryparmar/fashion-aggregator/unimoda-images:v1")
-artifact_embeddings.download(EMBEDDINGS_DIR)
-artifact_raw_photos = api.artifact("ryparmar/fashion-aggregator/unimoda-raw-images:v1")
-artifact_raw_photos.download("artifacts")
-
-with zipfile.ZipFile("artifacts/unimoda.zip", 'r') as zip_ref:
-    zip_ref.extractall(RAW_PHOTOS_DIR)
-
-
-class TextEncoder:
-    """Encodes the given text"""
-
-    def __init__(self, model_path="M-CLIP/XLM-Roberta-Large-Vit-B-32"):
-        self.model = pt_multilingual_clip.MultilingualCLIP.from_pretrained(model_path)
-        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
-
-    @torch.no_grad()
-    def encode(self, query: str) -> torch.Tensor:
-        """Predict/infer text embedding for a given query."""
-        query_emb = self.model.forward([query], self.tokenizer)
-        return query_emb
-
-
-class ImageEnoder:
-    """Encodes the given image"""
-
-    def __init__(self, model_path="clip-ViT-B-32"):
-        self.model = SentenceTransformer(model_path)
-
-    @torch.no_grad()
-    def encode(self, image: Image) -> torch.Tensor:
-        """Predict/infer text embedding for a given query."""
-        image_emb = self.model.encode([image], convert_to_tensor=True, show_progress_bar=False)
-        return image_emb
-
-
-class Retriever:
-    """Retrieves relevant images for a given text embedding."""
-
-    def __init__(self, image_embeddings_path=None):
-        self.text_encoder = TextEncoder()
-        self.image_encoder = ImageEnoder()
-
-        with open(image_embeddings_path, "rb") as file:
-            self.image_names, self.image_embeddings = pickle.load(file)
-        self.image_names = [
-            img_name.replace("fashion-aggregator/fashion_aggregator/data/photos/", "")
-            for img_name in self.image_names
-        ]
-        print("Images:", len(self.image_names))
-
-    @torch.no_grad()
-    def predict(self, text_query: str, k: int = 10) -> List[Any]:
-        """Return top-k relevant items for a given embedding"""
-        query_emb = self.text_encoder.encode(text_query)
-        relevant_images = util.semantic_search(query_emb, self.image_embeddings, top_k=k)[0]
-        return relevant_images
-
-    @torch.no_grad()
-    def search_images(self, text_query: str, k: int = 6) -> Dict[str, List[Any]]:
-        """Return top-k relevant images for a given embedding"""
-        images = self.predict(text_query, k)
-        paths_and_scores = {"path": [], "score": []}
-        for img in images:
-            paths_and_scores["path"].append(os.path.join(RAW_PHOTOS_DIR, self.image_names[img["corpus_id"]]))
-            paths_and_scores["score"].append(img["score"])
-        return paths_and_scores
-
-
-def main(args):
-    predictor = PredictorBackend(url=args.model_url)
-    frontend = make_frontend(predictor.run, flagging=args.flagging, gantry=args.gantry, app_name=args.application)
-    frontend.launch(
-        # server_name="0.0.0.0",  # make server accessible, binding all interfaces # noqa: S104
-        # server_port=args.port,  # set a port to bind to, failing if unavailable
-        # share=False,  # should we create a (temporary) public link on https://gradio.app?
-        # favicon_path=FAVICON,  # what icon should we display in the address bar?
-    )
-
-
-def make_frontend(
-    fn: Callable[[Image], str], flagging: bool = False, gantry: bool = False, app_name: str = "fashion-aggregator"
-):
-    """Creates a gradio.Interface frontend for text to image search function."""
-
-    allow_flagging = "never"
-
-    # build a basic browser interface to a Python function
-    frontend = gr.Interface(
-        fn=fn,  # which Python function are we interacting with?
-        outputs=gr.Gallery(label="Relevant Items"),
-        # what input widgets does it need? we configure an image widget
-        inputs=gr.components.Textbox(label="Item Description"),
-        title="📝 Text2Image 👕",  # what should we display at the top of the page?
-        thumbnail=FAVICON,  # what should we display when the link is shared, e.g. on social media?
-        description=__doc__,  # what should we display just above the interface?
-        cache_examples=False,  # should we cache those inputs for faster inference? slows down start
-        allow_flagging=allow_flagging,  # should we show users the option to "flag" outputs?
-        flagging_options=["incorrect", "offensive", "other"],  # what options do users have for feedback?
-    )
-    return frontend
-
-
-class PredictorBackend:
-    """Interface to a backend that serves predictions.
-
-    To communicate with a backend accessible via a URL, provide the url kwarg.
-
-    Otherwise, runs a predictor locally.
-    """
-
-    def __init__(self, url=None):
-        if url is not None:
-            self.url = url
-            self._predict = self._predict_from_endpoint
-        else:
-            model = Retriever(image_embeddings_path=EMBEDDINGS_FILE)
-            self._predict = model.predict
-            self._search_images = model.search_images
-
-    def run(self, text: str):
-        pred, metrics = self._predict_with_metrics(text)
-        self._log_inference(pred, metrics)
-        return pred
-
-    def _predict_with_metrics(self, text: str) -> Tuple[List[str], Dict[str, float]]:
-        paths_and_scores = self._search_images(text)
-        metrics = {"mean_score": sum(paths_and_scores["score"]) / len(paths_and_scores["score"])}
-        return paths_and_scores["path"], metrics
-
-    def _log_inference(self, pred, metrics):
-        for key, value in metrics.items():
-            logging.info(f"METRIC {key} {value}")
-        logging.info(f"PRED >begin\n{pred}\nPRED >end")
-
-
-def _make_parser():
-    parser = argparse.ArgumentParser(description=__doc__)
-    parser.add_argument(
-        "--model_url",
-        default=None,
-        type=str,
-        help="Identifies a URL to which to send image data. Data is base64-encoded, converted to a utf-8 string, and then set via a POST request as JSON with the key 'image'. Default is None, which instead sends the data to a model running locally.",
-    )
-    parser.add_argument(
-        "--port",
-        default=DEFAULT_PORT,
-        type=int,
-        help=f"Port on which to expose this server. Default is {DEFAULT_PORT}.",
-    )
-    parser.add_argument(
-        "--flagging",
-        action="store_true",
-        help="Pass this flag to allow users to 'flag' model behavior and provide feedback.",
-    )
-    parser.add_argument(
-        "--gantry",
-        action="store_true",
-        help="Pass --flagging and this flag to log user feedback to Gantry. Requires GANTRY_API_KEY to be defined as an environment variable.",
-    )
-    parser.add_argument(
-        "--application",
-        default=DEFAULT_APPLICATION_NAME,
-        type=str,
-        help=f"Name of the Gantry application to which feedback should be logged, if --gantry and --flagging are passed. Default is {DEFAULT_APPLICATION_NAME}.",
-    )
-    return parser
-
-
-if __name__ == "__main__":
-    parser = _make_parser()
-    args = parser.parse_args()
-    main(args)
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
DELETED
@@ -1,1398 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 HuggingFace Inc.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
import gc
|
17 |
-
import random
|
18 |
-
import traceback
|
19 |
-
import unittest
|
20 |
-
|
21 |
-
import numpy as np
|
22 |
-
import torch
|
23 |
-
from huggingface_hub import hf_hub_download
|
24 |
-
from PIL import Image
|
25 |
-
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
|
26 |
-
|
27 |
-
from diffusers import (
|
28 |
-
AsymmetricAutoencoderKL,
|
29 |
-
AutoencoderKL,
|
30 |
-
DDIMScheduler,
|
31 |
-
DPMSolverMultistepScheduler,
|
32 |
-
LMSDiscreteScheduler,
|
33 |
-
PNDMScheduler,
|
34 |
-
StableDiffusionInpaintPipeline,
|
35 |
-
UNet2DConditionModel,
|
36 |
-
)
|
37 |
-
from diffusers.models.attention_processor import AttnProcessor
|
38 |
-
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image
|
39 |
-
from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
|
40 |
-
from diffusers.utils.testing_utils import (
|
41 |
-
enable_full_determinism,
|
42 |
-
require_torch_2,
|
43 |
-
require_torch_gpu,
|
44 |
-
run_test_in_subprocess,
|
45 |
-
)
|
46 |
-
|
47 |
-
from ...models.test_models_unet_2d_condition import create_lora_layers
|
48 |
-
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
|
49 |
-
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
|
50 |
-
|
51 |
-
|
52 |
-
enable_full_determinism()
|
53 |
-
|
54 |
-
|
55 |
-
# Will be run via run_test_in_subprocess
|
56 |
-
def _test_inpaint_compile(in_queue, out_queue, timeout):
|
57 |
-
error = None
|
58 |
-
try:
|
59 |
-
inputs = in_queue.get(timeout=timeout)
|
60 |
-
torch_device = inputs.pop("torch_device")
|
61 |
-
seed = inputs.pop("seed")
|
62 |
-
inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed)
|
63 |
-
|
64 |
-
pipe = StableDiffusionInpaintPipeline.from_pretrained(
|
65 |
-
"runwayml/stable-diffusion-inpainting", safety_checker=None
|
66 |
-
)
|
67 |
-
pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config)
|
68 |
-
pipe.to(torch_device)
|
69 |
-
pipe.set_progress_bar_config(disable=None)
|
70 |
-
|
71 |
-
pipe.unet.to(memory_format=torch.channels_last)
|
72 |
-
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
73 |
-
|
74 |
-
image = pipe(**inputs).images
|
75 |
-
image_slice = image[0, 253:256, 253:256, -1].flatten()
|
76 |
-
|
77 |
-
assert image.shape == (1, 512, 512, 3)
|
78 |
-
expected_slice = np.array([0.0425, 0.0273, 0.0344, 0.1694, 0.1727, 0.1812, 0.3256, 0.3311, 0.3272])
|
79 |
-
|
80 |
-
assert np.abs(expected_slice - image_slice).max() < 3e-3
|
81 |
-
except Exception:
|
82 |
-
error = f"{traceback.format_exc()}"
|
83 |
-
|
84 |
-
results = {"error": error}
|
85 |
-
out_queue.put(results, timeout=timeout)
|
86 |
-
out_queue.join()
|
87 |
-
|
88 |
-
|
89 |
-
class StableDiffusionInpaintPipelineFastTests(
|
90 |
-
PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
|
91 |
-
):
|
92 |
-
pipeline_class = StableDiffusionInpaintPipeline
|
93 |
-
params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
|
94 |
-
batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
|
95 |
-
image_params = frozenset([])
|
96 |
-
# TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
|
97 |
-
image_latents_params = frozenset([])
|
98 |
-
|
99 |
-
def get_dummy_components(self):
|
100 |
-
torch.manual_seed(0)
|
101 |
-
unet = UNet2DConditionModel(
|
102 |
-
block_out_channels=(32, 64),
|
103 |
-
layers_per_block=2,
|
104 |
-
sample_size=32,
|
105 |
-
in_channels=9,
|
106 |
-
out_channels=4,
|
107 |
-
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
|
108 |
-
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
|
109 |
-
cross_attention_dim=32,
|
110 |
-
)
|
111 |
-
scheduler = PNDMScheduler(skip_prk_steps=True)
|
112 |
-
torch.manual_seed(0)
|
113 |
-
vae = AutoencoderKL(
|
114 |
-
block_out_channels=[32, 64],
|
115 |
-
in_channels=3,
|
116 |
-
out_channels=3,
|
117 |
-
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
|
118 |
-
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
|
119 |
-
latent_channels=4,
|
120 |
-
)
|
121 |
-
torch.manual_seed(0)
|
122 |
-
text_encoder_config = CLIPTextConfig(
|
123 |
-
bos_token_id=0,
|
124 |
-
eos_token_id=2,
|
125 |
-
hidden_size=32,
|
126 |
-
intermediate_size=37,
|
127 |
-
layer_norm_eps=1e-05,
|
128 |
-
num_attention_heads=4,
|
129 |
-
num_hidden_layers=5,
|
130 |
-
pad_token_id=1,
|
131 |
-
vocab_size=1000,
|
132 |
-
)
|
133 |
-
text_encoder = CLIPTextModel(text_encoder_config)
|
134 |
-
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
|
135 |
-
|
136 |
-
components = {
|
137 |
-
"unet": unet,
|
138 |
-
"scheduler": scheduler,
|
139 |
-
"vae": vae,
|
140 |
-
"text_encoder": text_encoder,
|
141 |
-
"tokenizer": tokenizer,
|
142 |
-
"safety_checker": None,
|
143 |
-
"feature_extractor": None,
|
144 |
-
}
|
145 |
-
return components
|
146 |
-
|
147 |
-
def get_dummy_inputs(self, device, seed=0):
|
148 |
-
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
|
149 |
-
image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
|
150 |
-
image = image.cpu().permute(0, 2, 3, 1)[0]
|
151 |
-
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
|
152 |
-
mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
|
153 |
-
if str(device).startswith("mps"):
|
154 |
-
generator = torch.manual_seed(seed)
|
155 |
-
else:
|
156 |
-
generator = torch.Generator(device=device).manual_seed(seed)
|
157 |
-
inputs = {
|
158 |
-
"prompt": "A painting of a squirrel eating a burger",
|
159 |
-
"image": init_image,
|
160 |
-
"mask_image": mask_image,
|
161 |
-
"generator": generator,
|
162 |
-
"num_inference_steps": 2,
|
163 |
-
"guidance_scale": 6.0,
|
164 |
-
"output_type": "numpy",
|
165 |
-
}
|
166 |
-
return inputs
|
167 |
-
|
168 |
-
def test_stable_diffusion_inpaint(self):
|
169 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
170 |
-
components = self.get_dummy_components()
|
171 |
-
sd_pipe = StableDiffusionInpaintPipeline(**components)
|
172 |
-
sd_pipe = sd_pipe.to(device)
|
173 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
174 |
-
|
175 |
-
inputs = self.get_dummy_inputs(device)
|
176 |
-
image = sd_pipe(**inputs).images
|
177 |
-
image_slice = image[0, -3:, -3:, -1]
|
178 |
-
|
179 |
-
assert image.shape == (1, 64, 64, 3)
|
180 |
-
expected_slice = np.array([0.4723, 0.5731, 0.3939, 0.5441, 0.5922, 0.4392, 0.5059, 0.4651, 0.4474])
|
181 |
-
|
182 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
183 |
-
|
184 |
-
def test_stable_diffusion_inpaint_image_tensor(self):
|
185 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
186 |
-
components = self.get_dummy_components()
|
187 |
-
sd_pipe = StableDiffusionInpaintPipeline(**components)
|
188 |
-
sd_pipe = sd_pipe.to(device)
|
189 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
190 |
-
|
191 |
-
inputs = self.get_dummy_inputs(device)
|
192 |
-
output = sd_pipe(**inputs)
|
193 |
-
out_pil = output.images
|
194 |
-
|
195 |
-
inputs = self.get_dummy_inputs(device)
|
196 |
-
inputs["image"] = torch.tensor(np.array(inputs["image"]) / 127.5 - 1).permute(2, 0, 1).unsqueeze(0)
|
197 |
-
inputs["mask_image"] = torch.tensor(np.array(inputs["mask_image"]) / 255).permute(2, 0, 1)[:1].unsqueeze(0)
|
198 |
-
output = sd_pipe(**inputs)
|
199 |
-
out_tensor = output.images
|
200 |
-
|
201 |
-
assert out_pil.shape == (1, 64, 64, 3)
|
202 |
-
assert np.abs(out_pil.flatten() - out_tensor.flatten()).max() < 5e-2
|
203 |
-
|
204 |
-
def test_stable_diffusion_inpaint_lora(self):
|
205 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
206 |
-
|
207 |
-
components = self.get_dummy_components()
|
208 |
-
sd_pipe = StableDiffusionInpaintPipeline(**components)
|
209 |
-
sd_pipe = sd_pipe.to(torch_device)
|
210 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
211 |
-
|
212 |
-
# forward 1
|
213 |
-
inputs = self.get_dummy_inputs(device)
|
214 |
-
output = sd_pipe(**inputs)
|
215 |
-
image = output.images
|
216 |
-
image_slice = image[0, -3:, -3:, -1]
|
217 |
-
|
218 |
-
# set lora layers
|
219 |
-
lora_attn_procs = create_lora_layers(sd_pipe.unet)
|
220 |
-
sd_pipe.unet.set_attn_processor(lora_attn_procs)
|
221 |
-
sd_pipe = sd_pipe.to(torch_device)
|
222 |
-
|
223 |
-
# forward 2
|
224 |
-
inputs = self.get_dummy_inputs(device)
|
225 |
-
output = sd_pipe(**inputs, cross_attention_kwargs={"scale": 0.0})
|
226 |
-
image = output.images
|
227 |
-
image_slice_1 = image[0, -3:, -3:, -1]
|
228 |
-
|
229 |
-
# forward 3
|
230 |
-
inputs = self.get_dummy_inputs(device)
|
231 |
-
output = sd_pipe(**inputs, cross_attention_kwargs={"scale": 0.5})
|
232 |
-
image = output.images
|
233 |
-
image_slice_2 = image[0, -3:, -3:, -1]
|
234 |
-
|
235 |
-
assert np.abs(image_slice - image_slice_1).max() < 1e-2
|
236 |
-
assert np.abs(image_slice - image_slice_2).max() > 1e-2
|
237 |
-
|
238 |
-
def test_inference_batch_single_identical(self):
|
239 |
-
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
|
240 |
-
|
241 |
-
def test_stable_diffusion_inpaint_strength_zero_test(self):
|
242 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
243 |
-
components = self.get_dummy_components()
|
244 |
-
sd_pipe = StableDiffusionInpaintPipeline(**components)
|
245 |
-
sd_pipe = sd_pipe.to(device)
|
246 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
247 |
-
|
248 |
-
inputs = self.get_dummy_inputs(device)
|
249 |
-
|
250 |
-
# check that the pipeline raises value error when num_inference_steps is < 1
|
251 |
-
inputs["strength"] = 0.01
|
252 |
-
with self.assertRaises(ValueError):
|
253 |
-
sd_pipe(**inputs).images
|
254 |
-
|
255 |
-
|
256 |
-
class StableDiffusionSimpleInpaintPipelineFastTests(StableDiffusionInpaintPipelineFastTests):
|
257 |
-
pipeline_class = StableDiffusionInpaintPipeline
|
258 |
-
params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
|
259 |
-
batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
|
260 |
-
image_params = frozenset([])
|
261 |
-
# TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
|
262 |
-
|
263 |
-
def get_dummy_components(self):
|
264 |
-
torch.manual_seed(0)
|
265 |
-
unet = UNet2DConditionModel(
|
266 |
-
block_out_channels=(32, 64),
|
267 |
-
layers_per_block=2,
|
268 |
-
sample_size=32,
|
269 |
-
in_channels=4,
|
270 |
-
out_channels=4,
|
271 |
-
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
|
272 |
-
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
|
273 |
-
cross_attention_dim=32,
|
274 |
-
)
|
275 |
-
scheduler = PNDMScheduler(skip_prk_steps=True)
|
276 |
-
torch.manual_seed(0)
|
277 |
-
vae = AutoencoderKL(
|
278 |
-
block_out_channels=[32, 64],
|
279 |
-
in_channels=3,
|
280 |
-
out_channels=3,
|
281 |
-
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
|
282 |
-
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
|
283 |
-
latent_channels=4,
|
284 |
-
)
|
285 |
-
torch.manual_seed(0)
|
286 |
-
text_encoder_config = CLIPTextConfig(
|
287 |
-
bos_token_id=0,
|
288 |
-
eos_token_id=2,
|
289 |
-
hidden_size=32,
|
290 |
-
intermediate_size=37,
|
291 |
-
layer_norm_eps=1e-05,
|
292 |
-
num_attention_heads=4,
|
293 |
-
num_hidden_layers=5,
|
294 |
-
pad_token_id=1,
|
295 |
-
vocab_size=1000,
|
296 |
-
)
|
297 |
-
text_encoder = CLIPTextModel(text_encoder_config)
|
298 |
-
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
|
299 |
-
|
300 |
-
components = {
|
301 |
-
"unet": unet,
|
302 |
-
"scheduler": scheduler,
|
303 |
-
"vae": vae,
|
304 |
-
"text_encoder": text_encoder,
|
305 |
-
"tokenizer": tokenizer,
|
306 |
-
"safety_checker": None,
|
307 |
-
"feature_extractor": None,
|
308 |
-
}
|
309 |
-
return components
|
310 |
-
|
311 |
-
def test_stable_diffusion_inpaint(self):
|
312 |
-
device = "cpu" # ensure determinism for the device-dependent torch.Generator
|
313 |
-
components = self.get_dummy_components()
|
314 |
-
sd_pipe = StableDiffusionInpaintPipeline(**components)
|
315 |
-
sd_pipe = sd_pipe.to(device)
|
316 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
317 |
-
|
318 |
-
inputs = self.get_dummy_inputs(device)
|
319 |
-
image = sd_pipe(**inputs).images
|
320 |
-
image_slice = image[0, -3:, -3:, -1]
|
321 |
-
|
322 |
-
assert image.shape == (1, 64, 64, 3)
|
323 |
-
expected_slice = np.array([0.4925, 0.4967, 0.4100, 0.5234, 0.5322, 0.4532, 0.5805, 0.5877, 0.4151])
|
324 |
-
|
325 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
326 |
-
|
327 |
-
@unittest.skip("skipped here because area stays unchanged due to mask")
|
328 |
-
def test_stable_diffusion_inpaint_lora(self):
|
329 |
-
...
|
330 |
-
|
331 |
-
|
332 |
-
@slow
|
333 |
-
@require_torch_gpu
|
334 |
-
class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase):
|
335 |
-
def setUp(self):
|
336 |
-
super().setUp()
|
337 |
-
|
338 |
-
def tearDown(self):
|
339 |
-
super().tearDown()
|
340 |
-
gc.collect()
|
341 |
-
torch.cuda.empty_cache()
|
342 |
-
|
343 |
-
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
|
344 |
-
generator = torch.Generator(device=generator_device).manual_seed(seed)
|
345 |
-
init_image = load_image(
|
346 |
-
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
|
347 |
-
"/stable_diffusion_inpaint/input_bench_image.png"
|
348 |
-
)
|
349 |
-
mask_image = load_image(
|
350 |
-
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
|
351 |
-
"/stable_diffusion_inpaint/input_bench_mask.png"
|
352 |
-
)
|
353 |
-
inputs = {
|
354 |
-
"prompt": "Face of a yellow cat, high resolution, sitting on a park bench",
|
355 |
-
"image": init_image,
|
356 |
-
"mask_image": mask_image,
|
357 |
-
"generator": generator,
|
358 |
-
"num_inference_steps": 3,
|
359 |
-
"guidance_scale": 7.5,
|
360 |
-
"output_type": "numpy",
|
361 |
-
}
|
362 |
-
return inputs
|
363 |
-
|
364 |
-
def test_stable_diffusion_inpaint_ddim(self):
|
365 |
-
pipe = StableDiffusionInpaintPipeline.from_pretrained(
|
366 |
-
"runwayml/stable-diffusion-inpainting", safety_checker=None
|
367 |
-
)
|
368 |
-
pipe.to(torch_device)
|
369 |
-
pipe.set_progress_bar_config(disable=None)
|
370 |
-
pipe.enable_attention_slicing()
|
371 |
-
|
372 |
-
inputs = self.get_inputs(torch_device)
|
373 |
-
image = pipe(**inputs).images
|
374 |
-
image_slice = image[0, 253:256, 253:256, -1].flatten()
|
375 |
-
|
376 |
-
assert image.shape == (1, 512, 512, 3)
|
377 |
-
expected_slice = np.array([0.0427, 0.0460, 0.0483, 0.0460, 0.0584, 0.0521, 0.1549, 0.1695, 0.1794])
|
378 |
-
|
379 |
-
assert np.abs(expected_slice - image_slice).max() < 6e-4
|
380 |
-
|
381 |
-
def test_stable_diffusion_inpaint_fp16(self):
|
382 |
-
pipe = StableDiffusionInpaintPipeline.from_pretrained(
|
383 |
-
"runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, safety_checker=None
|
384 |
-
)
|
385 |
-
pipe.to(torch_device)
|
386 |
-
pipe.set_progress_bar_config(disable=None)
|
387 |
-
pipe.enable_attention_slicing()
|
388 |
-
|
389 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
390 |
-
image = pipe(**inputs).images
|
391 |
-
image_slice = image[0, 253:256, 253:256, -1].flatten()
|
392 |
-
|
393 |
-
assert image.shape == (1, 512, 512, 3)
|
394 |
-
expected_slice = np.array([0.1350, 0.1123, 0.1350, 0.1641, 0.1328, 0.1230, 0.1289, 0.1531, 0.1687])
|
395 |
-
|
396 |
-
assert np.abs(expected_slice - image_slice).max() < 5e-2
|
397 |
-
|
398 |
-
def test_stable_diffusion_inpaint_pndm(self):
|
399 |
-
pipe = StableDiffusionInpaintPipeline.from_pretrained(
|
400 |
-
"runwayml/stable-diffusion-inpainting", safety_checker=None
|
401 |
-
)
|
402 |
-
pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config)
|
403 |
-
pipe.to(torch_device)
|
404 |
-
pipe.set_progress_bar_config(disable=None)
|
405 |
-
pipe.enable_attention_slicing()
|
406 |
-
|
407 |
-
inputs = self.get_inputs(torch_device)
|
408 |
-
image = pipe(**inputs).images
|
409 |
-
image_slice = image[0, 253:256, 253:256, -1].flatten()
|
410 |
-
|
411 |
-
assert image.shape == (1, 512, 512, 3)
|
412 |
-
expected_slice = np.array([0.0425, 0.0273, 0.0344, 0.1694, 0.1727, 0.1812, 0.3256, 0.3311, 0.3272])
|
413 |
-
|
414 |
-
assert np.abs(expected_slice - image_slice).max() < 5e-3
|
415 |
-
|
416 |
-
def test_stable_diffusion_inpaint_k_lms(self):
|
417 |
-
pipe = StableDiffusionInpaintPipeline.from_pretrained(
|
418 |
-
"runwayml/stable-diffusion-inpainting", safety_checker=None
|
419 |
-
)
|
420 |
-
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
|
421 |
-
pipe.to(torch_device)
|
422 |
-
pipe.set_progress_bar_config(disable=None)
|
423 |
-
pipe.enable_attention_slicing()
|
424 |
-
|
425 |
-
inputs = self.get_inputs(torch_device)
|
426 |
-
image = pipe(**inputs).images
|
427 |
-
image_slice = image[0, 253:256, 253:256, -1].flatten()
|
428 |
-
|
429 |
-
assert image.shape == (1, 512, 512, 3)
|
430 |
-
expected_slice = np.array([0.9314, 0.7575, 0.9432, 0.8885, 0.9028, 0.7298, 0.9811, 0.9667, 0.7633])
|
431 |
-
|
432 |
-
assert np.abs(expected_slice - image_slice).max() < 6e-3
|
433 |
-
|
434 |
-
def test_stable_diffusion_inpaint_with_sequential_cpu_offloading(self):
|
435 |
-
torch.cuda.empty_cache()
|
436 |
-
torch.cuda.reset_max_memory_allocated()
|
437 |
-
torch.cuda.reset_peak_memory_stats()
|
438 |
-
|
439 |
-
pipe = StableDiffusionInpaintPipeline.from_pretrained(
|
440 |
-
"runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16
|
441 |
-
)
|
442 |
-
pipe = pipe.to(torch_device)
|
443 |
-
pipe.set_progress_bar_config(disable=None)
|
444 |
-
pipe.enable_attention_slicing(1)
|
445 |
-
pipe.enable_sequential_cpu_offload()
|
446 |
-
|
447 |
-
inputs = self.get_inputs(torch_device, dtype=torch.float16)
|
448 |
-
_ = pipe(**inputs)
|
449 |
-
|
450 |
-
mem_bytes = torch.cuda.max_memory_allocated()
|
451 |
-
# make sure that less than 2.2 GB is allocated
|
452 |
-
assert mem_bytes < 2.2 * 10**9
|
453 |
-
|
454 |
-
@require_torch_2
|
455 |
-
def test_inpaint_compile(self):
|
456 |
-
seed = 0
|
457 |
-
inputs = self.get_inputs(torch_device, seed=seed)
|
458 |
-
# Can't pickle a Generator object
|
459 |
-
del inputs["generator"]
|
460 |
-
inputs["torch_device"] = torch_device
|
461 |
-
inputs["seed"] = seed
|
462 |
-
run_test_in_subprocess(test_case=self, target_func=_test_inpaint_compile, inputs=inputs)
|
463 |
-
|
464 |
-
def test_stable_diffusion_inpaint_pil_input_resolution_test(self):
|
465 |
-
pipe = StableDiffusionInpaintPipeline.from_pretrained(
|
466 |
-
"runwayml/stable-diffusion-inpainting", safety_checker=None
|
467 |
-
)
|
468 |
-
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
|
469 |
-
pipe.to(torch_device)
|
470 |
-
pipe.set_progress_bar_config(disable=None)
|
471 |
-
pipe.enable_attention_slicing()
|
472 |
-
|
473 |
-
inputs = self.get_inputs(torch_device)
|
474 |
-
# change input image to a random size (one that would cause a tensor mismatch error)
|
475 |
-
inputs["image"] = inputs["image"].resize((127, 127))
|
476 |
-
inputs["mask_image"] = inputs["mask_image"].resize((127, 127))
|
477 |
-
inputs["height"] = 128
|
478 |
-
inputs["width"] = 128
|
479 |
-
image = pipe(**inputs).images
|
480 |
-
        # verify that the returned image has the same height and width as the input height and width
        assert image.shape == (1, inputs["height"], inputs["width"], 3)

    def test_stable_diffusion_inpaint_strength_test(self):
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        # change input strength
        inputs["strength"] = 0.75
        image = pipe(**inputs).images
        # verify that the returned image has the same height and width as the input height and width
        assert image.shape == (1, 512, 512, 3)

        image_slice = image[0, 253:256, 253:256, -1].flatten()
        expected_slice = np.array([0.0021, 0.2350, 0.3712, 0.0575, 0.2485, 0.3451, 0.1857, 0.3156, 0.3943])
        assert np.abs(expected_slice - image_slice).max() < 3e-3

    def test_stable_diffusion_simple_inpaint_ddim(self):
        pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images

        image_slice = image[0, 253:256, 253:256, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5157, 0.6858, 0.6873, 0.4619, 0.6416, 0.6898, 0.3702, 0.5960, 0.6935])

        assert np.abs(expected_slice - image_slice).max() < 6e-4

    def test_download_local(self):
        filename = hf_hub_download("runwayml/stable-diffusion-inpainting", filename="sd-v1-5-inpainting.ckpt")

        pipe = StableDiffusionInpaintPipeline.from_single_file(filename, torch_dtype=torch.float16)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 1
        image_out = pipe(**inputs).images[0]

        assert image_out.shape == (512, 512, 3)

    def test_download_ckpt_diff_format_is_same(self):
        ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-inpainting/blob/main/sd-v1-5-inpainting.ckpt"

        pipe = StableDiffusionInpaintPipeline.from_single_file(ckpt_path)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.unet.set_attn_processor(AttnProcessor())
        pipe.to("cuda")

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 5
        image_ckpt = pipe(**inputs).images[0]

        pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.unet.set_attn_processor(AttnProcessor())
        pipe.to("cuda")

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 5
        image = pipe(**inputs).images[0]

        assert np.max(np.abs(image - image_ckpt)) < 1e-4

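For reference, the single-file parity check above can be reproduced outside the test harness. Below is a minimal standalone sketch, assuming a CUDA device, network access, and a diffusers release that ships `from_single_file`; the checkpoint URL, scheduler, attention processor, and inputs are the ones the test uses:

import numpy as np
import torch
from diffusers import DDIMScheduler, StableDiffusionInpaintPipeline
from diffusers.models.attention_processor import AttnProcessor
from diffusers.utils import load_image

ckpt_url = "https://huggingface.co/runwayml/stable-diffusion-inpainting/blob/main/sd-v1-5-inpainting.ckpt"
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
    "/stable_diffusion_inpaint/input_bench_image.png"
)
mask = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
    "/stable_diffusion_inpaint/input_bench_mask.png"
)

def run(pipe):
    # Use an identical scheduler, attention processor, generator, and inputs on
    # both pipelines, so any difference comes from the weight-loading path alone.
    pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
    pipe.unet.set_attn_processor(AttnProcessor())
    pipe.to("cuda")
    generator = torch.Generator(device="cpu").manual_seed(0)
    return pipe(
        prompt="Face of a yellow cat, high resolution, sitting on a park bench",
        image=image,
        mask_image=mask,
        generator=generator,
        num_inference_steps=5,
        output_type="numpy",
    ).images[0]

image_ckpt = run(StableDiffusionInpaintPipeline.from_single_file(ckpt_url))
image_repo = run(StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting"))
print("max abs diff:", np.max(np.abs(image_repo - image_ckpt)))  # the test expects < 1e-4
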
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.TestCase):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_inpaint/input_bench_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_inpaint/input_bench_mask.png"
        )
        inputs = {
            "prompt": "Face of a yellow cat, high resolution, sitting on a park bench",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint_ddim(self):
        vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", safety_checker=None
        )
        pipe.vae = vae
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, 253:256, 253:256, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0521, 0.0606, 0.0602, 0.0446, 0.0495, 0.0434, 0.1175, 0.1290, 0.1431])

        assert np.abs(expected_slice - image_slice).max() < 6e-4

    def test_stable_diffusion_inpaint_fp16(self):
        vae = AsymmetricAutoencoderKL.from_pretrained(
            "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16
        )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, safety_checker=None
        )
        pipe.vae = vae
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        image = pipe(**inputs).images
        image_slice = image[0, 253:256, 253:256, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1343, 0.1406, 0.1440, 0.1504, 0.1729, 0.0989, 0.1807, 0.2822, 0.1179])

        assert np.abs(expected_slice - image_slice).max() < 5e-2

    def test_stable_diffusion_inpaint_pndm(self):
        vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", safety_checker=None
        )
        pipe.vae = vae
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, 253:256, 253:256, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0976, 0.1071, 0.1119, 0.1363, 0.1260, 0.1150, 0.3745, 0.3586, 0.3340])

        assert np.abs(expected_slice - image_slice).max() < 5e-3

    def test_stable_diffusion_inpaint_k_lms(self):
        vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", safety_checker=None
        )
        pipe.vae = vae
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, 253:256, 253:256, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.8909, 0.8620, 0.9024, 0.8501, 0.8558, 0.9074, 0.8790, 0.7540, 0.9003])

        assert np.abs(expected_slice - image_slice).max() < 6e-3

    def test_stable_diffusion_inpaint_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        vae = AsymmetricAutoencoderKL.from_pretrained(
            "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16
        )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.vae = vae
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs(torch_device, dtype=torch.float16)
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.45 GB is allocated
        assert mem_bytes < 2.45 * 10**9

    @require_torch_2
    def test_inpaint_compile(self):
        pass

    def test_stable_diffusion_inpaint_pil_input_resolution_test(self):
        vae = AsymmetricAutoencoderKL.from_pretrained(
            "cross-attention/asymmetric-autoencoder-kl-x-1-5",
        )
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", safety_checker=None
        )
        pipe.vae = vae
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        # change input image to a random size (one that would cause a tensor mismatch error)
        inputs["image"] = inputs["image"].resize((127, 127))
        inputs["mask_image"] = inputs["mask_image"].resize((127, 127))
        inputs["height"] = 128
        inputs["width"] = 128
        image = pipe(**inputs).images
        # verify that the returned image has the same height and width as the input height and width
        assert image.shape == (1, inputs["height"], inputs["width"], 3)

    def test_stable_diffusion_inpaint_strength_test(self):
        vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", safety_checker=None
        )
        pipe.vae = vae
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        # change input strength
        inputs["strength"] = 0.75
        image = pipe(**inputs).images
        # verify that the returned image has the same height and width as the input height and width
        assert image.shape == (1, 512, 512, 3)

        image_slice = image[0, 253:256, 253:256, -1].flatten()
        expected_slice = np.array([0.2458, 0.2576, 0.3124, 0.2679, 0.2669, 0.2796, 0.2872, 0.2975, 0.2661])
        assert np.abs(expected_slice - image_slice).max() < 3e-3

    def test_stable_diffusion_simple_inpaint_ddim(self):
        vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
        pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        pipe.vae = vae
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images

        image_slice = image[0, 253:256, 253:256, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3312, 0.4052, 0.4103, 0.4153, 0.4347, 0.4154, 0.4932, 0.4920, 0.4431])

        assert np.abs(expected_slice - image_slice).max() < 6e-4

    def test_download_local(self):
        vae = AsymmetricAutoencoderKL.from_pretrained(
            "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16
        )
        filename = hf_hub_download("runwayml/stable-diffusion-inpainting", filename="sd-v1-5-inpainting.ckpt")

        pipe = StableDiffusionInpaintPipeline.from_single_file(filename, torch_dtype=torch.float16)
        pipe.vae = vae
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 1
        image_out = pipe(**inputs).images[0]

        assert image_out.shape == (512, 512, 3)

    def test_download_ckpt_diff_format_is_same(self):
        pass

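The class above repeatedly swaps an AsymmetricAutoencoderKL into the stock inpainting pipeline via `pipe.vae`. A minimal sketch of that swap, using the same checkpoints as the tests; the remark about decoder conditioning reflects the model's stated design for inpainting and is an assumption here, not something the tests assert:

import torch
from diffusers import AsymmetricAutoencoderKL, StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", safety_checker=None
)
# The asymmetric VAE shares the latent space of the stock VAE, so it can be
# dropped in directly; only decoding changes (its decoder is built for
# inpainting and tends to preserve unmasked regions more faithfully).
pipe.vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5")
pipe.to("cuda")
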
@nightly
@require_torch_gpu
class StableDiffusionInpaintPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        init_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_inpaint/input_bench_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_inpaint/input_bench_mask.png"
        )
        inputs = {
            "prompt": "Face of a yellow cat, high resolution, sitting on a park bench",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_inpaint_ddim(self):
        sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_inpaint/stable_diffusion_inpaint_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_inpaint_pndm(self):
        sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
        sd_pipe.scheduler = PNDMScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_inpaint/stable_diffusion_inpaint_pndm.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_inpaint_lms(self):
        sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_inpaint/stable_diffusion_inpaint_lms.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_inpaint_dpm(self):
        sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
        sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 30
        image = sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_inpaint/stable_diffusion_inpaint_dpm_multi.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

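The nightly tests above all share one regression pattern: generate once with a fixed seed, then compare against a frozen reference array hosted in the diffusers test-arrays dataset. A small runnable sketch of that pattern, with the URL taken from the tests and the comparison factored into a helper:

import numpy as np
from diffusers.utils import load_numpy

expected = load_numpy(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
    "/stable_diffusion_inpaint/stable_diffusion_inpaint_ddim.npy"
)
print(expected.shape, expected.dtype)  # frozen reference render, HxWx3 floats

def assert_close(image: np.ndarray, reference: np.ndarray, tol: float = 1e-3) -> None:
    # Same criterion as the nightly tests: maximum absolute pixel difference.
    max_diff = np.abs(reference - image).max()
    assert max_diff < tol, f"max diff {max_diff} exceeds {tol}"
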
class StableDiffusionInpaintingPrepareMaskAndMaskedImageTests(unittest.TestCase):
    def test_pil_inputs(self):
        height, width = 32, 32
        im = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8)
        im = Image.fromarray(im)
        mask = np.random.randint(0, 255, (height, width), dtype=np.uint8) > 127.5
        mask = Image.fromarray((mask * 255).astype(np.uint8))

        t_mask, t_masked, t_image = prepare_mask_and_masked_image(im, mask, height, width, return_image=True)

        self.assertTrue(isinstance(t_mask, torch.Tensor))
        self.assertTrue(isinstance(t_masked, torch.Tensor))
        self.assertTrue(isinstance(t_image, torch.Tensor))

        self.assertEqual(t_mask.ndim, 4)
        self.assertEqual(t_masked.ndim, 4)
        self.assertEqual(t_image.ndim, 4)

        self.assertEqual(t_mask.shape, (1, 1, height, width))
        self.assertEqual(t_masked.shape, (1, 3, height, width))
        self.assertEqual(t_image.shape, (1, 3, height, width))

        self.assertTrue(t_mask.dtype == torch.float32)
        self.assertTrue(t_masked.dtype == torch.float32)
        self.assertTrue(t_image.dtype == torch.float32)

        self.assertTrue(t_mask.min() >= 0.0)
        self.assertTrue(t_mask.max() <= 1.0)
        self.assertTrue(t_masked.min() >= -1.0)
        self.assertTrue(t_masked.max() <= 1.0)
        self.assertTrue(t_image.min() >= -1.0)
        self.assertTrue(t_image.max() <= 1.0)

        self.assertTrue(t_mask.sum() > 0.0)

    def test_np_inputs(self):
        height, width = 32, 32

        im_np = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8)
        im_pil = Image.fromarray(im_np)
        mask_np = np.random.randint(0, 255, (height, width), dtype=np.uint8) > 127.5
        mask_pil = Image.fromarray((mask_np * 255).astype(np.uint8))

        t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image(
            im_np, mask_np, height, width, return_image=True
        )
        t_mask_pil, t_masked_pil, t_image_pil = prepare_mask_and_masked_image(
            im_pil, mask_pil, height, width, return_image=True
        )

        self.assertTrue((t_mask_np == t_mask_pil).all())
        self.assertTrue((t_masked_np == t_masked_pil).all())
        self.assertTrue((t_image_np == t_image_pil).all())

    def test_torch_3D_2D_inputs(self):
        height, width = 32, 32

        im_tensor = torch.randint(0, 255, (3, height, width), dtype=torch.uint8)
        mask_tensor = torch.randint(0, 255, (height, width), dtype=torch.uint8) > 127.5
        im_np = im_tensor.numpy().transpose(1, 2, 0)
        mask_np = mask_tensor.numpy()

        t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image(
            im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True
        )
        t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image(
            im_np, mask_np, height, width, return_image=True
        )

        self.assertTrue((t_mask_tensor == t_mask_np).all())
        self.assertTrue((t_masked_tensor == t_masked_np).all())
        self.assertTrue((t_image_tensor == t_image_np).all())

    def test_torch_3D_3D_inputs(self):
        height, width = 32, 32

        im_tensor = torch.randint(0, 255, (3, height, width), dtype=torch.uint8)
        mask_tensor = torch.randint(0, 255, (1, height, width), dtype=torch.uint8) > 127.5
        im_np = im_tensor.numpy().transpose(1, 2, 0)
        mask_np = mask_tensor.numpy()[0]

        t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image(
            im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True
        )
        t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image(
            im_np, mask_np, height, width, return_image=True
        )

        self.assertTrue((t_mask_tensor == t_mask_np).all())
        self.assertTrue((t_masked_tensor == t_masked_np).all())
        self.assertTrue((t_image_tensor == t_image_np).all())

    def test_torch_4D_2D_inputs(self):
        height, width = 32, 32

        im_tensor = torch.randint(0, 255, (1, 3, height, width), dtype=torch.uint8)
        mask_tensor = torch.randint(0, 255, (height, width), dtype=torch.uint8) > 127.5
        im_np = im_tensor.numpy()[0].transpose(1, 2, 0)
        mask_np = mask_tensor.numpy()

        t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image(
            im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True
        )
        t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image(
            im_np, mask_np, height, width, return_image=True
        )

        self.assertTrue((t_mask_tensor == t_mask_np).all())
        self.assertTrue((t_masked_tensor == t_masked_np).all())
        self.assertTrue((t_image_tensor == t_image_np).all())

    def test_torch_4D_3D_inputs(self):
        height, width = 32, 32

        im_tensor = torch.randint(0, 255, (1, 3, height, width), dtype=torch.uint8)
        mask_tensor = torch.randint(0, 255, (1, height, width), dtype=torch.uint8) > 127.5
        im_np = im_tensor.numpy()[0].transpose(1, 2, 0)
        mask_np = mask_tensor.numpy()[0]

        t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image(
            im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True
        )
        t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image(
            im_np, mask_np, height, width, return_image=True
        )

        self.assertTrue((t_mask_tensor == t_mask_np).all())
        self.assertTrue((t_masked_tensor == t_masked_np).all())
        self.assertTrue((t_image_tensor == t_image_np).all())

    def test_torch_4D_4D_inputs(self):
        height, width = 32, 32

        im_tensor = torch.randint(0, 255, (1, 3, height, width), dtype=torch.uint8)
        mask_tensor = torch.randint(0, 255, (1, 1, height, width), dtype=torch.uint8) > 127.5
        im_np = im_tensor.numpy()[0].transpose(1, 2, 0)
        mask_np = mask_tensor.numpy()[0][0]

        t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image(
            im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True
        )
        t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image(
            im_np, mask_np, height, width, return_image=True
        )

        self.assertTrue((t_mask_tensor == t_mask_np).all())
        self.assertTrue((t_masked_tensor == t_masked_np).all())
        self.assertTrue((t_image_tensor == t_image_np).all())

    def test_torch_batch_4D_3D(self):
        height, width = 32, 32

        im_tensor = torch.randint(0, 255, (2, 3, height, width), dtype=torch.uint8)
        mask_tensor = torch.randint(0, 255, (2, height, width), dtype=torch.uint8) > 127.5

        im_nps = [im.numpy().transpose(1, 2, 0) for im in im_tensor]
        mask_nps = [mask.numpy() for mask in mask_tensor]

        t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image(
            im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True
        )
        nps = [prepare_mask_and_masked_image(i, m, height, width, return_image=True) for i, m in zip(im_nps, mask_nps)]
        t_mask_np = torch.cat([n[0] for n in nps])
        t_masked_np = torch.cat([n[1] for n in nps])
        t_image_np = torch.cat([n[2] for n in nps])

        self.assertTrue((t_mask_tensor == t_mask_np).all())
        self.assertTrue((t_masked_tensor == t_masked_np).all())
        self.assertTrue((t_image_tensor == t_image_np).all())

    def test_torch_batch_4D_4D(self):
        height, width = 32, 32

        im_tensor = torch.randint(0, 255, (2, 3, height, width), dtype=torch.uint8)
        mask_tensor = torch.randint(0, 255, (2, 1, height, width), dtype=torch.uint8) > 127.5

        im_nps = [im.numpy().transpose(1, 2, 0) for im in im_tensor]
        mask_nps = [mask.numpy()[0] for mask in mask_tensor]

        t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image(
            im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True
        )
        nps = [prepare_mask_and_masked_image(i, m, height, width, return_image=True) for i, m in zip(im_nps, mask_nps)]
        t_mask_np = torch.cat([n[0] for n in nps])
        t_masked_np = torch.cat([n[1] for n in nps])
        t_image_np = torch.cat([n[2] for n in nps])

        self.assertTrue((t_mask_tensor == t_mask_np).all())
        self.assertTrue((t_masked_tensor == t_masked_np).all())
        self.assertTrue((t_image_tensor == t_image_np).all())

    def test_shape_mismatch(self):
        height, width = 32, 32

        # test height and width
        with self.assertRaises(AssertionError):
            prepare_mask_and_masked_image(
                torch.randn(3, height, width), torch.randn(64, 64), height, width, return_image=True
            )
        # test batch dim
        with self.assertRaises(AssertionError):
            prepare_mask_and_masked_image(
                torch.randn(2, 3, height, width), torch.randn(4, 64, 64), height, width, return_image=True
            )
        # test batch dim
        with self.assertRaises(AssertionError):
            prepare_mask_and_masked_image(
                torch.randn(2, 3, height, width), torch.randn(4, 1, 64, 64), height, width, return_image=True
            )

    def test_type_mismatch(self):
        height, width = 32, 32

        # test tensors-only
        with self.assertRaises(TypeError):
            prepare_mask_and_masked_image(
                torch.rand(3, height, width), torch.rand(3, height, width).numpy(), height, width, return_image=True
            )
        # test tensors-only
        with self.assertRaises(TypeError):
            prepare_mask_and_masked_image(
                torch.rand(3, height, width).numpy(), torch.rand(3, height, width), height, width, return_image=True
            )

    def test_channels_first(self):
        height, width = 32, 32

        # test channels first for 3D tensors
        with self.assertRaises(AssertionError):
            prepare_mask_and_masked_image(
                torch.rand(height, width, 3), torch.rand(3, height, width), height, width, return_image=True
            )

    def test_tensor_range(self):
        height, width = 32, 32

        # test im <= 1
        with self.assertRaises(ValueError):
            prepare_mask_and_masked_image(
                torch.ones(3, height, width) * 2, torch.rand(height, width), height, width, return_image=True
            )
        # test im >= -1
        with self.assertRaises(ValueError):
            prepare_mask_and_masked_image(
                torch.ones(3, height, width) * (-2), torch.rand(height, width), height, width, return_image=True
            )
        # test mask <= 1
        with self.assertRaises(ValueError):
            prepare_mask_and_masked_image(
                torch.rand(3, height, width), torch.ones(height, width) * 2, height, width, return_image=True
            )
        # test mask >= 0
        with self.assertRaises(ValueError):
            prepare_mask_and_masked_image(
                torch.rand(3, height, width), torch.ones(height, width) * -1, height, width, return_image=True
            )
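The class above pins down the contract of prepare_mask_and_masked_image: the mask comes back as a (1, 1, H, W) float32 tensor in [0, 1], the image as (1, 3, H, W) in [-1, 1], and the masked image as the image with masked-out pixels zeroed. A minimal sketch of that contract, assuming the import path this test file was written against (it may have moved in newer diffusers releases):

import numpy as np
import torch
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import (
    prepare_mask_and_masked_image,
)

height = width = 32
image = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8)
mask = np.random.randint(0, 255, (height, width), dtype=np.uint8) > 127.5

mask_t, masked_t, image_t = prepare_mask_and_masked_image(image, mask, height, width, return_image=True)
print(mask_t.shape, masked_t.shape, image_t.shape)  # (1,1,H,W), (1,3,H,W), (1,3,H,W)
assert 0.0 <= mask_t.min() and mask_t.max() <= 1.0    # mask normalized to [0, 1]
assert -1.0 <= image_t.min() and image_t.max() <= 1.0  # image normalized to [-1, 1]
# The masked image zeroes pixels wherever the mask is set:
assert torch.equal(masked_t, image_t * (mask_t < 0.5))
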
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
DELETED
@@ -1,599 +0,0 @@
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import (
    CLIPTextConfig,
    CLIPTextModel,
    CLIPTokenizer,
    DPTConfig,
    DPTFeatureExtractor,
    DPTForDepthEstimation,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionDepth2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import (
    floats_tensor,
    is_accelerate_available,
    is_accelerate_version,
    load_image,
    load_numpy,
    nightly,
    slow,
    torch_device,
)
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
    TEXT_TO_IMAGE_IMAGE_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


@skip_mps
class StableDiffusionDepth2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionDepth2ImgPipeline
    test_save_load_optional_components = False
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=5,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        depth_estimator_config = DPTConfig(
            image_size=32,
            patch_size=16,
            num_channels=3,
            hidden_size=32,
            num_hidden_layers=4,
            backbone_out_indices=(0, 1, 2, 3),
            num_attention_heads=4,
            intermediate_size=37,
            hidden_act="gelu",
            hidden_dropout_prob=0.1,
            attention_probs_dropout_prob=0.1,
            is_decoder=False,
            initializer_range=0.02,
            is_hybrid=True,
            backbone_config=backbone_config,
            backbone_featmap_shape=[1, 384, 24, 24],
        )
        depth_estimator = DPTForDepthEstimation(depth_estimator_config).eval()
        feature_extractor = DPTFeatureExtractor.from_pretrained(
            "hf-internal-testing/tiny-random-DPTForDepthEstimation"
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "depth_estimator": depth_estimator,
            "feature_extractor": feature_extractor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed))
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((32, 32))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.to(torch_device).half()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for name, component in pipe_loaded.components.items():
            if hasattr(component, "dtype"):
                self.assertTrue(
                    component.dtype == torch.float16,
                    f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.",
                )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 2e-2, "The output of the fp16 pipeline changed after saving and loading.")

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_float16_inference(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe_fp16 = self.pipeline_class(**components)
        pipe_fp16.to(torch_device)
        pipe_fp16.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(torch_device))[0]
        output_fp16 = pipe_fp16(**self.get_dummy_inputs(torch_device))[0]

        max_diff = np.abs(output - output_fp16).max()
        self.assertLess(max_diff, 1.3e-2, "The outputs of the fp16 and fp32 pipelines are too different.")

    @unittest.skipIf(
        torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"),
        reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher",
    )
    def test_cpu_offload_forward_pass(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output_without_offload = pipe(**inputs)[0]

        pipe.enable_sequential_cpu_offload()
        inputs = self.get_dummy_inputs(torch_device)
        output_with_offload = pipe(**inputs)[0]

        max_diff = np.abs(output_with_offload - output_without_offload).max()
        self.assertLess(max_diff, 1e-4, "CPU offloading should not affect the inference results")

    def test_dict_tuple_outputs_equivalent(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(torch_device))[0]
        output_tuple = pipe(**self.get_dummy_inputs(torch_device), return_dict=False)[0]

        max_diff = np.abs(output - output_tuple).max()
        self.assertLess(max_diff, 1e-4)

    def test_progress_bar(self):
        super().test_progress_bar()

    def test_stable_diffusion_depth2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = StableDiffusionDepth2ImgPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        if torch_device == "mps":
            expected_slice = np.array([0.6071, 0.5035, 0.4378, 0.5776, 0.5753, 0.4316, 0.4513, 0.5263, 0.4546])
        else:
            expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_depth2img_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = StableDiffusionDepth2ImgPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        if torch_device == "mps":
            expected_slice = np.array([0.6296, 0.5125, 0.3890, 0.4456, 0.5955, 0.4621, 0.3810, 0.5310, 0.4626])
        else:
            expected_slice = np.array([0.6012, 0.4507, 0.3769, 0.4121, 0.5566, 0.4585, 0.3803, 0.5045, 0.4631])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_depth2img_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = StableDiffusionDepth2ImgPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2
        inputs["image"] = 2 * [inputs["image"]]
        image = pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)

        if torch_device == "mps":
            expected_slice = np.array([0.6501, 0.5150, 0.4939, 0.6688, 0.5437, 0.5758, 0.5115, 0.4406, 0.4551])
        else:
            expected_slice = np.array([0.6557, 0.6214, 0.6254, 0.5775, 0.4785, 0.5949, 0.5904, 0.4785, 0.4730])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_depth2img_pil(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = StableDiffusionDepth2ImgPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)

        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        if torch_device == "mps":
            expected_slice = np.array([0.53232, 0.47015, 0.40868, 0.45651, 0.4891, 0.4668, 0.4287, 0.48822, 0.47439])
        else:
            expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

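The slow tests that follow exercise the pipeline's `callback` hook to inspect intermediate latents during denoising. A minimal standalone sketch of the same mechanism, assuming a CUDA device and using the checkpoint, image, and step count from the tests below:

import torch
from diffusers import StableDiffusionDepth2ImgPipeline
from diffusers.utils import load_image

def on_step(step: int, timestep: int, latents: torch.FloatTensor) -> None:
    # Invoked every `callback_steps` denoising steps with the current latents.
    print(f"step={step} timestep={int(timestep)} latents={tuple(latents.shape)}")

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png"
)
pipe("two tigers", image=init_image, num_inference_steps=3, callback=on_step, callback_steps=1)
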
@slow
@require_torch_gpu
class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=device).manual_seed(seed)
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png"
        )
        inputs = {
            "prompt": "two tigers",
            "image": init_image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_depth2img_pipeline_default(self):
        pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-depth", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, 253:256, 253:256, -1].flatten()

        assert image.shape == (1, 480, 640, 3)
        expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655])

        assert np.abs(expected_slice - image_slice).max() < 6e-1

    def test_stable_diffusion_depth2img_pipeline_k_lms(self):
        pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-depth", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, 253:256, 253:256, -1].flatten()

        assert image.shape == (1, 480, 640, 3)
        expected_slice = np.array([0.6363, 0.6274, 0.6309, 0.6370, 0.6226, 0.6286, 0.6213, 0.6453, 0.6306])

        assert np.abs(expected_slice - image_slice).max() < 8e-4

    def test_stable_diffusion_depth2img_pipeline_ddim(self):
        pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-depth", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, 253:256, 253:256, -1].flatten()

        assert image.shape == (1, 480, 640, 3)
        expected_slice = np.array([0.6424, 0.6524, 0.6249, 0.6041, 0.6634, 0.6420, 0.6522, 0.6555, 0.6436])

        assert np.abs(expected_slice - image_slice).max() < 5e-4

    def test_stable_diffusion_depth2img_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 60, 80)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.7168, -1.5137, -0.1418, -2.9219, -2.7266, -2.4414, -2.1035, -3.0078, -1.7051]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 60, 80)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.7109, -1.5068, -0.1403, -2.9160, -2.7207, -2.4414, -2.1035, -3.0059, -1.7090]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs(dtype=torch.float16)
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 2

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs(dtype=torch.float16)
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9


@nightly
@require_torch_gpu
class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=device).manual_seed(seed)
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png"
        )
        inputs = {
            "prompt": "two tigers",
            "image": init_image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_depth2img_pndm(self):
        pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_depth2img/stable_diffusion_2_0_pndm.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_depth2img_ddim(self):
        pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth")
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_depth2img/stable_diffusion_2_0_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_img2img_lms(self):
        pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_depth2img/stable_diffusion_2_0_lms.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3

    def test_img2img_dpm(self):
        pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 30
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_depth2img/stable_diffusion_2_0_dpm_multi.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
|
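The deleted nightly tests above all drive the pipeline the same way: load the depth2img checkpoint, optionally swap the scheduler, run with seeded inputs, and compare against a reference. For context, a minimal standalone sketch of that usage pattern, assuming only the public diffusers API shown in the tests; the device, step count, and output filename here are illustrative:

# Minimal depth2img sketch mirroring the deleted tests (illustrative values).
import torch
from diffusers import StableDiffusionDepth2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
pipe.enable_attention_slicing()  # trade speed for lower peak memory, as in the tests

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png"
)
generator = torch.Generator(device="cpu").manual_seed(0)  # seeded for reproducibility
image = pipe(
    prompt="two tigers",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
    num_inference_steps=25,  # illustrative; the tests use 3 steps for speed
    generator=generator,
).images[0]
image.save("depth2img.png")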
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r50-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,4 +0,0 @@
_base_ = [
    '../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]

spaces/Andy1621/uniformer_image_segmentation/configs/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes.py
DELETED
@@ -1,4 +0,0 @@
_base_ = [
    '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]

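Both deleted configs above are pure _base_ compositions: mmsegmentation merges the listed base files (model, dataset, runtime, schedule) into a single config at load time. A sketch of how such a config is typically specialized; the override values below are illustrative, not taken from the deleted files:

# Hypothetical variant config: inherit the same bases, override only what differs.
_base_ = [
    '../_base_/models/deeplabv3_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
# Dict fields merge recursively with the bases, so only the changed keys are listed.
model = dict(decode_head=dict(num_classes=19))  # Cityscapes has 19 classes
optimizer = dict(lr=0.005)  # illustrative override of the base schedule's learning rate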
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/.github/pull_request_template.md
DELETED
@@ -1,3 +0,0 @@
## Checklist:

- [ ] I have read the [Contributing guidelines](https://github.com/oobabooga/text-generation-webui/wiki/Contributing-guidelines).

spaces/Anonymous-sub/Rerender/ControlNet/config.py
DELETED
@@ -1 +0,0 @@
save_memory = False

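The deleted config.py holds a single module-level flag. A sketch of how ControlNet-style demo scripts typically consume it; the low_vram_shift helper is assumed from that codebase, so treat this as illustrative rather than the exact deleted call site:

# Minimal sketch, assuming ControlNet's `low_vram_shift` helper on a loaded model.
import config

def maybe_offload(model, is_diffusing: bool):
    # When save_memory is set, keep only the currently running stage on the GPU
    # and shuttle the other modules to CPU between stages.
    if config.save_memory:
        model.low_vram_shift(is_diffusing=is_diffusing)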
spaces/Ariharasudhan/YoloV5/models/tf.py
DELETED
@@ -1,608 +0,0 @@
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
TensorFlow, Keras and TFLite versions of YOLOv5
Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127

Usage:
    $ python models/tf.py --weights yolov5s.pt

Export:
    $ python export.py --weights yolov5s.pt --include saved_model pb tflite tfjs
"""

import argparse
import sys
from copy import deepcopy
from pathlib import Path

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
# ROOT = ROOT.relative_to(Path.cwd())  # relative

import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
from tensorflow import keras

from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv,
                           DWConvTranspose2d, Focus, autopad)
from models.experimental import MixConv2d, attempt_load
from models.yolo import Detect, Segment
from utils.activations import SiLU
from utils.general import LOGGER, make_divisible, print_args


class TFBN(keras.layers.Layer):
    # TensorFlow BatchNormalization wrapper
    def __init__(self, w=None):
        super().__init__()
        self.bn = keras.layers.BatchNormalization(
            beta_initializer=keras.initializers.Constant(w.bias.numpy()),
            gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
            moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()),
            moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()),
            epsilon=w.eps)

    def call(self, inputs):
        return self.bn(inputs)


class TFPad(keras.layers.Layer):
    # Pad inputs in spatial dimensions 1 and 2
    def __init__(self, pad):
        super().__init__()
        if isinstance(pad, int):
            self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
        else:  # tuple/list
            self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]])

    def call(self, inputs):
        return tf.pad(inputs, self.pad, mode='constant', constant_values=0)


class TFConv(keras.layers.Layer):
    # Standard convolution
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
        # ch_in, ch_out, weights, kernel, stride, padding, groups
        super().__init__()
        assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
        # TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
        # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch
        conv = keras.layers.Conv2D(
            filters=c2,
            kernel_size=k,
            strides=s,
            padding='SAME' if s == 1 else 'VALID',
            use_bias=not hasattr(w, 'bn'),
            kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
            bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
        self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
        self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
        self.act = activations(w.act) if act else tf.identity

    def call(self, inputs):
        return self.act(self.bn(self.conv(inputs)))


class TFDWConv(keras.layers.Layer):
    # Depthwise convolution
    def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):
        # ch_in, ch_out, weights, kernel, stride, padding, groups
        super().__init__()
        assert c2 % c1 == 0, f'TFDWConv() output={c2} must be a multiple of input={c1} channels'
        conv = keras.layers.DepthwiseConv2D(
            kernel_size=k,
            depth_multiplier=c2 // c1,
            strides=s,
            padding='SAME' if s == 1 else 'VALID',
            use_bias=not hasattr(w, 'bn'),
            depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
            bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
        self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
        self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
        self.act = activations(w.act) if act else tf.identity

    def call(self, inputs):
        return self.act(self.bn(self.conv(inputs)))


class TFDWConvTranspose2d(keras.layers.Layer):
    # Depthwise ConvTranspose2d
    def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):
        # ch_in, ch_out, weights, kernel, stride, padding, groups
        super().__init__()
        assert c1 == c2, f'TFDWConv() output={c2} must be equal to input={c1} channels'
        assert k == 4 and p1 == 1, 'TFDWConv() only valid for k=4 and p1=1'
        weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy()
        self.c1 = c1
        self.conv = [
            keras.layers.Conv2DTranspose(filters=1,
                                         kernel_size=k,
                                         strides=s,
                                         padding='VALID',
                                         output_padding=p2,
                                         use_bias=True,
                                         kernel_initializer=keras.initializers.Constant(weight[..., i:i + 1]),
                                         bias_initializer=keras.initializers.Constant(bias[i])) for i in range(c1)]

    def call(self, inputs):
        return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1]


class TFFocus(keras.layers.Layer):
    # Focus wh information into c-space
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
        # ch_in, ch_out, kernel, stride, padding, groups
        super().__init__()
        self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)

    def call(self, inputs):  # x(b,w,h,c) -> y(b,w/2,h/2,4c)
        # inputs = inputs / 255  # normalize 0-255 to 0-1
        inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]]
        return self.conv(tf.concat(inputs, 3))


class TFBottleneck(keras.layers.Layer):
    # Standard bottleneck
    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None):  # ch_in, ch_out, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
        self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)
        self.add = shortcut and c1 == c2

    def call(self, inputs):
        return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))


class TFCrossConv(keras.layers.Layer):
    # Cross Convolution
    def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1)
        self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2)
        self.add = shortcut and c1 == c2

    def call(self, inputs):
        return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))


class TFConv2d(keras.layers.Layer):
    # Substitution for PyTorch nn.Conv2D
    def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
        super().__init__()
        assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
        self.conv = keras.layers.Conv2D(filters=c2,
                                        kernel_size=k,
                                        strides=s,
                                        padding='VALID',
                                        use_bias=bias,
                                        kernel_initializer=keras.initializers.Constant(
                                            w.weight.permute(2, 3, 1, 0).numpy()),
                                        bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None)

    def call(self, inputs):
        return self.conv(inputs)


class TFBottleneckCSP(keras.layers.Layer):
    # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
        # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
        self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)
        self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3)
        self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4)
        self.bn = TFBN(w.bn)
        self.act = lambda x: keras.activations.swish(x)
        self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])

    def call(self, inputs):
        y1 = self.cv3(self.m(self.cv1(inputs)))
        y2 = self.cv2(inputs)
        return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3))))


class TFC3(keras.layers.Layer):
    # CSP Bottleneck with 3 convolutions
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
        # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
        self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
        self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
        self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])

    def call(self, inputs):
        return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))


class TFC3x(keras.layers.Layer):
    # 3 module with cross-convolutions
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
        # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
        self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
        self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
        self.m = keras.Sequential([
            TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)])

    def call(self, inputs):
        return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))


class TFSPP(keras.layers.Layer):
    # Spatial pyramid pooling layer used in YOLOv3-SPP
    def __init__(self, c1, c2, k=(5, 9, 13), w=None):
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
        self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)
        self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k]

    def call(self, inputs):
        x = self.cv1(inputs)
        return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3))


class TFSPPF(keras.layers.Layer):
    # Spatial pyramid pooling-Fast layer
    def __init__(self, c1, c2, k=5, w=None):
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
        self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)
        self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME')

    def call(self, inputs):
        x = self.cv1(inputs)
        y1 = self.m(x)
        y2 = self.m(y1)
        return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3))


class TFDetect(keras.layers.Layer):
    # TF YOLOv5 Detect layer
    def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None):  # detection layer
        super().__init__()
        self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
        self.nc = nc  # number of classes
        self.no = nc + 5  # number of outputs per anchor
        self.nl = len(anchors)  # number of detection layers
        self.na = len(anchors[0]) // 2  # number of anchors
        self.grid = [tf.zeros(1)] * self.nl  # init grid
        self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32)
        self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2])
        self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]
        self.training = False  # set to False after building model
        self.imgsz = imgsz
        for i in range(self.nl):
            ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
            self.grid[i] = self._make_grid(nx, ny)

    def call(self, inputs):
        z = []  # inference output
        x = []
        for i in range(self.nl):
            x.append(self.m[i](inputs[i]))
            # x(bs,20,20,255) to x(bs,3,20,20,85)
            ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
            x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no])

            if not self.training:  # inference
                y = x[i]
                grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5
                anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4
                xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[i]  # xy
                wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid
                # Normalize xywh to 0-1 to reduce calibration error
                xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
                wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
                y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1)
                z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no]))

        return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),)

    @staticmethod
    def _make_grid(nx=20, ny=20):
        # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
        # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
        xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny))
        return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32)


class TFSegment(TFDetect):
    # YOLOv5 Segment head for segmentation models
    def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None):
        super().__init__(nc, anchors, ch, imgsz, w)
        self.nm = nm  # number of masks
        self.npr = npr  # number of protos
        self.no = 5 + nc + self.nm  # number of outputs per anchor
        self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]  # output conv
        self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto)  # protos
        self.detect = TFDetect.call

    def call(self, x):
        p = self.proto(x[0])
        # p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0]))  # (optional) full-size protos
        p = tf.transpose(p, [0, 3, 1, 2])  # from shape(1,160,160,32) to shape(1,32,160,160)
        x = self.detect(self, x)
        return (x, p) if self.training else (x[0], p)


class TFProto(keras.layers.Layer):

    def __init__(self, c1, c_=256, c2=32, w=None):
        super().__init__()
        self.cv1 = TFConv(c1, c_, k=3, w=w.cv1)
        self.upsample = TFUpsample(None, scale_factor=2, mode='nearest')
        self.cv2 = TFConv(c_, c_, k=3, w=w.cv2)
        self.cv3 = TFConv(c_, c2, w=w.cv3)

    def call(self, inputs):
        return self.cv3(self.cv2(self.upsample(self.cv1(inputs))))


class TFUpsample(keras.layers.Layer):
    # TF version of torch.nn.Upsample()
    def __init__(self, size, scale_factor, mode, w=None):  # warning: all arguments needed including 'w'
        super().__init__()
        assert scale_factor % 2 == 0, "scale_factor must be multiple of 2"
        self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode)
        # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
        # with default arguments: align_corners=False, half_pixel_centers=False
        # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x,
        #                                                            size=(x.shape[1] * 2, x.shape[2] * 2))

    def call(self, inputs):
        return self.upsample(inputs)


class TFConcat(keras.layers.Layer):
    # TF version of torch.concat()
    def __init__(self, dimension=1, w=None):
        super().__init__()
        assert dimension == 1, "convert only NCHW to NHWC concat"
        self.d = 3

    def call(self, inputs):
        return tf.concat(inputs, self.d)


def parse_model(d, ch, model, imgsz):  # model_dict, input_channels(3)
    LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)

    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
        m_str = m
        m = eval(m) if isinstance(m, str) else m  # eval strings
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
            except NameError:
                pass

        n = max(round(n * gd), 1) if n > 1 else n  # depth gain
        if m in [
                nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv,
                BottleneckCSP, C3, C3x]:
            c1, c2 = ch[f], args[0]
            c2 = make_divisible(c2 * gw, 8) if c2 != no else c2

            args = [c1, c2, *args[1:]]
            if m in [BottleneckCSP, C3, C3x]:
                args.insert(2, n)
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
        elif m in [Detect, Segment]:
            args.append([ch[x + 1] for x in f])
            if isinstance(args[1], int):  # number of anchors
                args[1] = [list(range(args[1] * 2))] * len(f)
            if m is Segment:
                args[3] = make_divisible(args[3] * gw, 8)
            args.append(imgsz)
        else:
            c2 = ch[f]

        tf_m = eval('TF' + m_str.replace('nn.', ''))
        m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \
            else tf_m(*args, w=model.model[i])  # module

        torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        np = sum(x.numel() for x in torch_m_.parameters())  # number params
        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
        LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}')  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        ch.append(c2)
    return keras.Sequential(layers), sorted(save)


class TFModel:
    # TF YOLOv5 model
    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)):  # model, channels, classes
        super().__init__()
        if isinstance(cfg, dict):
            self.yaml = cfg  # model dict
        else:  # is *.yaml
            import yaml  # for torch hub
            self.yaml_file = Path(cfg).name
            with open(cfg) as f:
                self.yaml = yaml.load(f, Loader=yaml.FullLoader)  # model dict

        # Define model
        if nc and nc != self.yaml['nc']:
            LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
            self.yaml['nc'] = nc  # override yaml value
        self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)

    def predict(self,
                inputs,
                tf_nms=False,
                agnostic_nms=False,
                topk_per_class=100,
                topk_all=100,
                iou_thres=0.45,
                conf_thres=0.25):
        y = []  # outputs
        x = inputs
        for m in self.model.layers:
            if m.f != -1:  # if not from previous layer
                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers

            x = m(x)  # run
            y.append(x if m.i in self.savelist else None)  # save output

        # Add TensorFlow NMS
        if tf_nms:
            boxes = self._xywh2xyxy(x[0][..., :4])
            probs = x[0][:, :, 4:5]
            classes = x[0][:, :, 5:]
            scores = probs * classes
            if agnostic_nms:
                nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres)
            else:
                boxes = tf.expand_dims(boxes, 2)
                nms = tf.image.combined_non_max_suppression(boxes,
                                                            scores,
                                                            topk_per_class,
                                                            topk_all,
                                                            iou_thres,
                                                            conf_thres,
                                                            clip_boxes=False)
            return (nms,)
        return x  # output [1,6300,85] = [xywh, conf, class0, class1, ...]
        # x = x[0]  # [x(1,6300,85), ...] to x(6300,85)
        # xywh = x[..., :4]  # x(6300,4) boxes
        # conf = x[..., 4:5]  # x(6300,1) confidences
        # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1))  # x(6300,1) classes
        # return tf.concat([conf, cls, xywh], 1)

    @staticmethod
    def _xywh2xyxy(xywh):
        # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
        x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1)
        return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)


class AgnosticNMS(keras.layers.Layer):
    # TF Agnostic NMS
    def call(self, input, topk_all, iou_thres, conf_thres):
        # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450
        return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres),
                         input,
                         fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32),
                         name='agnostic_nms')

    @staticmethod
    def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25):  # agnostic NMS
        boxes, classes, scores = x
        class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32)
        scores_inp = tf.reduce_max(scores, -1)
        selected_inds = tf.image.non_max_suppression(boxes,
                                                     scores_inp,
                                                     max_output_size=topk_all,
                                                     iou_threshold=iou_thres,
                                                     score_threshold=conf_thres)
        selected_boxes = tf.gather(boxes, selected_inds)
        padded_boxes = tf.pad(selected_boxes,
                              paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]],
                              mode="CONSTANT",
                              constant_values=0.0)
        selected_scores = tf.gather(scores_inp, selected_inds)
        padded_scores = tf.pad(selected_scores,
                               paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
                               mode="CONSTANT",
                               constant_values=-1.0)
        selected_classes = tf.gather(class_inds, selected_inds)
        padded_classes = tf.pad(selected_classes,
                                paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
                                mode="CONSTANT",
                                constant_values=-1.0)
        valid_detections = tf.shape(selected_inds)[0]
        return padded_boxes, padded_scores, padded_classes, valid_detections


def activations(act=nn.SiLU):
    # Returns TF activation from input PyTorch activation
    if isinstance(act, nn.LeakyReLU):
        return lambda x: keras.activations.relu(x, alpha=0.1)
    elif isinstance(act, nn.Hardswish):
        return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667
    elif isinstance(act, (nn.SiLU, SiLU)):
        return lambda x: keras.activations.swish(x)
    else:
        raise Exception(f'no matching TensorFlow activation found for PyTorch activation {act}')


def representative_dataset_gen(dataset, ncalib=100):
    # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays
    for n, (path, img, im0s, vid_cap, string) in enumerate(dataset):
        im = np.transpose(img, [1, 2, 0])
        im = np.expand_dims(im, axis=0).astype(np.float32)
        im /= 255
        yield [im]
        if n >= ncalib:
            break


def run(
        weights=ROOT / 'yolov5s.pt',  # weights path
        imgsz=(640, 640),  # inference size h,w
        batch_size=1,  # batch size
        dynamic=False,  # dynamic batch size
):
    # PyTorch model
    im = torch.zeros((batch_size, 3, *imgsz))  # BCHW image
    model = attempt_load(weights, device=torch.device('cpu'), inplace=True, fuse=False)
    _ = model(im)  # inference
    model.info()

    # TensorFlow model
    im = tf.zeros((batch_size, *imgsz, 3))  # BHWC image
    tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
    _ = tf_model.predict(im)  # inference

    # Keras model
    im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
    keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im))
    keras_model.summary()

    LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.')


def parse_opt():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
    parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
    parser.add_argument('--batch-size', type=int, default=1, help='batch size')
    parser.add_argument('--dynamic', action='store_true', help='dynamic batch size')
    opt = parser.parse_args()
    opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
    print_args(vars(opt))
    return opt


def main(opt):
    run(**vars(opt))


if __name__ == "__main__":
    opt = parse_opt()
    main(opt)

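A recurring pattern in the deleted tf.py is transplanting PyTorch conv weights into Keras layers via permute(2, 3, 1, 0), i.e. OIHW to HWIO, paired with an NCHW-to-NHWC transpose of the input. A self-contained sketch checking that equivalence; the layer sizes and tolerance here are illustrative and not part of the deleted file:

# Standalone check of the OIHW -> HWIO weight transfer used throughout tf.py.
import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
from tensorflow import keras

torch_conv = nn.Conv2d(3, 8, kernel_size=1, bias=True)
keras_conv = keras.layers.Conv2D(
    filters=8, kernel_size=1, padding='VALID',
    kernel_initializer=keras.initializers.Constant(torch_conv.weight.permute(2, 3, 1, 0).detach().numpy()),
    bias_initializer=keras.initializers.Constant(torch_conv.bias.detach().numpy()))

x = np.random.rand(1, 16, 16, 3).astype(np.float32)  # NHWC layout for TF
y_tf = keras_conv(x).numpy()
with torch.no_grad():
    y_pt = torch_conv(torch.from_numpy(x).permute(0, 3, 1, 2))  # NCHW layout for PyTorch
# Both paths should agree to within float32 round-off (on the order of 1e-6).
print(np.abs(y_tf - y_pt.permute(0, 2, 3, 1).numpy()).max())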
spaces/ArtyomKhyan/Detection/utils/datasets.py
DELETED
@@ -1,887 +0,0 @@
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

from utils.utils import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']

# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break


def exif_size(img):
    # Returns exif-corrected PIL size
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation == 6:  # rotation 270
            s = (s[1], s[0])
        elif rotation == 8:  # rotation 90
            s = (s[1], s[0])
    except:
        pass

    return s


def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False):
    dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                  augment=augment,  # augment images
                                  hyp=hyp,  # augmentation hyperparameters
                                  rect=rect,  # rectangular training
                                  cache_images=cache,
                                  single_cls=opt.single_cls,
                                  stride=stride,
                                  pad=pad)

    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             num_workers=nw,
                                             pin_memory=True,
                                             collate_fn=LoadImagesAndLabels.collate_fn)
    return dataloader, dataset


class LoadImages:  # for inference
    def __init__(self, path, img_size=640):
        path = str(Path(path))  # os-agnostic
        files = []
        if os.path.isdir(path):
            files = sorted(glob.glob(os.path.join(path, '*.*')))
        elif os.path.isfile(path):
            files = [path]

        images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
        videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
        nI, nV = len(images), len(videos)

        self.img_size = img_size
        self.files = images + videos
        self.nF = nI + nV  # number of files
        self.video_flag = [False] * nI + [True] * nV
        self.mode = 'images'
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nF > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
                            (path, img_formats, vid_formats)

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        if self.count == self.nF:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                self.count += 1
                self.cap.release()
                if self.count == self.nF:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print('image %g/%g %s: ' % (self.count, self.nF, path), end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        # cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1])  # save letterbox image
        return path, img, img0, self.cap

    def new_video(self, path):
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nF  # number of files


class LoadWebcam:  # for inference
    def __init__(self, pipe=0, img_size=640):
        self.img_size = img_size

        if pipe == '0':
            pipe = 0  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:[email protected]/1'  # IP camera with login
        # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
        # pipe = '"rtspsrc location="rtsp://username:[email protected]/1" latency=10 ! appsink'  # GStreamer

        # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help
        # pipe = "rtspsrc location=rtsp://root:[email protected]:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"  # GStreamer

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right
        else:  # IP camera
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, 'Camera Error %s' % self.pipe
        img_path = 'webcam.jpg'
        print('webcam %g: ' % self.count, end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0


class LoadStreams:  # multiple IP or RTSP cameras
    def __init__(self, sources='streams.txt', img_size=640):
        self.mode = 'images'
        self.img_size = img_size

        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years


class LoadImagesAndLabels(Dataset):  # for training/testing
    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0):
        try:
            path = str(Path(path))  # os-agnostic
            parent = str(Path(path).parent) + os.sep
            if os.path.isfile(path):  # file
                with open(path, 'r') as f:
                    f = f.read().splitlines()
                    f = [x.replace('./', parent) if x.startswith('./') else x for x in f]  # local to global path
            elif os.path.isdir(path):  # folder
                f = glob.iglob(path + os.sep + '*.*')
            else:
                raise Exception('%s does not exist' % path)
            self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
        except:
            raise Exception('Error loading data from %s. See %s' % (path, help_url))

        n = len(self.img_files)
        assert n > 0, 'No images found in %s. See %s' % (path, help_url)
        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
        nb = bi[-1] + 1  # number of batches

        self.n = n  # number of images
        self.batch = bi  # batch index of image
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride

        # Define labels
        self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
                            for x in self.img_files]

        # Read image shapes (wh)
        sp = path.replace('.txt', '') + '.shapes'  # shapefile path
        try:
            with open(sp, 'r') as f:  # read existing shapefile
                s = [x.split() for x in f.read().splitlines()]
                assert len(s) == n, 'Shapefile out of sync'
        except:
            s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
            np.savetxt(sp, s, fmt='%g')  # overwrites existing (if any)

        self.shapes = np.array(s, dtype=np.float64)

        # Rectangular Training  https://github.com/ultralytics/yolov3/issues/232
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride

        # Cache labels
        self.imgs = [None] * n
        self.labels = [np.zeros((0, 5), dtype=np.float32)] * n
        create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
        nm, nf, ne, ns, nd = 0, 0, 0, 0, 0  # number missing, found, empty, datasubset, duplicate
        np_labels_path = str(Path(self.label_files[0]).parent) + '.npy'  # saved labels in *.npy file
        if os.path.isfile(np_labels_path):
            s = np_labels_path  # print string
            x = np.load(np_labels_path, allow_pickle=True)
            if len(x) == n:
                self.labels = x
                labels_loaded = True
        else:
            s = path.replace('images', 'labels')

        pbar = tqdm(self.label_files)
        for i, file in enumerate(pbar):
            if labels_loaded:
                l = self.labels[i]
                # np.savetxt(file, l, '%g')  # save *.txt from *.npy file
            else:
                try:
                    with open(file, 'r') as f:
                        l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
                except:
                    nm += 1  # print('missing labels for image %s' % self.img_files[i])  # file missing
                    continue

            if l.shape[0]:
                assert l.shape[1] == 5, '> 5 label columns: %s' % file
                assert (l >= 0).all(), 'negative labels: %s' % file
                assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
                if np.unique(l, axis=0).shape[0] < l.shape[0]:  # duplicate rows
                    nd += 1  # print('WARNING: duplicate rows in %s' % self.label_files[i])  # duplicate rows
                if single_cls:
                    l[:, 0] = 0  # force dataset into single-class mode
                self.labels[i] = l
                nf += 1  # file found

                # Create subdataset (a smaller dataset)
                if create_datasubset and ns < 1E4:
                    if ns == 0:
                        create_folder(path='./datasubset')
                        os.makedirs('./datasubset/images')
                    exclude_classes = 43
                    if exclude_classes not in l[:, 0]:
                        ns += 1
                        # shutil.copy(src=self.img_files[i], dst='./datasubset/images/')  # copy image
                        with open('./datasubset/images.txt', 'a') as f:
                            f.write(self.img_files[i] + '\n')

                # Extract object detection boxes for a second stage classifier
                if extract_bounding_boxes:
                    p = Path(self.img_files[i])
                    img = cv2.imread(str(p))
                    h, w = img.shape[:2]
                    for j, x in enumerate(l):
                        f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
                        if not os.path.exists(Path(f).parent):
                            os.makedirs(Path(f).parent)  # make new output folder

                        b = x[1:] * [w, h, w, h]  # box
                        b[2:] = b[2:].max()  # rectangle to square
                        b[2:] = b[2:] * 1.3 + 30  # pad
                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)

                        b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                        b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                        assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
            else:
                ne += 1  # print('empty labels for image %s' % self.img_files[i])  # file empty
                # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i]))  # remove

            pbar.desc = 'Caching labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
                s, nf, nm, ne, nd, n)
        assert nf > 0 or n == 20288, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
        if not labels_loaded and n > 1000:
            print('Saving labels to %s for faster future loading' % np_labels_path)
            np.save(np_labels_path, self.labels)  # save for next time

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        if cache_images:  # if training
            gb = 0  # Gigabytes of cached images
            pbar = tqdm(range(len(self.img_files)), desc='Caching images')
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            for i in pbar:  # max 10k images
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i)  # img, hw_original, hw_resized
                gb += self.imgs[i].nbytes
                pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)

        # Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
        detect_corrupted_images = False
        if detect_corrupted_images:
            from skimage import io  # conda install -c conda-forge scikit-image
            for file in tqdm(self.img_files, desc='Detecting corrupted images'):
                try:
                    _ = io.imread(file)
                except:
                    print('Corrupted image detected: %s' % file)

    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        if self.image_weights:
            index = self.indices[index]

        hyp = self.hyp
        if self.mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            # Load labels
            labels = []
            x = self.labels[index]
            if x.size > 0:
                # Normalized xywh to pixel xyxy format
                labels = x.copy()
                labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]  # pad width
                labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]  # pad height
                labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
                labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]

        if self.augment:
            # Augment imagespace
            if not self.mosaic:
                img, labels = random_affine(img, labels,
                                            degrees=hyp['degrees'],
                                            translate=hyp['translate'],
                                            scale=hyp['scale'],
                                            shear=hyp['shear'])

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            # convert xyxy to xywh
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])

            # Normalize coordinates 0 - 1
            labels[:, [2, 4]] /= img.shape[0]  # height
            labels[:, [1, 3]] /= img.shape[1]  # width

        if self.augment:
            # random left-right flip
            lr_flip = True
            if lr_flip and random.random() < 0.5:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

            # random up-down flip
            ud_flip = False
            if ud_flip and random.random() < 0.5:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes


def load_image(self, index):
    # loads 1 image from dataset, returns img, original hw, resized hw
    img = self.imgs[index]
    if img is None:  # not cached
        path = self.img_files[index]
        img = cv2.imread(path)  # BGR
        assert img is not None, 'Image Not Found ' + path
        h0, w0 = img.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)  # resize image to img_size
        if r != 1:  # always resize down, only resize up if training with augmentation
            interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
            img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
        return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
    else:
        return self.imgs[index], self.img_hw0[index], self.img_hw[index]  # img, hw_original, hw_resized


def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
|
568 |
-
dtype = img.dtype # uint8
|
569 |
-
|
570 |
-
x = np.arange(0, 256, dtype=np.int16)
|
571 |
-
lut_hue = ((x * r[0]) % 180).astype(dtype)
|
572 |
-
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
|
573 |
-
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
|
574 |
-
|
575 |
-
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
|
576 |
-
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
|
577 |
-
|
578 |
-
# Histogram equalization
|
579 |
-
# if random.random() < 0.2:
|
580 |
-
# for i in range(3):
|
581 |
-
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
|
582 |
-
|
583 |
-
|
584 |
-
def load_mosaic(self, index):
|
585 |
-
# loads images in a mosaic
|
586 |
-
|
587 |
-
labels4 = []
|
588 |
-
s = self.img_size
|
589 |
-
yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y
|
590 |
-
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
|
591 |
-
for i, index in enumerate(indices):
|
592 |
-
# Load image
|
593 |
-
img, _, (h, w) = load_image(self, index)
|
594 |
-
|
595 |
-
# place img in img4
|
596 |
-
if i == 0: # top left
|
597 |
-
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
|
598 |
-
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
|
599 |
-
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
|
600 |
-
elif i == 1: # top right
|
601 |
-
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
|
602 |
-
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
|
603 |
-
elif i == 2: # bottom left
|
604 |
-
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
|
605 |
-
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
|
606 |
-
elif i == 3: # bottom right
|
607 |
-
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
|
608 |
-
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
|
609 |
-
|
610 |
-
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
|
611 |
-
padw = x1a - x1b
|
612 |
-
padh = y1a - y1b
|
613 |
-
|
614 |
-
# Labels
|
615 |
-
x = self.labels[index]
|
616 |
-
labels = x.copy()
|
617 |
-
if x.size > 0: # Normalized xywh to pixel xyxy format
|
618 |
-
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
|
619 |
-
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
|
620 |
-
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
|
621 |
-
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
|
622 |
-
labels4.append(labels)
|
623 |
-
|
624 |
-
# Concat/clip labels
|
625 |
-
if len(labels4):
|
626 |
-
labels4 = np.concatenate(labels4, 0)
|
627 |
-
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
|
628 |
-
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
|
629 |
-
|
630 |
-
# Replicate
|
631 |
-
# img4, labels4 = replicate(img4, labels4)
|
632 |
-
|
633 |
-
# Augment
|
634 |
-
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
|
635 |
-
img4, labels4 = random_affine(img4, labels4,
|
636 |
-
degrees=self.hyp['degrees'],
|
637 |
-
translate=self.hyp['translate'],
|
638 |
-
scale=self.hyp['scale'],
|
639 |
-
shear=self.hyp['shear'],
|
640 |
-
border=self.mosaic_border) # border to remove
|
641 |
-
|
642 |
-
return img4, labels4
|
643 |
-
|
644 |
-
|
645 |
-
def replicate(img, labels):
|
646 |
-
# Replicate labels
|
647 |
-
h, w = img.shape[:2]
|
648 |
-
boxes = labels[:, 1:].astype(int)
|
649 |
-
x1, y1, x2, y2 = boxes.T
|
650 |
-
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
|
651 |
-
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
|
652 |
-
x1b, y1b, x2b, y2b = boxes[i]
|
653 |
-
bh, bw = y2b - y1b, x2b - x1b
|
654 |
-
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
|
655 |
-
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
|
656 |
-
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
|
657 |
-
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
|
658 |
-
|
659 |
-
return img, labels
|
660 |
-
|
661 |
-
|
662 |
-
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
|
663 |
-
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
|
664 |
-
shape = img.shape[:2] # current shape [height, width]
|
665 |
-
if isinstance(new_shape, int):
|
666 |
-
new_shape = (new_shape, new_shape)
|
667 |
-
|
668 |
-
# Scale ratio (new / old)
|
669 |
-
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
|
670 |
-
if not scaleup: # only scale down, do not scale up (for better test mAP)
|
671 |
-
r = min(r, 1.0)
|
672 |
-
|
673 |
-
# Compute padding
|
674 |
-
ratio = r, r # width, height ratios
|
675 |
-
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
|
676 |
-
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
|
677 |
-
if auto: # minimum rectangle
|
678 |
-
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
|
679 |
-
elif scaleFill: # stretch
|
680 |
-
dw, dh = 0.0, 0.0
|
681 |
-
new_unpad = new_shape
|
682 |
-
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
|
683 |
-
|
684 |
-
dw /= 2 # divide padding into 2 sides
|
685 |
-
dh /= 2
|
686 |
-
|
687 |
-
if shape[::-1] != new_unpad: # resize
|
688 |
-
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
|
689 |
-
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
|
690 |
-
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
|
691 |
-
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
|
692 |
-
return img, ratio, (dw, dh)
|
693 |
-
|
694 |
-
|
695 |
-
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=(0, 0)):
|
696 |
-
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
|
697 |
-
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
|
698 |
-
# targets = [cls, xyxy]
|
699 |
-
|
700 |
-
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
|
701 |
-
width = img.shape[1] + border[1] * 2
|
702 |
-
|
703 |
-
# Rotation and Scale
|
704 |
-
R = np.eye(3)
|
705 |
-
a = random.uniform(-degrees, degrees)
|
706 |
-
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
|
707 |
-
s = random.uniform(1 - scale, 1 + scale)
|
708 |
-
# s = 2 ** random.uniform(-scale, scale)
|
709 |
-
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
|
710 |
-
|
711 |
-
# Translation
|
712 |
-
T = np.eye(3)
|
713 |
-
T[0, 2] = random.uniform(-translate, translate) * img.shape[1] + border[1] # x translation (pixels)
|
714 |
-
T[1, 2] = random.uniform(-translate, translate) * img.shape[0] + border[0] # y translation (pixels)
|
715 |
-
|
716 |
-
# Shear
|
717 |
-
S = np.eye(3)
|
718 |
-
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
|
719 |
-
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
|
720 |
-
|
721 |
-
# Combined rotation matrix
|
722 |
-
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
|
723 |
-
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
|
724 |
-
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
|
725 |
-
|
726 |
-
# Transform label coordinates
|
727 |
-
n = len(targets)
|
728 |
-
if n:
|
729 |
-
# warp points
|
730 |
-
xy = np.ones((n * 4, 3))
|
731 |
-
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
|
732 |
-
xy = (xy @ M.T)[:, :2].reshape(n, 8)
|
733 |
-
|
734 |
-
# create new boxes
|
735 |
-
x = xy[:, [0, 2, 4, 6]]
|
736 |
-
y = xy[:, [1, 3, 5, 7]]
|
737 |
-
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
|
738 |
-
|
739 |
-
# # apply angle-based reduction of bounding boxes
|
740 |
-
# radians = a * math.pi / 180
|
741 |
-
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
|
742 |
-
# x = (xy[:, 2] + xy[:, 0]) / 2
|
743 |
-
# y = (xy[:, 3] + xy[:, 1]) / 2
|
744 |
-
# w = (xy[:, 2] - xy[:, 0]) * reduction
|
745 |
-
# h = (xy[:, 3] - xy[:, 1]) * reduction
|
746 |
-
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
|
747 |
-
|
748 |
-
# reject warped points outside of image
|
749 |
-
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
|
750 |
-
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
|
751 |
-
w = xy[:, 2] - xy[:, 0]
|
752 |
-
h = xy[:, 3] - xy[:, 1]
|
753 |
-
area = w * h
|
754 |
-
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
|
755 |
-
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
|
756 |
-
i = (w > 2) & (h > 2) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 20)
|
757 |
-
|
758 |
-
targets = targets[i]
|
759 |
-
targets[:, 1:5] = xy[i]
|
760 |
-
|
761 |
-
return img, targets
|
762 |
-
|
763 |
-
|
764 |
-
def cutout(image, labels):
|
765 |
-
# https://arxiv.org/abs/1708.04552
|
766 |
-
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
|
767 |
-
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
|
768 |
-
h, w = image.shape[:2]
|
769 |
-
|
770 |
-
def bbox_ioa(box1, box2):
|
771 |
-
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
|
772 |
-
box2 = box2.transpose()
|
773 |
-
|
774 |
-
# Get the coordinates of bounding boxes
|
775 |
-
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
|
776 |
-
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
|
777 |
-
|
778 |
-
# Intersection area
|
779 |
-
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
|
780 |
-
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
|
781 |
-
|
782 |
-
# box2 area
|
783 |
-
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
|
784 |
-
|
785 |
-
# Intersection over box2 area
|
786 |
-
|
787 |
-
return inter_area / box2_area
|
788 |
-
|
789 |
-
# create random masks
|
790 |
-
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
|
791 |
-
for s in scales:
|
792 |
-
mask_h = random.randint(1, int(h * s))
|
793 |
-
mask_w = random.randint(1, int(w * s))
|
794 |
-
|
795 |
-
# box
|
796 |
-
xmin = max(0, random.randint(0, w) - mask_w // 2)
|
797 |
-
ymin = max(0, random.randint(0, h) - mask_h // 2)
|
798 |
-
xmax = min(w, xmin + mask_w)
|
799 |
-
ymax = min(h, ymin + mask_h)
|
800 |
-
|
801 |
-
# apply random color mask
|
802 |
-
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
|
803 |
-
|
804 |
-
# return unobscured labels
|
805 |
-
if len(labels) and s > 0.03:
|
806 |
-
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
|
807 |
-
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
|
808 |
-
labels = labels[ioa < 0.60] # remove >60% obscured labels
|
809 |
-
|
810 |
-
return labels
|
811 |
-
|
812 |
-
|
813 |
-
def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
|
814 |
-
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
|
815 |
-
path_new = path + '_reduced' # reduced images path
|
816 |
-
create_folder(path_new)
|
817 |
-
for f in tqdm(glob.glob('%s/*.*' % path)):
|
818 |
-
try:
|
819 |
-
img = cv2.imread(f)
|
820 |
-
h, w = img.shape[:2]
|
821 |
-
r = img_size / max(h, w) # size ratio
|
822 |
-
if r < 1.0:
|
823 |
-
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
|
824 |
-
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
|
825 |
-
cv2.imwrite(fnew, img)
|
826 |
-
except:
|
827 |
-
print('WARNING: image failure %s' % f)
|
828 |
-
|
829 |
-
|
830 |
-
def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
|
831 |
-
# Save images
|
832 |
-
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
|
833 |
-
# for path in ['../coco/images/val2014', '../coco/images/train2014']:
|
834 |
-
for path in ['../data/sm4/images', '../data/sm4/background']:
|
835 |
-
create_folder(path + 'bmp')
|
836 |
-
for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
|
837 |
-
for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
|
838 |
-
cv2.imwrite(f.replace(ext.lower(), '.bmp').replace(path, path + 'bmp'), cv2.imread(f))
|
839 |
-
|
840 |
-
# Save labels
|
841 |
-
# for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
|
842 |
-
for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
|
843 |
-
with open(file, 'r') as f:
|
844 |
-
lines = f.read()
|
845 |
-
# lines = f.read().replace('2014/', '2014bmp/') # coco
|
846 |
-
lines = lines.replace('/images', '/imagesbmp')
|
847 |
-
lines = lines.replace('/background', '/backgroundbmp')
|
848 |
-
for ext in formats:
|
849 |
-
lines = lines.replace(ext, '.bmp')
|
850 |
-
with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
|
851 |
-
f.write(lines)
|
852 |
-
|
853 |
-
|
854 |
-
def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
|
855 |
-
# Converts dataset to bmp (for faster training)
|
856 |
-
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
|
857 |
-
for a, b, files in os.walk(dataset):
|
858 |
-
for file in tqdm(files, desc=a):
|
859 |
-
p = a + '/' + file
|
860 |
-
s = Path(file).suffix
|
861 |
-
if s == '.txt': # replace text
|
862 |
-
with open(p, 'r') as f:
|
863 |
-
lines = f.read()
|
864 |
-
for f in formats:
|
865 |
-
lines = lines.replace(f, '.bmp')
|
866 |
-
with open(p, 'w') as f:
|
867 |
-
f.write(lines)
|
868 |
-
elif s in formats: # replace image
|
869 |
-
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
|
870 |
-
if s != '.bmp':
|
871 |
-
os.system("rm '%s'" % p)
|
872 |
-
|
873 |
-
|
874 |
-
def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder()
|
875 |
-
# Copies all the images in a text file (list of images) into a folder
|
876 |
-
create_folder(path[:-4])
|
877 |
-
with open(path, 'r') as f:
|
878 |
-
for line in f.read().splitlines():
|
879 |
-
os.system('cp "%s" %s' % (line, path[:-4]))
|
880 |
-
print(line)
|
881 |
-
|
882 |
-
|
883 |
-
def create_folder(path='./new_folder'):
|
884 |
-
# Create folder
|
885 |
-
if os.path.exists(path):
|
886 |
-
shutil.rmtree(path) # delete output folder
|
887 |
-
os.makedirs(path) # make new output folder
|
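The normalized-xywh to pixel-xyxy label conversion that `__getitem__` and `load_mosaic` above both perform can be checked in isolation. A minimal sketch with NumPy; the values of `w`, `h`, `ratio` and `pad` are illustrative stand-ins for what `load_image` and `letterbox` would return:

import numpy as np

x = np.array([[0, 0.5, 0.5, 0.2, 0.4]])  # one label: [class, xc, yc, w, h], normalized 0-1
w, h = 640, 480                           # resized image width and height
ratio, pad = (1.0, 1.0), (16.0, 0.0)      # scale ratios and padding, as from letterbox()

labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]  # x1
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]  # y1
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]  # x2
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]  # y2
print(labels)  # [[0., 272., 144., 400., 336.]] -> pixel-space xyxy box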
spaces/AtomdffAI/wechatgpt4atom/bot/bot_factory.py
DELETED
@@ -1,26 +0,0 @@
-"""
-bot factory
-"""
-
-
-def create_bot(bot_type):
-    """
-    create a bot instance
-    :param bot_type: bot type code
-    :return: bot instance
-    """
-    if bot_type == 'baidu':
-        # Baidu Unit dialogue API
-        from bot.baidu.baidu_unit_bot import BaiduUnitBot
-        return BaiduUnitBot()
-
-    elif bot_type == 'chatGPT':
-        # ChatGPT web API
-        from bot.chatgpt.chat_gpt_bot import ChatGPTBot
-        return ChatGPTBot()
-
-    elif bot_type == 'openAI':
-        # official OpenAI conversation-model API
-        from bot.openai.open_ai_bot import OpenAIBot
-        return OpenAIBot()
-    raise RuntimeError
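For context, a sketch of how this factory would be called; it assumes the space's `bot` package is importable and the corresponding API credentials are configured:

from bot.bot_factory import create_bot

bot = create_bot('chatGPT')  # returns a ChatGPTBot instance
# Any unrecognized type falls through the if/elif chain and raises RuntimeError.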
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/signers.py
DELETED
@@ -1,832 +0,0 @@
-# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"). You
-# may not use this file except in compliance with the License. A copy of
-# the License is located at
-#
-# http://aws.amazon.com/apache2.0/
-#
-# or in the "license" file accompanying this file. This file is
-# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
-# ANY KIND, either express or implied. See the License for the specific
-# language governing permissions and limitations under the License.
-import base64
-import datetime
-import json
-import weakref
-
-import botocore
-import botocore.auth
-from botocore.awsrequest import create_request_object, prepare_request_dict
-from botocore.compat import OrderedDict
-from botocore.exceptions import (
-    UnknownClientMethodError,
-    UnknownSignatureVersionError,
-    UnsupportedSignatureVersionError,
-)
-from botocore.utils import ArnParser, datetime2timestamp
-
-# Keep these imported. There's pre-existing code that uses them.
-from botocore.utils import fix_s3_host  # noqa
-
-
-class RequestSigner:
-    """
-    An object to sign requests before they go out over the wire using
-    one of the authentication mechanisms defined in ``auth.py``. This
-    class fires two events scoped to a service and operation name:
-
-    * choose-signer: Allows overriding the auth signer name.
-    * before-sign: Allows mutating the request before signing.
-
-    Together these events allow for customization of the request
-    signing pipeline, including overrides, request path manipulation,
-    and disabling signing per operation.
-
-
-    :type service_id: botocore.model.ServiceId
-    :param service_id: The service id for the service, e.g. ``S3``
-
-    :type region_name: string
-    :param region_name: Name of the service region, e.g. ``us-east-1``
-
-    :type signing_name: string
-    :param signing_name: Service signing name. This is usually the
-                         same as the service name, but can differ. E.g.
-                         ``emr`` vs. ``elasticmapreduce``.
-
-    :type signature_version: string
-    :param signature_version: Signature name like ``v4``.
-
-    :type credentials: :py:class:`~botocore.credentials.Credentials`
-    :param credentials: User credentials with which to sign requests.
-
-    :type event_emitter: :py:class:`~botocore.hooks.BaseEventHooks`
-    :param event_emitter: Extension mechanism to fire events.
-    """
-
-    def __init__(
-        self,
-        service_id,
-        region_name,
-        signing_name,
-        signature_version,
-        credentials,
-        event_emitter,
-        auth_token=None,
-    ):
-        self._region_name = region_name
-        self._signing_name = signing_name
-        self._signature_version = signature_version
-        self._credentials = credentials
-        self._auth_token = auth_token
-        self._service_id = service_id
-
-        # We need weakref to prevent leaking memory in Python 2.6 on Linux 2.6
-        self._event_emitter = weakref.proxy(event_emitter)
-
-    @property
-    def region_name(self):
-        return self._region_name
-
-    @property
-    def signature_version(self):
-        return self._signature_version
-
-    @property
-    def signing_name(self):
-        return self._signing_name
-
-    def handler(self, operation_name=None, request=None, **kwargs):
-        # This is typically hooked up to the "request-created" event
-        # from a client's event emitter. When a new request is created
-        # this method is invoked to sign the request.
-        # Don't call this method directly.
-        return self.sign(operation_name, request)
-
-    def sign(
-        self,
-        operation_name,
-        request,
-        region_name=None,
-        signing_type='standard',
-        expires_in=None,
-        signing_name=None,
-    ):
-        """Sign a request before it goes out over the wire.
-
-        :type operation_name: string
-        :param operation_name: The name of the current operation, e.g.
-                               ``ListBuckets``.
-        :type request: AWSRequest
-        :param request: The request object to be sent over the wire.
-
-        :type region_name: str
-        :param region_name: The region to sign the request for.
-
-        :type signing_type: str
-        :param signing_type: The type of signing to perform. This can be one of
-            three possible values:
-
-            * 'standard' - This should be used for most requests.
-            * 'presign-url' - This should be used when pre-signing a request.
-            * 'presign-post' - This should be used when pre-signing an S3 post.
-
-        :type expires_in: int
-        :param expires_in: The number of seconds the presigned url is valid
-            for. This parameter is only valid for signing type 'presign-url'.
-
-        :type signing_name: str
-        :param signing_name: The name to use for the service when signing.
-        """
-        explicit_region_name = region_name
-        if region_name is None:
-            region_name = self._region_name
-
-        if signing_name is None:
-            signing_name = self._signing_name
-
-        signature_version = self._choose_signer(
-            operation_name, signing_type, request.context
-        )
-
-        # Allow mutating request before signing
-        self._event_emitter.emit(
-            'before-sign.{}.{}'.format(
-                self._service_id.hyphenize(), operation_name
-            ),
-            request=request,
-            signing_name=signing_name,
-            region_name=self._region_name,
-            signature_version=signature_version,
-            request_signer=self,
-            operation_name=operation_name,
-        )
-
-        if signature_version != botocore.UNSIGNED:
-            kwargs = {
-                'signing_name': signing_name,
-                'region_name': region_name,
-                'signature_version': signature_version,
-            }
-            if expires_in is not None:
-                kwargs['expires'] = expires_in
-            signing_context = request.context.get('signing', {})
-            if not explicit_region_name and signing_context.get('region'):
-                kwargs['region_name'] = signing_context['region']
-            if signing_context.get('signing_name'):
-                kwargs['signing_name'] = signing_context['signing_name']
-            try:
-                auth = self.get_auth_instance(**kwargs)
-            except UnknownSignatureVersionError as e:
-                if signing_type != 'standard':
-                    raise UnsupportedSignatureVersionError(
-                        signature_version=signature_version
-                    )
-                else:
-                    raise e
-
-            auth.add_auth(request)
-
-    def _choose_signer(self, operation_name, signing_type, context):
-        """
-        Allow setting the signature version via the choose-signer event.
-        A value of `botocore.UNSIGNED` means no signing will be performed.
-
-        :param operation_name: The operation to sign.
-        :param signing_type: The type of signing that the signer is to be used
-            for.
-        :return: The signature version to sign with.
-        """
-        signing_type_suffix_map = {
-            'presign-post': '-presign-post',
-            'presign-url': '-query',
-        }
-        suffix = signing_type_suffix_map.get(signing_type, '')
-
-        # operation specific signing context takes precedence over client-level
-        # defaults
-        signature_version = context.get('auth_type') or self._signature_version
-        signing = context.get('signing', {})
-        signing_name = signing.get('signing_name', self._signing_name)
-        region_name = signing.get('region', self._region_name)
-        if (
-            signature_version is not botocore.UNSIGNED
-            and not signature_version.endswith(suffix)
-        ):
-            signature_version += suffix
-
-        handler, response = self._event_emitter.emit_until_response(
-            'choose-signer.{}.{}'.format(
-                self._service_id.hyphenize(), operation_name
-            ),
-            signing_name=signing_name,
-            region_name=region_name,
-            signature_version=signature_version,
-            context=context,
-        )
-
-        if response is not None:
-            signature_version = response
-            # The suffix needs to be checked again in case we get an improper
-            # signature version from choose-signer.
-            if (
-                signature_version is not botocore.UNSIGNED
-                and not signature_version.endswith(suffix)
-            ):
-                signature_version += suffix
-
-        return signature_version
-
-    def get_auth_instance(
-        self, signing_name, region_name, signature_version=None, **kwargs
-    ):
-        """
-        Get an auth instance which can be used to sign a request
-        using the given signature version.
-
-        :type signing_name: string
-        :param signing_name: Service signing name. This is usually the
-                             same as the service name, but can differ. E.g.
-                             ``emr`` vs. ``elasticmapreduce``.
-
-        :type region_name: string
-        :param region_name: Name of the service region, e.g. ``us-east-1``
-
-        :type signature_version: string
-        :param signature_version: Signature name like ``v4``.
-
-        :rtype: :py:class:`~botocore.auth.BaseSigner`
-        :return: Auth instance to sign a request.
-        """
-        if signature_version is None:
-            signature_version = self._signature_version
-
-        cls = botocore.auth.AUTH_TYPE_MAPS.get(signature_version)
-        if cls is None:
-            raise UnknownSignatureVersionError(
-                signature_version=signature_version
-            )
-
-        if cls.REQUIRES_TOKEN is True:
-            frozen_token = None
-            if self._auth_token is not None:
-                frozen_token = self._auth_token.get_frozen_token()
-            auth = cls(frozen_token)
-            return auth
-
-        # If there's no credentials provided (i.e credentials is None),
-        # then we'll pass a value of "None" over to the auth classes,
-        # which already handle the cases where no credentials have
-        # been provided.
-        frozen_credentials = None
-        if self._credentials is not None:
-            frozen_credentials = self._credentials.get_frozen_credentials()
-        kwargs['credentials'] = frozen_credentials
-        if cls.REQUIRES_REGION:
-            if self._region_name is None:
-                raise botocore.exceptions.NoRegionError()
-            kwargs['region_name'] = region_name
-            kwargs['service_name'] = signing_name
-        auth = cls(**kwargs)
-        return auth
-
-    # Alias get_auth for backwards compatibility.
-    get_auth = get_auth_instance
-
-    def generate_presigned_url(
-        self,
-        request_dict,
-        operation_name,
-        expires_in=3600,
-        region_name=None,
-        signing_name=None,
-    ):
-        """Generates a presigned url
-
-        :type request_dict: dict
-        :param request_dict: The prepared request dictionary returned by
-            ``botocore.awsrequest.prepare_request_dict()``
-
-        :type operation_name: str
-        :param operation_name: The operation being signed.
-
-        :type expires_in: int
-        :param expires_in: The number of seconds the presigned url is valid
-            for. By default it expires in an hour (3600 seconds)
-
-        :type region_name: string
-        :param region_name: The region name to sign the presigned url.
-
-        :type signing_name: str
-        :param signing_name: The name to use for the service when signing.
-
-        :returns: The presigned url
-        """
-        request = create_request_object(request_dict)
-        self.sign(
-            operation_name,
-            request,
-            region_name,
-            'presign-url',
-            expires_in,
-            signing_name,
-        )
-
-        request.prepare()
-        return request.url
-
-
-class CloudFrontSigner:
-    '''A signer to create a signed CloudFront URL.
-
-    First you create a cloudfront signer based on a normalized RSA signer::
-
-        import rsa
-        def rsa_signer(message):
-            private_key = open('private_key.pem', 'r').read()
-            return rsa.sign(
-                message,
-                rsa.PrivateKey.load_pkcs1(private_key.encode('utf8')),
-                'SHA-1')  # CloudFront requires SHA-1 hash
-        cf_signer = CloudFrontSigner(key_id, rsa_signer)
-
-    To sign with a canned policy::
-
-        signed_url = cf_signer.generate_signed_url(
-            url, date_less_than=datetime(2015, 12, 1))
-
-    To sign with a custom policy::
-
-        signed_url = cf_signer.generate_signed_url(url, policy=my_policy)
-    '''
-
-    def __init__(self, key_id, rsa_signer):
-        """Create a CloudFrontSigner.
-
-        :type key_id: str
-        :param key_id: The CloudFront Key Pair ID
-
-        :type rsa_signer: callable
-        :param rsa_signer: An RSA signer.
-            Its only input parameter will be the message to be signed,
-            and its output will be the signed content as a binary string.
-            The hash algorithm needed by CloudFront is SHA-1.
-        """
-        self.key_id = key_id
-        self.rsa_signer = rsa_signer
-
-    def generate_presigned_url(self, url, date_less_than=None, policy=None):
-        """Creates a signed CloudFront URL based on given parameters.
-
-        :type url: str
-        :param url: The URL of the protected object
-
-        :type date_less_than: datetime
-        :param date_less_than: The URL will expire after that date and time
-
-        :type policy: str
-        :param policy: The custom policy, possibly built by self.build_policy()
-
-        :rtype: str
-        :return: The signed URL.
-        """
-        both_args_supplied = date_less_than is not None and policy is not None
-        neither_arg_supplied = date_less_than is None and policy is None
-        if both_args_supplied or neither_arg_supplied:
-            e = 'Need to provide either date_less_than or policy, but not both'
-            raise ValueError(e)
-        if date_less_than is not None:
-            # We still need to build a canned policy for signing purpose
-            policy = self.build_policy(url, date_less_than)
-        if isinstance(policy, str):
-            policy = policy.encode('utf8')
-        if date_less_than is not None:
-            params = ['Expires=%s' % int(datetime2timestamp(date_less_than))]
-        else:
-            params = ['Policy=%s' % self._url_b64encode(policy).decode('utf8')]
-        signature = self.rsa_signer(policy)
-        params.extend(
-            [
-                f"Signature={self._url_b64encode(signature).decode('utf8')}",
-                f"Key-Pair-Id={self.key_id}",
-            ]
-        )
-        return self._build_url(url, params)
-
-    def _build_url(self, base_url, extra_params):
-        separator = '&' if '?' in base_url else '?'
-        return base_url + separator + '&'.join(extra_params)
-
-    def build_policy(
-        self, resource, date_less_than, date_greater_than=None, ip_address=None
-    ):
-        """A helper to build policy.
-
-        :type resource: str
-        :param resource: The URL or the stream filename of the protected object
-
-        :type date_less_than: datetime
-        :param date_less_than: The URL will expire after the time has passed
-
-        :type date_greater_than: datetime
-        :param date_greater_than: The URL will not be valid until this time
-
-        :type ip_address: str
-        :param ip_address: Use 'x.x.x.x' for an IP, or 'x.x.x.x/x' for a subnet
-
-        :rtype: str
-        :return: The policy in a compact string.
-        """
-        # Note:
-        # 1. Order in canned policy is significant. Special care has been taken
-        #    to ensure the output will match the order defined by the document.
-        #    There is also a test case to ensure that order.
-        #    SEE: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html#private-content-canned-policy-creating-policy-statement
-        # 2. Albeit the order in custom policy is not required by CloudFront,
-        #    we still use OrderedDict internally to ensure the result is stable
-        #    and also matches canned policy requirement.
-        #    SEE: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-custom-policy.html
-        moment = int(datetime2timestamp(date_less_than))
-        condition = OrderedDict({"DateLessThan": {"AWS:EpochTime": moment}})
-        if ip_address:
-            if '/' not in ip_address:
-                ip_address += '/32'
-            condition["IpAddress"] = {"AWS:SourceIp": ip_address}
-        if date_greater_than:
-            moment = int(datetime2timestamp(date_greater_than))
-            condition["DateGreaterThan"] = {"AWS:EpochTime": moment}
-        ordered_payload = [('Resource', resource), ('Condition', condition)]
-        custom_policy = {"Statement": [OrderedDict(ordered_payload)]}
-        return json.dumps(custom_policy, separators=(',', ':'))
-
-    def _url_b64encode(self, data):
-        # Required by CloudFront. See also:
-        # http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-linux-openssl.html
-        return (
-            base64.b64encode(data)
-            .replace(b'+', b'-')
-            .replace(b'=', b'_')
-            .replace(b'/', b'~')
-        )
-
-
-def add_generate_db_auth_token(class_attributes, **kwargs):
-    class_attributes['generate_db_auth_token'] = generate_db_auth_token
-
-
-def generate_db_auth_token(self, DBHostname, Port, DBUsername, Region=None):
-    """Generates an auth token used to connect to a db with IAM credentials.
-
-    :type DBHostname: str
-    :param DBHostname: The hostname of the database to connect to.
-
-    :type Port: int
-    :param Port: The port number the database is listening on.
-
-    :type DBUsername: str
-    :param DBUsername: The username to log in as.
-
-    :type Region: str
-    :param Region: The region the database is in. If None, the client
-        region will be used.
-
-    :return: A presigned url which can be used as an auth token.
-    """
-    region = Region
-    if region is None:
-        region = self.meta.region_name
-
-    params = {
-        'Action': 'connect',
-        'DBUser': DBUsername,
-    }
-
-    request_dict = {
-        'url_path': '/',
-        'query_string': '',
-        'headers': {},
-        'body': params,
-        'method': 'GET',
-    }
-
-    # RDS requires that the scheme not be set when sent over. This can cause
-    # issues when signing because the Python url parsing libraries follow
-    # RFC 1808 closely, which states that a netloc must be introduced by `//`.
-    # Otherwise the url is presumed to be relative, and thus the whole
-    # netloc would be treated as a path component. To work around this we
-    # introduce https here and remove it once we're done processing it.
-    scheme = 'https://'
-    endpoint_url = f'{scheme}{DBHostname}:{Port}'
-    prepare_request_dict(request_dict, endpoint_url)
-    presigned_url = self._request_signer.generate_presigned_url(
-        operation_name='connect',
-        request_dict=request_dict,
-        region_name=region,
-        expires_in=900,
-        signing_name='rds-db',
-    )
-    return presigned_url[len(scheme) :]
-
-
-class S3PostPresigner:
-    def __init__(self, request_signer):
-        self._request_signer = request_signer
-
-    def generate_presigned_post(
-        self,
-        request_dict,
-        fields=None,
-        conditions=None,
-        expires_in=3600,
-        region_name=None,
-    ):
-        """Generates the url and the form fields used for a presigned s3 post
-
-        :type request_dict: dict
-        :param request_dict: The prepared request dictionary returned by
-            ``botocore.awsrequest.prepare_request_dict()``
-
-        :type fields: dict
-        :param fields: A dictionary of prefilled form fields to build on top
-            of.
-
-        :type conditions: list
-        :param conditions: A list of conditions to include in the policy. Each
-            element can be either a list or a structure. For example:
-            [
-                {"acl": "public-read"},
-                {"bucket": "mybucket"},
-                ["starts-with", "$key", "mykey"]
-            ]
-
-        :type expires_in: int
-        :param expires_in: The number of seconds the presigned post is valid
-            for.
-
-        :type region_name: string
-        :param region_name: The region name to sign the presigned post to.
-
-        :rtype: dict
-        :returns: A dictionary with two elements: ``url`` and ``fields``.
-            Url is the url to post to. Fields is a dictionary filled with
-            the form fields and respective values to use when submitting the
-            post. For example:
-
-            {'url': 'https://mybucket.s3.amazonaws.com',
-             'fields': {'acl': 'public-read',
-                        'key': 'mykey',
-                        'signature': 'mysignature',
-                        'policy': 'mybase64 encoded policy'}
-            }
-        """
-        if fields is None:
-            fields = {}
-
-        if conditions is None:
-            conditions = []
-
-        # Create the policy for the post.
-        policy = {}
-
-        # Create an expiration date for the policy
-        datetime_now = datetime.datetime.utcnow()
-        expire_date = datetime_now + datetime.timedelta(seconds=expires_in)
-        policy['expiration'] = expire_date.strftime(botocore.auth.ISO8601)
-
-        # Append all of the conditions that the user supplied.
-        policy['conditions'] = []
-        for condition in conditions:
-            policy['conditions'].append(condition)
-
-        # Store the policy and the fields in the request for signing
-        request = create_request_object(request_dict)
-        request.context['s3-presign-post-fields'] = fields
-        request.context['s3-presign-post-policy'] = policy
-
-        self._request_signer.sign(
-            'PutObject', request, region_name, 'presign-post'
-        )
-        # Return the url and the fields for the form to post.
-        return {'url': request.url, 'fields': fields}
-
-
-def add_generate_presigned_url(class_attributes, **kwargs):
-    class_attributes['generate_presigned_url'] = generate_presigned_url
-
-
-def generate_presigned_url(
-    self, ClientMethod, Params=None, ExpiresIn=3600, HttpMethod=None
-):
-    """Generate a presigned url given a client, its method, and arguments
-
-    :type ClientMethod: string
-    :param ClientMethod: The client method to presign for
-
-    :type Params: dict
-    :param Params: The parameters normally passed to
-        ``ClientMethod``.
-
-    :type ExpiresIn: int
-    :param ExpiresIn: The number of seconds the presigned url is valid
-        for. By default it expires in an hour (3600 seconds)
-
-    :type HttpMethod: string
-    :param HttpMethod: The http method to use on the generated url. By
-        default, the http method is whatever is used in the method's model.
-
-    :returns: The presigned url
-    """
-    client_method = ClientMethod
-    params = Params
-    if params is None:
-        params = {}
-    expires_in = ExpiresIn
-    http_method = HttpMethod
-    context = {
-        'is_presign_request': True,
-        'use_global_endpoint': _should_use_global_endpoint(self),
-    }
-
-    request_signer = self._request_signer
-
-    try:
-        operation_name = self._PY_TO_OP_NAME[client_method]
-    except KeyError:
-        raise UnknownClientMethodError(method_name=client_method)
-
-    operation_model = self.meta.service_model.operation_model(operation_name)
-    bucket_is_arn = ArnParser.is_arn(params.get('Bucket', ''))
-    endpoint_url, additional_headers = self._resolve_endpoint_ruleset(
-        operation_model,
-        params,
-        context,
-        ignore_signing_region=(not bucket_is_arn),
-    )
-
-    request_dict = self._convert_to_request_dict(
-        api_params=params,
-        operation_model=operation_model,
-        endpoint_url=endpoint_url,
-        context=context,
-        headers=additional_headers,
-        set_user_agent_header=False,
-    )
-
-    # Switch out the http method if user specified it.
-    if http_method is not None:
-        request_dict['method'] = http_method
-
-    # Generate the presigned url.
-    return request_signer.generate_presigned_url(
-        request_dict=request_dict,
-        expires_in=expires_in,
-        operation_name=operation_name,
-    )
-
-
-def add_generate_presigned_post(class_attributes, **kwargs):
-    class_attributes['generate_presigned_post'] = generate_presigned_post
-
-
-def generate_presigned_post(
-    self, Bucket, Key, Fields=None, Conditions=None, ExpiresIn=3600
-):
-    """Builds the url and the form fields used for a presigned s3 post
-
-    :type Bucket: string
-    :param Bucket: The name of the bucket to presign the post to. Note that
-        bucket related conditions should not be included in the
-        ``conditions`` parameter.
-
-    :type Key: string
-    :param Key: Key name, optionally add ${filename} to the end to
-        attach the submitted filename. Note that key related conditions and
-        fields are filled out for you and should not be included in the
-        ``Fields`` or ``Conditions`` parameter.
-
-    :type Fields: dict
-    :param Fields: A dictionary of prefilled form fields to build on top
-        of. Elements that may be included are acl, Cache-Control,
-        Content-Type, Content-Disposition, Content-Encoding, Expires,
-        success_action_redirect, redirect, success_action_status,
-        and x-amz-meta-.
-
-        Note that if a particular element is included in the fields
-        dictionary it will not be automatically added to the conditions
-        list. You must specify a condition for the element as well.
-
-    :type Conditions: list
-    :param Conditions: A list of conditions to include in the policy. Each
-        element can be either a list or a structure. For example:
-
-        [
-            {"acl": "public-read"},
-            ["content-length-range", 2, 5],
-            ["starts-with", "$success_action_redirect", ""]
-        ]
-
-        Conditions that are included may pertain to acl,
-        content-length-range, Cache-Control, Content-Type,
-        Content-Disposition, Content-Encoding, Expires,
-        success_action_redirect, redirect, success_action_status,
-        and/or x-amz-meta-.
-
-        Note that if you include a condition, you must specify
-        a valid value in the fields dictionary as well. A value will
-        not be added automatically to the fields dictionary based on the
-        conditions.
-
-    :type ExpiresIn: int
-    :param ExpiresIn: The number of seconds the presigned post
-        is valid for.
-
-    :rtype: dict
-    :returns: A dictionary with two elements: ``url`` and ``fields``.
-        Url is the url to post to. Fields is a dictionary filled with
-        the form fields and respective values to use when submitting the
-        post. For example:
-
-        {'url': 'https://mybucket.s3.amazonaws.com',
-         'fields': {'acl': 'public-read',
-                    'key': 'mykey',
-                    'signature': 'mysignature',
-                    'policy': 'mybase64 encoded policy'}
-        }
-    """
-    bucket = Bucket
-    key = Key
-    fields = Fields
-    conditions = Conditions
-    expires_in = ExpiresIn
-
-    if fields is None:
-        fields = {}
-    else:
-        fields = fields.copy()
-
-    if conditions is None:
-        conditions = []
-
-    context = {
-        'is_presign_request': True,
-        'use_global_endpoint': _should_use_global_endpoint(self),
-    }
-
-    post_presigner = S3PostPresigner(self._request_signer)
-
-    # We choose the CreateBucket operation model because its url gets
-    # serialized to what a presign post requires.
-    operation_model = self.meta.service_model.operation_model('CreateBucket')
-    params = {'Bucket': bucket}
-    bucket_is_arn = ArnParser.is_arn(params.get('Bucket', ''))
-    endpoint_url, additional_headers = self._resolve_endpoint_ruleset(
-        operation_model,
-        params,
-        context,
-        ignore_signing_region=(not bucket_is_arn),
-    )
-
-    request_dict = self._convert_to_request_dict(
-        api_params=params,
-        operation_model=operation_model,
-        endpoint_url=endpoint_url,
-        context=context,
-        headers=additional_headers,
-        set_user_agent_header=False,
-    )
-
-    # Append the bucket name to the list of conditions.
-    conditions.append({'bucket': bucket})
-
-    # If the key ends with filename, the only constraint that can be
-    # imposed is if it starts with the specified prefix.
-    if key.endswith('${filename}'):
-        conditions.append(["starts-with", '$key', key[: -len('${filename}')]])
-    else:
-        conditions.append({'key': key})
-
-    # Add the key to the fields.
-    fields['key'] = key
-
-    return post_presigner.generate_presigned_post(
-        request_dict=request_dict,
-        fields=fields,
-        conditions=conditions,
-        expires_in=expires_in,
-    )
-
-
-def _should_use_global_endpoint(client):
-    if client.meta.partition != 'aws':
-        return False
-    s3_config = client.meta.config.s3
-    if s3_config:
-        if s3_config.get('use_dualstack_endpoint', False):
-            return False
-        if (
-            s3_config.get('us_east_1_regional_endpoint') == 'regional'
-            and client.meta.config.region_name == 'us-east-1'
-        ):
-            return False
-    return True
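These signers back the public `generate_presigned_url` and `generate_presigned_post` methods that botocore patches onto clients. A minimal sketch of the boto3-level usage; the bucket and key names are placeholders, and valid AWS credentials are assumed:

import boto3

s3 = boto3.client('s3')

# Presigned GET URL, valid for one hour (the ExpiresIn default).
url = s3.generate_presigned_url(
    'get_object',
    Params={'Bucket': 'my-bucket', 'Key': 'my-key.txt'},
    ExpiresIn=3600,
)

# Presigned POST: returns the target URL plus the form fields a client must submit.
post = s3.generate_presigned_post('my-bucket', 'uploads/${filename}')
print(url)
print(post['url'], post['fields'])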
spaces/Bready11/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator/app.py
DELETED
@@ -1,3 +0,0 @@
import gradio as gr

gr.Interface.load("models/Onodofthenorth/SD_PixelArt_SpriteSheet_Generator").launch()
spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/classifier.py
DELETED
@@ -1,18 +0,0 @@
import torch.nn as nn
from torch.nn.utils.weight_norm import weight_norm


class SimpleClassifier(nn.Module):
    def __init__(self, in_dim, hid_dim, out_dim, dropout):
        super(SimpleClassifier, self).__init__()
        layers = [
            weight_norm(nn.Linear(in_dim, hid_dim), dim=None),
            nn.ReLU(),
            nn.Dropout(dropout, inplace=True),
            weight_norm(nn.Linear(hid_dim, out_dim), dim=None)
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, x):
        logits = self.main(x)
        return logits
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/PointRend/README.md
DELETED
@@ -1,115 +0,0 @@
# PointRend: Image Segmentation as Rendering

Alexander Kirillov, Yuxin Wu, Kaiming He, Ross Girshick

[[`arXiv`](https://arxiv.org/abs/1912.08193)] [[`BibTeX`](#CitingPointRend)]

<div align="center">
  <img src="https://alexander-kirillov.github.io/images/kirillov2019pointrend.jpg"/>
</div><br/>

In this repository, we release code for PointRend in Detectron2. PointRend can be flexibly applied to both instance and semantic (**coming soon**) segmentation tasks by building on top of existing state-of-the-art models.

## Installation
Install Detectron2 following [INSTALL.md](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md). You are ready to go!

## Quick start and visualization

This [Colab Notebook](https://colab.research.google.com/drive/1isGPL5h5_cKoPPhVL9XhMokRtHDvmMVL) tutorial contains examples of PointRend usage and visualizations of its point sampling stages.

## Training

To train a model with 8 GPUs run:
```bash
cd /path/to/detectron2/projects/PointRend
python train_net.py --config-file configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml --num-gpus 8
```

## Evaluation

Model evaluation can be done similarly:
```bash
cd /path/to/detectron2/projects/PointRend
python train_net.py --config-file configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml --eval-only MODEL.WEIGHTS /path/to/model_checkpoint
```

# Pretrained Models

## Instance Segmentation
#### COCO

<table><tbody>
<!-- START TABLE -->
<!-- TABLE HEADER -->
<th valign="bottom">Mask<br/>head</th>
<th valign="bottom">Backbone</th>
<th valign="bottom">lr<br/>sched</th>
<th valign="bottom">Output<br/>resolution</th>
<th valign="bottom">mask<br/>AP</th>
<th valign="bottom">mask<br/>AP*</th>
<th valign="bottom">model id</th>
<th valign="bottom">download</th>
<!-- TABLE BODY -->
<tr><td align="left"><a href="configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco.yaml">PointRend</a></td>
<td align="center">R50-FPN</td>
<td align="center">1&times;</td>
<td align="center">224&times;224</td>
<td align="center">36.2</td>
<td align="center">39.7</td>
<td align="center">164254221</td>
<td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/PointRend/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco/164254221/model_final_88c6f8.pkl">model</a>&nbsp;|&nbsp;<a href="https://dl.fbaipublicfiles.com/detectron2/PointRend/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_coco/164254221/metrics.json">metrics</a></td>
</tr>
<tr><td align="left"><a href="configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml">PointRend</a></td>
<td align="center">R50-FPN</td>
<td align="center">3&times;</td>
<td align="center">224&times;224</td>
<td align="center">38.3</td>
<td align="center">41.6</td>
<td align="center">164955410</td>
<td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/PointRend/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco/164955410/model_final_3c3198.pkl">model</a>&nbsp;|&nbsp;<a href="https://dl.fbaipublicfiles.com/detectron2/PointRend/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco/164955410/metrics.json">metrics</a></td>
</tr>
</tbody></table>

AP* is COCO mask AP evaluated against the higher-quality LVIS annotations; see the paper for details. Run `python detectron2/datasets/prepare_cocofied_lvis.py` to prepare GT files for AP* evaluation. Since LVIS annotations are not exhaustive, `lvis-api` and not `cocoapi` should be used to evaluate AP*.

#### Cityscapes
The Cityscapes model is trained with ImageNet pretraining.

<table><tbody>
<!-- START TABLE -->
<!-- TABLE HEADER -->
<th valign="bottom">Mask<br/>head</th>
<th valign="bottom">Backbone</th>
<th valign="bottom">lr<br/>sched</th>
<th valign="bottom">Output<br/>resolution</th>
<th valign="bottom">mask<br/>AP</th>
<th valign="bottom">model id</th>
<th valign="bottom">download</th>
<!-- TABLE BODY -->
<tr><td align="left"><a href="configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_cs.yaml">PointRend</a></td>
<td align="center">R50-FPN</td>
<td align="center">1&times;</td>
<td align="center">224&times;224</td>
<td align="center">35.9</td>
<td align="center">164255101</td>
<td align="center"><a href="https://dl.fbaipublicfiles.com/detectron2/PointRend/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_cityscapes/164255101/model_final_318a02.pkl">model</a>&nbsp;|&nbsp;<a href="https://dl.fbaipublicfiles.com/detectron2/PointRend/InstanceSegmentation/pointrend_rcnn_R_50_FPN_1x_cityscapes/164255101/metrics.json">metrics</a></td>
</tr>
</tbody></table>


## Semantic Segmentation

**[coming soon]**

## <a name="CitingPointRend"></a>Citing PointRend

If you use PointRend, please use the following BibTeX entry.

```BibTeX
@InProceedings{kirillov2019pointrend,
  title={{PointRend}: Image Segmentation as Rendering},
  author={Alexander Kirillov and Yuxin Wu and Kaiming He and Ross Girshick},
  journal={ArXiv:1912.08193},
  year={2019}
}
```
spaces/CVPR/LIVE/thrust/cmake/AppendOptionIfAvailable.cmake
DELETED
@@ -1,14 +0,0 @@
include_guard(GLOBAL)
include(CheckCXXCompilerFlag)

macro (APPEND_OPTION_IF_AVAILABLE _FLAG _LIST)

  string(MAKE_C_IDENTIFIER "CXX_FLAG_${_FLAG}" _VAR)
  check_cxx_compiler_flag(${_FLAG} ${_VAR})

  if (${${_VAR}})
    list(APPEND ${_LIST} ${_FLAG})
  endif ()

endmacro ()
spaces/CVPR/LIVE/thrust/thrust/detail/cstdint.h
DELETED
@@ -1,79 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_GCC) || (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_CLANG)
#include <stdint.h>
#endif

namespace thrust
{
namespace detail
{

#if (THRUST_HOST_COMPILER == THRUST_HOST_COMPILER_MSVC)

#if (_MSC_VER < 1300)
typedef signed   char  int8_t;
typedef signed   short int16_t;
typedef signed   int   int32_t;
typedef unsigned char  uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int   uint32_t;
#else
typedef signed   __int8  int8_t;
typedef signed   __int16 int16_t;
typedef signed   __int32 int32_t;
typedef unsigned __int8  uint8_t;
typedef unsigned __int16 uint16_t;
typedef unsigned __int32 uint32_t;
#endif
typedef signed   __int64 int64_t;
typedef unsigned __int64 uint64_t;

#else

typedef ::int8_t   int8_t;
typedef ::int16_t  int16_t;
typedef ::int32_t  int32_t;
typedef ::int64_t  int64_t;
typedef ::uint8_t  uint8_t;
typedef ::uint16_t uint16_t;
typedef ::uint32_t uint32_t;
typedef ::uint64_t uint64_t;

#endif


// an oracle to tell us how to define intptr_t
template<int word_size = sizeof(void*)> struct divine_intptr_t;
template<int word_size = sizeof(void*)> struct divine_uintptr_t;

// 32b platforms
template<> struct divine_intptr_t<4>  { typedef thrust::detail::int32_t  type; };
template<> struct divine_uintptr_t<4> { typedef thrust::detail::uint32_t type; };

// 64b platforms
template<> struct divine_intptr_t<8>  { typedef thrust::detail::int64_t  type; };
template<> struct divine_uintptr_t<8> { typedef thrust::detail::uint64_t type; };

typedef divine_intptr_t<>::type  intptr_t;
typedef divine_uintptr_t<>::type uintptr_t;

} // end detail
} // end thrust
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/async/reduce.h
DELETED
@@ -1,350 +0,0 @@
/******************************************************************************
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/

// TODO: Optimize for thrust::plus

// TODO: Move into system::cuda

#pragma once

#include <thrust/detail/config.h>
#include <thrust/detail/cpp14_required.h>

#if THRUST_CPP_DIALECT >= 2014

#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC

#include <thrust/system/cuda/config.h>

#include <thrust/system/cuda/detail/async/customization.h>
#include <thrust/system/cuda/detail/reduce.h>
#include <thrust/system/cuda/future.h>
#include <thrust/type_traits/remove_cvref.h>
#include <thrust/iterator/iterator_traits.h>
#include <thrust/distance.h>

#include <type_traits>

namespace thrust
{

namespace system { namespace cuda { namespace detail
{

template <
  typename DerivedPolicy
, typename ForwardIt, typename Size, typename T, typename BinaryOp
>
auto async_reduce_n(
  execution_policy<DerivedPolicy>& policy
, ForwardIt                        first
, Size                             n
, T                                init
, BinaryOp                         op
) -> unique_eager_future<remove_cvref_t<T>>
{
  using U = remove_cvref_t<T>;

  auto const device_alloc = get_async_device_allocator(policy);

  using pointer
    = typename thrust::detail::allocator_traits<decltype(device_alloc)>::
      template rebind_traits<U>::pointer;

  unique_eager_future_promise_pair<U, pointer> fp;

  // Determine temporary device storage requirements.

  size_t tmp_size = 0;
  thrust::cuda_cub::throw_on_error(
    cub::DeviceReduce::Reduce(
      nullptr
    , tmp_size
    , first
    , static_cast<U*>(nullptr)
    , n
    , op
    , init
    , nullptr // Null stream, just for sizing.
    , THRUST_DEBUG_SYNC_FLAG
    )
  , "after reduction sizing"
  );

  // Allocate temporary storage.

  auto content = uninitialized_allocate_unique_n<thrust::detail::uint8_t>(
    device_alloc, sizeof(U) + tmp_size
  );

  // The array was dynamically allocated, so we assume that it's suitably
  // aligned for any type of data. `malloc`/`cudaMalloc`/`new`/`std::allocator`
  // make this guarantee.
  auto const content_ptr = content.get();
  U* const ret_ptr = thrust::detail::aligned_reinterpret_cast<U*>(
    raw_pointer_cast(content_ptr)
  );
  void* const tmp_ptr = static_cast<void*>(
    raw_pointer_cast(content_ptr + sizeof(U))
  );

  // Set up stream with dependencies.

  cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy);

  if (thrust::cuda_cub::default_stream() != user_raw_stream)
  {
    fp = make_dependent_future<U, pointer>(
      [] (decltype(content) const& c)
      {
        return pointer(
          thrust::detail::aligned_reinterpret_cast<U*>(
            raw_pointer_cast(c.get())
          )
        );
      }
    , std::tuple_cat(
        std::make_tuple(
          std::move(content)
        , unique_stream(nonowning, user_raw_stream)
        )
      , extract_dependencies(
          std::move(thrust::detail::derived_cast(policy))
        )
      )
    );
  }
  else
  {
    fp = make_dependent_future<U, pointer>(
      [] (decltype(content) const& c)
      {
        return pointer(
          thrust::detail::aligned_reinterpret_cast<U*>(
            raw_pointer_cast(c.get())
          )
        );
      }
    , std::tuple_cat(
        std::make_tuple(
          std::move(content)
        )
      , extract_dependencies(
          std::move(thrust::detail::derived_cast(policy))
        )
      )
    );
  }

  // Run reduction.

  thrust::cuda_cub::throw_on_error(
    cub::DeviceReduce::Reduce(
      tmp_ptr
    , tmp_size
    , first
    , ret_ptr
    , n
    , op
    , init
    , fp.future.stream().native_handle()
    , THRUST_DEBUG_SYNC_FLAG
    )
  , "after reduction launch"
  );

  return std::move(fp.future);
}

}}} // namespace system::cuda::detail

namespace cuda_cub
{

// ADL entry point.
template <
  typename DerivedPolicy
, typename ForwardIt, typename Sentinel, typename T, typename BinaryOp
>
auto async_reduce(
  execution_policy<DerivedPolicy>& policy
, ForwardIt                        first
, Sentinel                         last
, T                                init
, BinaryOp                         op
)
THRUST_RETURNS(
  thrust::system::cuda::detail::async_reduce_n(
    policy, first, distance(first, last), init, op
  )
)

} // cuda_cub

///////////////////////////////////////////////////////////////////////////////

namespace system { namespace cuda { namespace detail
{

template <
  typename DerivedPolicy
, typename ForwardIt, typename Size, typename OutputIt
, typename T, typename BinaryOp
>
auto async_reduce_into_n(
  execution_policy<DerivedPolicy>& policy
, ForwardIt                        first
, Size                             n
, OutputIt                         output
, T                                init
, BinaryOp                         op
) -> unique_eager_event
{
  using U = remove_cvref_t<T>;

  auto const device_alloc = get_async_device_allocator(policy);

  unique_eager_event e;

  // Determine temporary device storage requirements.

  size_t tmp_size = 0;
  thrust::cuda_cub::throw_on_error(
    cub::DeviceReduce::Reduce(
      nullptr
    , tmp_size
    , first
    , static_cast<U*>(nullptr)
    , n
    , op
    , init
    , nullptr // Null stream, just for sizing.
    , THRUST_DEBUG_SYNC_FLAG
    )
  , "after reduction sizing"
  );

  // Allocate temporary storage.

  auto content = uninitialized_allocate_unique_n<thrust::detail::uint8_t>(
    device_alloc, tmp_size
  );

  // The array was dynamically allocated, so we assume that it's suitably
  // aligned for any type of data. `malloc`/`cudaMalloc`/`new`/`std::allocator`
  // make this guarantee.
  auto const content_ptr = content.get();

  void* const tmp_ptr = static_cast<void*>(
    raw_pointer_cast(content_ptr)
  );

  // Set up stream with dependencies.

  cudaStream_t const user_raw_stream = thrust::cuda_cub::stream(policy);

  if (thrust::cuda_cub::default_stream() != user_raw_stream)
  {
    e = make_dependent_event(
      std::tuple_cat(
        std::make_tuple(
          std::move(content)
        , unique_stream(nonowning, user_raw_stream)
        )
      , extract_dependencies(
          std::move(thrust::detail::derived_cast(policy))
        )
      )
    );
  }
  else
  {
    e = make_dependent_event(
      std::tuple_cat(
        std::make_tuple(
          std::move(content)
        )
      , extract_dependencies(
          std::move(thrust::detail::derived_cast(policy))
        )
      )
    );
  }

  // Run reduction.

  thrust::cuda_cub::throw_on_error(
    cub::DeviceReduce::Reduce(
      tmp_ptr
    , tmp_size
    , first
    , output
    , n
    , op
    , init
    , e.stream().native_handle()
    , THRUST_DEBUG_SYNC_FLAG
    )
  , "after reduction launch"
  );

  return e;
}

}}} // namespace system::cuda::detail

namespace cuda_cub
{

// ADL entry point.
template <
  typename DerivedPolicy
, typename ForwardIt, typename Sentinel, typename OutputIt
, typename T, typename BinaryOp
>
auto async_reduce_into(
  execution_policy<DerivedPolicy>& policy
, ForwardIt                        first
, Sentinel                         last
, OutputIt                         output
, T                                init
, BinaryOp                         op
)
THRUST_RETURNS(
  thrust::system::cuda::detail::async_reduce_into_n(
    policy, first, distance(first, last), output, init, op
  )
)

} // cuda_cub

} // end namespace thrust

#endif // THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC

#endif
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/gather.h
DELETED
@@ -1,107 +0,0 @@
/******************************************************************************
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the NVIDIA CORPORATION nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 ******************************************************************************/
#pragma once


#if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
#include <thrust/system/cuda/detail/transform.h>
#include <thrust/iterator/permutation_iterator.h>

namespace thrust
{
namespace cuda_cub {

template <class Derived,
          class MapIt,
          class ItemsIt,
          class ResultIt>
ResultIt __host__ __device__
gather(execution_policy<Derived>& policy,
       MapIt                      map_first,
       MapIt                      map_last,
       ItemsIt                    items,
       ResultIt                   result)
{
  return cuda_cub::transform(policy,
                             thrust::make_permutation_iterator(items, map_first),
                             thrust::make_permutation_iterator(items, map_last),
                             result,
                             identity());
}


template <class Derived,
          class MapIt,
          class StencilIt,
          class ItemsIt,
          class ResultIt,
          class Predicate>
ResultIt __host__ __device__
gather_if(execution_policy<Derived>& policy,
          MapIt                      map_first,
          MapIt                      map_last,
          StencilIt                  stencil,
          ItemsIt                    items,
          ResultIt                   result,
          Predicate                  predicate)
{
  return cuda_cub::transform_if(policy,
                                thrust::make_permutation_iterator(items, map_first),
                                thrust::make_permutation_iterator(items, map_last),
                                stencil,
                                result,
                                identity(),
                                predicate);
}

template <class Derived,
          class MapIt,
          class StencilIt,
          class ItemsIt,
          class ResultIt>
ResultIt __host__ __device__
gather_if(execution_policy<Derived>& policy,
          MapIt                      map_first,
          MapIt                      map_last,
          StencilIt                  stencil,
          ItemsIt                    items,
          ResultIt                   result)
{
  return cuda_cub::gather_if(policy,
                             map_first,
                             map_last,
                             stencil,
                             items,
                             result,
                             identity());
}


} // namespace cuda_cub
} // end namespace thrust

#endif
spaces/CVPR/VizWiz-CLIP-VQA/README.md
DELETED
@@ -1,10 +0,0 @@
---
title: CLIP-VQA for VizWiz 2022
emoji: 👁️
colorFrom: gray
colorTo: indigo
sdk: gradio
sdk_version: 3.0.17
app_file: app.py
pinned: false
---
spaces/Choisuren/AnimeGANv3/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: AnimeGANv3
emoji: 🐠
colorFrom: purple
colorTo: yellow
sdk: gradio
sdk_version: 3.29.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CikeyQI/meme-api/meme_generator/memes/hug_leg/__init__.py
DELETED
@@ -1,32 +0,0 @@
from pathlib import Path
from typing import List

from PIL.Image import Image as IMG
from pil_utils import BuildImage

from meme_generator import add_meme
from meme_generator.utils import save_gif

img_dir = Path(__file__).parent / "images"


def hug_leg(images: List[BuildImage], texts, args):
    img = images[0].convert("RGBA").square()
    locs = [
        (50, 73, 68, 92),
        (58, 60, 62, 95),
        (65, 10, 67, 118),
        (61, 20, 77, 97),
        (55, 44, 65, 106),
        (66, 85, 60, 98),
    ]
    frames: List[IMG] = []
    for i in range(6):
        frame = BuildImage.open(img_dir / f"{i}.png")
        x, y, w, h = locs[i]
        frame.paste(img.resize((w, h)), (x, y), below=True)
        frames.append(frame.image)
    return save_gif(frames, 0.06)


add_meme("hug_leg", hug_leg, min_images=1, max_images=1, keywords=["抱大腿"])
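
A minimal sketch of calling this meme directly, bypassing the `add_meme` registry. The avatar path is hypothetical, and it assumes `save_gif` returns an in-memory `BytesIO` buffer, which is what the surrounding meme-api code expects:

```python
from pil_utils import BuildImage

avatar = BuildImage.open("avatar.png")          # hypothetical input image

gif = hug_leg([avatar], texts=[], args=None)    # texts/args are unused here
with open("hug_leg.gif", "wb") as f:
    f.write(gif.getvalue())                     # assumes a BytesIO result
```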
spaces/CofAI/chat/client/css/conversation.css
DELETED
@@ -1,158 +0,0 @@
.conversation {
    width: 60%;
    margin: 0px 16px;
    display: flex;
    flex-direction: column;
}

.conversation #messages {
    width: 100%;
    display: flex;
    flex-direction: column;
    overflow: auto;
    overflow-wrap: break-word;
    padding-bottom: 8px;
}

.conversation .user-input {
    max-height: 180px;
    margin: 16px 0px;
}

.conversation .user-input input {
    font-size: 1rem;
    background: none;
    border: none;
    outline: none;
    color: var(--colour-3);
}

.conversation .user-input input::placeholder {
    color: var(--user-input);
}

.conversation-title {
    color: var(--colour-3);
    font-size: 14px;
}

.conversation .user-input textarea {
    font-size: 1rem;
    width: 100%;
    height: 100%;
    padding: 12px;
    background: none;
    border: none;
    outline: none;
    color: var(--colour-3);
    resize: vertical;
    max-height: 150px;
    min-height: 80px;
}

.box {
    backdrop-filter: blur(20px);
    -webkit-backdrop-filter: blur(20px);
    background-color: var(--blur-bg);
    height: 100%;
    width: 100%;
    border-radius: var(--border-radius-1);
    border: 1px solid var(--blur-border);
}

.box.input-box {
    position: relative;
    align-items: center;
    padding: 8px;
    cursor: pointer;
}

#send-button {
    position: absolute;
    bottom: 25%;
    right: 10px;
    z-index: 1;
    padding: 16px;
}

#cursor {
    line-height: 17px;
    margin-left: 3px;
    -webkit-animation: blink 0.8s infinite;
    animation: blink 0.8s infinite;
    width: 7px;
    height: 15px;
}

@keyframes blink {
    0% {
        background: #ffffff00;
    }

    50% {
        background: white;
    }

    100% {
        background: #ffffff00;
    }
}

@-webkit-keyframes blink {
    0% {
        background: #ffffff00;
    }

    50% {
        background: white;
    }

    100% {
        background: #ffffff00;
    }
}

/* scrollbar */
.conversation #messages::-webkit-scrollbar {
    width: 4px;
    padding: 8px 0px;
}

.conversation #messages::-webkit-scrollbar-track {
    background-color: #ffffff00;
}

.conversation #messages::-webkit-scrollbar-thumb {
    background-color: #555555;
    border-radius: 10px;
}

@media screen and (max-width: 990px) {
    .conversation {
        width: 100%;
        height: 90%;
    }
}

@media screen and (max-height: 720px) {
    .conversation.box {
        height: 70%;
    }

    .conversation .user-input textarea {
        font-size: 0.875rem;
    }
}

@media screen and (max-width: 360px) {
    .box {
        border-radius: 0;
    }
    .conversation {
        margin: 0;
        margin-top: 48px;
    }
    .conversation .user-input {
        margin: 2px 0 8px 0;
    }
}
spaces/CofAI/chat/client/js/sidebar-toggler.js
DELETED
@@ -1,34 +0,0 @@
const sidebar = document.querySelector(".sidebar");
const menuButton = document.querySelector(".menu-button");

function toggleSidebar(event) {
    if (sidebar.classList.contains("shown")) {
        hideSidebar(event.target);
    } else {
        showSidebar(event.target);
    }
    window.scrollTo(0, 0);
}

function showSidebar(target) {
    sidebar.classList.add("shown");
    target.classList.add("rotated");
    document.body.style.overflow = "hidden";
}

function hideSidebar(target) {
    sidebar.classList.remove("shown");
    target.classList.remove("rotated");
    document.body.style.overflow = "auto";
}

menuButton.addEventListener("click", toggleSidebar);

document.body.addEventListener('click', function(event) {
    if (event.target.matches('.conversation-title')) {
        const menuButtonStyle = window.getComputedStyle(menuButton);
        if (menuButtonStyle.display !== 'none') {
            hideSidebar(menuButton);
        }
    }
});
spaces/DKDohare/Chat-GPT4-MAX/app.py
DELETED
@@ -1,141 +0,0 @@
import gradio as gr
import os
import json
import requests

#Streaming endpoint
API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream"

#Testing with my Open AI Key
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

def predict(inputs, top_p, temperature, chat_counter, chatbot=[], history=[]):

    payload = {
        "model": "gpt-4",
        "messages": [{"role": "user", "content": f"{inputs}"}],
        "temperature": 1.0,
        "top_p": 1.0,
        "n": 1,
        "stream": True,
        "presence_penalty": 0,
        "frequency_penalty": 0,
    }

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {OPENAI_API_KEY}"
    }

    print(f"chat_counter - {chat_counter}")
    if chat_counter != 0:
        messages = []
        for data in chatbot:
            temp1 = {}
            temp1["role"] = "user"
            temp1["content"] = data[0]
            temp2 = {}
            temp2["role"] = "assistant"
            temp2["content"] = data[1]
            messages.append(temp1)
            messages.append(temp2)
        temp3 = {}
        temp3["role"] = "user"
        temp3["content"] = inputs
        messages.append(temp3)
        #messages
        payload = {
            "model": "gpt-4",
            "messages": messages,  #[{"role": "user", "content": f"{inputs}"}],
            "temperature": temperature,  #1.0,
            "top_p": top_p,  #1.0,
            "n": 1,
            "stream": True,
            "presence_penalty": 0,
            "frequency_penalty": 0,
        }

    chat_counter += 1

    history.append(inputs)
    print(f"payload is - {payload}")
    # make a POST request to the API endpoint using the requests.post method, passing in stream=True
    response = requests.post(API_URL, headers=headers, json=payload, stream=True)
    print(f"response code - {response}")
    token_counter = 0
    partial_words = ""

    counter = 0
    for chunk in response.iter_lines():
        # Skipping the first chunk
        if counter == 0:
            counter += 1
            continue
        # check whether each line is non-empty
        if chunk.decode():
            chunk = chunk.decode()
            # decode each line as response data is in bytes
            if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']:
                #if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
                #    break
                partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"]
                if token_counter == 0:
                    history.append(" " + partial_words)
                else:
                    history[-1] = partial_words
                chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]  # convert to tuples of list
                token_counter += 1
                yield chat, history, chat_counter, response  # resembles {chatbot: chat, state: history}


def reset_textbox():
    return gr.update(value='')

title = """<h1 align="center">🔥GPT4 with ChatCompletions API +🚀Gradio-Streaming</h1>"""
description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
```
User: <utterance>
Assistant: <utterance>
User: <utterance>
Assistant: <utterance>
...
```
In this app, you can explore the outputs of a gpt-4 LLM.
"""

theme = gr.themes.Default(primary_hue="green")

with gr.Blocks(css="""#col_container { margin-left: auto; margin-right: auto;}
                #chatbot {height: 520px; overflow: auto;}""",
               theme=theme) as demo:
    gr.HTML(title)
    gr.HTML("""<h3 align="center">🔥This Huggingface Gradio Demo provides you full access to GPT4 API (4096 token limit). 🎉🥳🎉You don't need any OPENAI API key🙌</h3>""")
    gr.HTML('''<center><a href="https://huggingface.co/spaces/ysharma/ChatGPT4?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>Duplicate the Space and run securely with your OpenAI API Key</center>''')
    with gr.Column(elem_id="col_container"):
        #GPT4 API Key is provided by Huggingface
        #openai_api_key = gr.Textbox(type='password', label="Enter only your GPT4 OpenAI API key here")
        chatbot = gr.Chatbot(elem_id='chatbot')  #c
        inputs = gr.Textbox(placeholder="Hi there!", label="Type an input and press Enter")  #t
        state = gr.State([])  #s
        with gr.Row():
            with gr.Column(scale=7):
                b1 = gr.Button().style(full_width=True)
            with gr.Column(scale=3):
                server_status_code = gr.Textbox(label="Status code from OpenAI server", )

        #inputs, top_p, temperature, top_k, repetition_penalty
        with gr.Accordion("Parameters", open=False):
            top_p = gr.Slider(minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",)
            temperature = gr.Slider(minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",)
            #top_k = gr.Slider(minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
            #repetition_penalty = gr.Slider(minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
        chat_counter = gr.Number(value=0, visible=False, precision=0)

    inputs.submit(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],)  #openai_api_key
    b1.click(predict, [inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],)  #openai_api_key
    b1.click(reset_textbox, [], [inputs])
    inputs.submit(reset_textbox, [], [inputs])

    #gr.Markdown(description)
    demo.queue(max_size=20, concurrency_count=10).launch(debug=True)
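
For reference, the `chunk[6:]` slicing in `predict` above strips the `data: ` prefix that the streaming chat-completions endpoint puts on every server-sent-events line. A minimal sketch of that parsing step; the chunk shown is illustrative, real chunks carry more fields:

```python
import json

chunk = b'data: {"choices": [{"delta": {"content": "Hello"}}]}'

line = chunk.decode()
payload = json.loads(line[6:])            # drop the leading "data: "
delta = payload["choices"][0]["delta"]
if "content" in delta:
    print(delta["content"])               # -> Hello
```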
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_async/http_proxy.py
DELETED
@@ -1,350 +0,0 @@
import logging
import ssl
from base64 import b64encode
from typing import Iterable, List, Mapping, Optional, Sequence, Tuple, Union

from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend
from .._exceptions import ProxyError
from .._models import (
    URL,
    Origin,
    Request,
    Response,
    enforce_bytes,
    enforce_headers,
    enforce_url,
)
from .._ssl import default_ssl_context
from .._synchronization import AsyncLock
from .._trace import Trace
from .connection import AsyncHTTPConnection
from .connection_pool import AsyncConnectionPool
from .http11 import AsyncHTTP11Connection
from .interfaces import AsyncConnectionInterface

HeadersAsSequence = Sequence[Tuple[Union[bytes, str], Union[bytes, str]]]
HeadersAsMapping = Mapping[Union[bytes, str], Union[bytes, str]]


logger = logging.getLogger("httpcore.proxy")


def merge_headers(
    default_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None,
    override_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None,
) -> List[Tuple[bytes, bytes]]:
    """
    Append default_headers and override_headers, de-duplicating if a key exists
    in both cases.
    """
    default_headers = [] if default_headers is None else list(default_headers)
    override_headers = [] if override_headers is None else list(override_headers)
    has_override = set(key.lower() for key, value in override_headers)
    default_headers = [
        (key, value)
        for key, value in default_headers
        if key.lower() not in has_override
    ]
    return default_headers + override_headers


def build_auth_header(username: bytes, password: bytes) -> bytes:
    userpass = username + b":" + password
    return b"Basic " + b64encode(userpass)


class AsyncHTTPProxy(AsyncConnectionPool):
    """
    A connection pool that sends requests via an HTTP proxy.
    """

    def __init__(
        self,
        proxy_url: Union[URL, bytes, str],
        proxy_auth: Optional[Tuple[Union[bytes, str], Union[bytes, str]]] = None,
        proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None,
        ssl_context: Optional[ssl.SSLContext] = None,
        max_connections: Optional[int] = 10,
        max_keepalive_connections: Optional[int] = None,
        keepalive_expiry: Optional[float] = None,
        http1: bool = True,
        http2: bool = False,
        retries: int = 0,
        local_address: Optional[str] = None,
        uds: Optional[str] = None,
        network_backend: Optional[AsyncNetworkBackend] = None,
        socket_options: Optional[Iterable[SOCKET_OPTION]] = None,
    ) -> None:
        """
        A connection pool for making HTTP requests.

        Parameters:
            proxy_url: The URL to use when connecting to the proxy server.
                For example `"http://127.0.0.1:8080/"`.
            proxy_auth: Any proxy authentication as a two-tuple of
                (username, password). May be either bytes or ascii-only str.
            proxy_headers: Any HTTP headers to use for the proxy requests.
                For example `{"Proxy-Authorization": "Basic <username>:<password>"}`.
            ssl_context: An SSL context to use for verifying connections.
                If not specified, the default `httpcore.default_ssl_context()`
                will be used.
            max_connections: The maximum number of concurrent HTTP connections that
                the pool should allow. Any attempt to send a request on a pool that
                would exceed this amount will block until a connection is available.
            max_keepalive_connections: The maximum number of idle HTTP connections
                that will be maintained in the pool.
            keepalive_expiry: The duration in seconds that an idle HTTP connection
                may be maintained for before being expired from the pool.
            http1: A boolean indicating if HTTP/1.1 requests should be supported
                by the connection pool. Defaults to True.
            http2: A boolean indicating if HTTP/2 requests should be supported by
                the connection pool. Defaults to False.
            retries: The maximum number of retries when trying to establish
                a connection.
            local_address: Local address to connect from. Can also be used to
                connect using a particular address family. Using
                `local_address="0.0.0.0"` will connect using an `AF_INET` address
                (IPv4), while using `local_address="::"` will connect using an
                `AF_INET6` address (IPv6).
            uds: Path to a Unix Domain Socket to use instead of TCP sockets.
            network_backend: A backend instance to use for handling network I/O.
        """
        super().__init__(
            ssl_context=ssl_context,
            max_connections=max_connections,
            max_keepalive_connections=max_keepalive_connections,
            keepalive_expiry=keepalive_expiry,
            http1=http1,
            http2=http2,
            network_backend=network_backend,
            retries=retries,
            local_address=local_address,
            uds=uds,
            socket_options=socket_options,
        )
        self._ssl_context = ssl_context
        self._proxy_url = enforce_url(proxy_url, name="proxy_url")
        self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")
        if proxy_auth is not None:
            username = enforce_bytes(proxy_auth[0], name="proxy_auth")
            password = enforce_bytes(proxy_auth[1], name="proxy_auth")
            authorization = build_auth_header(username, password)
            self._proxy_headers = [
                (b"Proxy-Authorization", authorization)
            ] + self._proxy_headers

    def create_connection(self, origin: Origin) -> AsyncConnectionInterface:
        if origin.scheme == b"http":
            return AsyncForwardHTTPConnection(
                proxy_origin=self._proxy_url.origin,
                proxy_headers=self._proxy_headers,
                remote_origin=origin,
                keepalive_expiry=self._keepalive_expiry,
                network_backend=self._network_backend,
            )
        return AsyncTunnelHTTPConnection(
            proxy_origin=self._proxy_url.origin,
            proxy_headers=self._proxy_headers,
            remote_origin=origin,
            ssl_context=self._ssl_context,
            keepalive_expiry=self._keepalive_expiry,
            http1=self._http1,
            http2=self._http2,
            network_backend=self._network_backend,
        )


class AsyncForwardHTTPConnection(AsyncConnectionInterface):
    def __init__(
        self,
        proxy_origin: Origin,
        remote_origin: Origin,
        proxy_headers: Union[HeadersAsMapping, HeadersAsSequence, None] = None,
        keepalive_expiry: Optional[float] = None,
        network_backend: Optional[AsyncNetworkBackend] = None,
        socket_options: Optional[Iterable[SOCKET_OPTION]] = None,
    ) -> None:
        self._connection = AsyncHTTPConnection(
            origin=proxy_origin,
            keepalive_expiry=keepalive_expiry,
            network_backend=network_backend,
            socket_options=socket_options,
        )
        self._proxy_origin = proxy_origin
        self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")
        self._remote_origin = remote_origin

    async def handle_async_request(self, request: Request) -> Response:
        headers = merge_headers(self._proxy_headers, request.headers)
        url = URL(
            scheme=self._proxy_origin.scheme,
            host=self._proxy_origin.host,
            port=self._proxy_origin.port,
            target=bytes(request.url),
        )
        proxy_request = Request(
            method=request.method,
            url=url,
            headers=headers,
            content=request.stream,
            extensions=request.extensions,
        )
        return await self._connection.handle_async_request(proxy_request)

    def can_handle_request(self, origin: Origin) -> bool:
        return origin == self._remote_origin

    async def aclose(self) -> None:
        await self._connection.aclose()

    def info(self) -> str:
        return self._connection.info()

    def is_available(self) -> bool:
        return self._connection.is_available()

    def has_expired(self) -> bool:
        return self._connection.has_expired()

    def is_idle(self) -> bool:
        return self._connection.is_idle()

    def is_closed(self) -> bool:
        return self._connection.is_closed()

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} [{self.info()}]>"


class AsyncTunnelHTTPConnection(AsyncConnectionInterface):
    def __init__(
        self,
        proxy_origin: Origin,
        remote_origin: Origin,
        ssl_context: Optional[ssl.SSLContext] = None,
        proxy_headers: Optional[Sequence[Tuple[bytes, bytes]]] = None,
        keepalive_expiry: Optional[float] = None,
        http1: bool = True,
        http2: bool = False,
        network_backend: Optional[AsyncNetworkBackend] = None,
        socket_options: Optional[Iterable[SOCKET_OPTION]] = None,
    ) -> None:
        self._connection: AsyncConnectionInterface = AsyncHTTPConnection(
            origin=proxy_origin,
            keepalive_expiry=keepalive_expiry,
            network_backend=network_backend,
            socket_options=socket_options,
        )
        self._proxy_origin = proxy_origin
        self._remote_origin = remote_origin
        self._ssl_context = ssl_context
        self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")
        self._keepalive_expiry = keepalive_expiry
        self._http1 = http1
        self._http2 = http2
        self._connect_lock = AsyncLock()
        self._connected = False

    async def handle_async_request(self, request: Request) -> Response:
        timeouts = request.extensions.get("timeout", {})
        timeout = timeouts.get("connect", None)

        async with self._connect_lock:
            if not self._connected:
                target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port)

                connect_url = URL(
                    scheme=self._proxy_origin.scheme,
                    host=self._proxy_origin.host,
                    port=self._proxy_origin.port,
                    target=target,
                )
                connect_headers = merge_headers(
                    [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers
                )
                connect_request = Request(
                    method=b"CONNECT",
                    url=connect_url,
                    headers=connect_headers,
                    extensions=request.extensions,
                )
                connect_response = await self._connection.handle_async_request(
                    connect_request
                )

                if connect_response.status < 200 or connect_response.status > 299:
                    reason_bytes = connect_response.extensions.get("reason_phrase", b"")
                    reason_str = reason_bytes.decode("ascii", errors="ignore")
                    msg = "%d %s" % (connect_response.status, reason_str)
                    await self._connection.aclose()
                    raise ProxyError(msg)

                stream = connect_response.extensions["network_stream"]

                # Upgrade the stream to SSL
                ssl_context = (
                    default_ssl_context()
                    if self._ssl_context is None
                    else self._ssl_context
                )
                alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"]
                ssl_context.set_alpn_protocols(alpn_protocols)

                kwargs = {
                    "ssl_context": ssl_context,
                    "server_hostname": self._remote_origin.host.decode("ascii"),
                    "timeout": timeout,
                }
                async with Trace("start_tls", logger, request, kwargs) as trace:
                    stream = await stream.start_tls(**kwargs)
                    trace.return_value = stream

                # Determine if we should be using HTTP/1.1 or HTTP/2
                ssl_object = stream.get_extra_info("ssl_object")
                http2_negotiated = (
                    ssl_object is not None
                    and ssl_object.selected_alpn_protocol() == "h2"
                )

                # Create the HTTP/1.1 or HTTP/2 connection
                if http2_negotiated or (self._http2 and not self._http1):
                    from .http2 import AsyncHTTP2Connection

                    self._connection = AsyncHTTP2Connection(
                        origin=self._remote_origin,
                        stream=stream,
                        keepalive_expiry=self._keepalive_expiry,
                    )
                else:
                    self._connection = AsyncHTTP11Connection(
                        origin=self._remote_origin,
                        stream=stream,
                        keepalive_expiry=self._keepalive_expiry,
                    )

                self._connected = True
        return await self._connection.handle_async_request(request)

    def can_handle_request(self, origin: Origin) -> bool:
        return origin == self._remote_origin

    async def aclose(self) -> None:
        await self._connection.aclose()

    def info(self) -> str:
        return self._connection.info()

    def is_available(self) -> bool:
        return self._connection.is_available()

    def has_expired(self) -> bool:
        return self._connection.has_expired()

    def is_idle(self) -> bool:
        return self._connection.is_idle()

    def is_closed(self) -> bool:
        return self._connection.is_closed()

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} [{self.info()}]>"
|
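For context: the class in the diff above implements HTTPS proxying by sending an HTTP CONNECT request to the proxy, upgrading the tunneled stream to TLS, and then choosing HTTP/1.1 or HTTP/2 based on the ALPN protocol negotiated with the remote server. The same behaviour is reachable through httpcore's public proxy pool rather than by instantiating the connection class directly; a minimal sketch, assuming httpcore is installed and a proxy is listening at the placeholder address http://localhost:8080/:

```python
import asyncio

import httpcore


async def main() -> None:
    # AsyncHTTPProxy dispatches to forward/tunnel connections like the one in
    # the diff above: http:// origins are forwarded as-is, https:// origins
    # are tunneled via CONNECT and upgraded to TLS before the request is sent.
    async with httpcore.AsyncHTTPProxy(proxy_url="http://localhost:8080/") as proxy:
        response = await proxy.request("GET", "https://www.example.com/")
        print(response.status)


asyncio.run(main())
```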
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/keras_mixin.py
DELETED
@@ -1,481 +0,0 @@
-import collections.abc as collections
-import json
-import os
-import warnings
-from pathlib import Path
-from shutil import copytree
-from typing import Any, Dict, List, Optional, Union
-
-from huggingface_hub import ModelHubMixin, snapshot_download
-from huggingface_hub.utils import (
-    get_tf_version,
-    is_graphviz_available,
-    is_pydot_available,
-    is_tf_available,
-    yaml_dump,
-)
-
-from .constants import CONFIG_NAME
-from .hf_api import HfApi
-from .utils import SoftTemporaryDirectory, logging, validate_hf_hub_args
-
-
-logger = logging.get_logger(__name__)
-
-if is_tf_available():
-    import tensorflow as tf  # type: ignore
-
-
-def _flatten_dict(dictionary, parent_key=""):
-    """Flatten a nested dictionary.
-    Reference: https://stackoverflow.com/a/6027615/10319735
-
-    Args:
-        dictionary (`dict`):
-            The nested dictionary to be flattened.
-        parent_key (`str`):
-            The parent key to be prefixed to the children keys.
-            Necessary for recursing over the nested dictionary.
-
-    Returns:
-        The flattened dictionary.
-    """
-    items = []
-    for key, value in dictionary.items():
-        new_key = f"{parent_key}.{key}" if parent_key else key
-        if isinstance(value, collections.MutableMapping):
-            items.extend(
-                _flatten_dict(
-                    value,
-                    new_key,
-                ).items()
-            )
-        else:
-            items.append((new_key, value))
-    return dict(items)
-
-
-def _create_hyperparameter_table(model):
-    """Parse hyperparameter dictionary into a markdown table."""
-    if model.optimizer is not None:
-        optimizer_params = model.optimizer.get_config()
-        # flatten the configuration
-        optimizer_params = _flatten_dict(optimizer_params)
-        optimizer_params["training_precision"] = tf.keras.mixed_precision.global_policy().name
-        table = "| Hyperparameters | Value |\n| :-- | :-- |\n"
-        for key, value in optimizer_params.items():
-            table += f"| {key} | {value} |\n"
-    else:
-        table = None
-    return table
-
-
-def _plot_network(model, save_directory):
-    tf.keras.utils.plot_model(
-        model,
-        to_file=f"{save_directory}/model.png",
-        show_shapes=False,
-        show_dtype=False,
-        show_layer_names=True,
-        rankdir="TB",
-        expand_nested=False,
-        dpi=96,
-        layer_range=None,
-    )
-
-
-def _create_model_card(
-    model,
-    repo_dir: Path,
-    plot_model: bool = True,
-    metadata: Optional[dict] = None,
-):
-    """
-    Creates a model card for the repository.
-    """
-    hyperparameters = _create_hyperparameter_table(model)
-    if plot_model and is_graphviz_available() and is_pydot_available():
-        _plot_network(model, repo_dir)
-    if metadata is None:
-        metadata = {}
-    readme_path = f"{repo_dir}/README.md"
-    metadata["library_name"] = "keras"
-    model_card: str = "---\n"
-    model_card += yaml_dump(metadata, default_flow_style=False)
-    model_card += "---\n"
-    model_card += "\n## Model description\n\nMore information needed\n"
-    model_card += "\n## Intended uses & limitations\n\nMore information needed\n"
-    model_card += "\n## Training and evaluation data\n\nMore information needed\n"
-    if hyperparameters is not None:
-        model_card += "\n## Training procedure\n"
-        model_card += "\n### Training hyperparameters\n"
-        model_card += "\nThe following hyperparameters were used during training:\n\n"
-        model_card += hyperparameters
-        model_card += "\n"
-    if plot_model and os.path.exists(f"{repo_dir}/model.png"):
-        model_card += "\n ## Model Plot\n"
-        model_card += "\n<details>"
-        model_card += "\n<summary>View Model Plot</summary>\n"
-        path_to_plot = "./model.png"
-        model_card += f"\n![Model Image]({path_to_plot})\n\n"
-        model_card += "\n</details>"
-
-    if os.path.exists(readme_path):
-        with open(readme_path, "r", encoding="utf8") as f:
-            readme = f.read()
-    else:
-        readme = model_card
-    with open(readme_path, "w", encoding="utf-8") as f:
-        f.write(readme)
-
-
-def save_pretrained_keras(
-    model,
-    save_directory: Union[str, Path],
-    config: Optional[Dict[str, Any]] = None,
-    include_optimizer: bool = False,
-    plot_model: bool = True,
-    tags: Optional[Union[list, str]] = None,
-    **model_save_kwargs,
-):
-    """
-    Saves a Keras model to save_directory in SavedModel format. Use this if
-    you're using the Functional or Sequential APIs.
-
-    Args:
-        model (`Keras.Model`):
-            The [Keras
-            model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)
-            you'd like to save. The model must be compiled and built.
-        save_directory (`str` or `Path`):
-            Specify directory in which you want to save the Keras model.
-        config (`dict`, *optional*):
-            Configuration object to be saved alongside the model weights.
-        include_optimizer(`bool`, *optional*, defaults to `False`):
-            Whether or not to include optimizer in serialization.
-        plot_model (`bool`, *optional*, defaults to `True`):
-            Setting this to `True` will plot the model and put it in the model
-            card. Requires graphviz and pydot to be installed.
-        tags (Union[`str`,`list`], *optional*):
-            List of tags that are related to model or string of a single tag. See example tags
-            [here](https://github.com/huggingface/hub-docs/blame/main/modelcard.md).
-        model_save_kwargs(`dict`, *optional*):
-            model_save_kwargs will be passed to
-            [`tf.keras.models.save_model()`](https://www.tensorflow.org/api_docs/python/tf/keras/models/save_model).
-    """
-    if is_tf_available():
-        import tensorflow as tf
-    else:
-        raise ImportError("Called a Tensorflow-specific function but could not import it.")
-
-    if not model.built:
-        raise ValueError("Model should be built before trying to save")
-
-    save_directory = Path(save_directory)
-    save_directory.mkdir(parents=True, exist_ok=True)
-
-    # saving config
-    if config:
-        if not isinstance(config, dict):
-            raise RuntimeError(f"Provided config to save_pretrained_keras should be a dict. Got: '{type(config)}'")
-
-        with (save_directory / CONFIG_NAME).open("w") as f:
-            json.dump(config, f)
-
-    metadata = {}
-    if isinstance(tags, list):
-        metadata["tags"] = tags
-    elif isinstance(tags, str):
-        metadata["tags"] = [tags]
-
-    task_name = model_save_kwargs.pop("task_name", None)
-    if task_name is not None:
-        warnings.warn(
-            "`task_name` input argument is deprecated. Pass `tags` instead.",
-            FutureWarning,
-        )
-        if "tags" in metadata:
-            metadata["tags"].append(task_name)
-        else:
-            metadata["tags"] = [task_name]
-
-    if model.history is not None:
-        if model.history.history != {}:
-            path = save_directory / "history.json"
-            if path.exists():
-                warnings.warn(
-                    "`history.json` file already exists, it will be overwritten by the history of this version.",
-                    UserWarning,
-                )
-            with path.open("w", encoding="utf-8") as f:
-                json.dump(model.history.history, f, indent=2, sort_keys=True)
-
-    _create_model_card(model, save_directory, plot_model, metadata)
-    tf.keras.models.save_model(model, save_directory, include_optimizer=include_optimizer, **model_save_kwargs)
-
-
-def from_pretrained_keras(*args, **kwargs) -> "KerasModelHubMixin":
-    r"""
-    Instantiate a pretrained Keras model from a pre-trained model from the Hub.
-    The model is expected to be in `SavedModel` format.
-
-    Args:
-        pretrained_model_name_or_path (`str` or `os.PathLike`):
-            Can be either:
-                - A string, the `model id` of a pretrained model hosted inside a
-                  model repo on huggingface.co. Valid model ids can be located
-                  at the root-level, like `bert-base-uncased`, or namespaced
-                  under a user or organization name, like
-                  `dbmdz/bert-base-german-cased`.
-                - You can add `revision` by appending `@` at the end of model_id
-                  simply like this: `dbmdz/bert-base-german-cased@main` Revision
-                  is the specific model version to use. It can be a branch name,
-                  a tag name, or a commit id, since we use a git-based system
-                  for storing models and other artifacts on huggingface.co, so
-                  `revision` can be any identifier allowed by git.
-                - A path to a `directory` containing model weights saved using
-                  [`~transformers.PreTrainedModel.save_pretrained`], e.g.,
-                  `./my_model_directory/`.
-                - `None` if you are both providing the configuration and state
-                  dictionary (resp. with keyword arguments `config` and
-                  `state_dict`).
-        force_download (`bool`, *optional*, defaults to `False`):
-            Whether to force the (re-)download of the model weights and
-            configuration files, overriding the cached versions if they exist.
-        resume_download (`bool`, *optional*, defaults to `False`):
-            Whether to delete incompletely received files. Will attempt to
-            resume the download if such a file exists.
-        proxies (`Dict[str, str]`, *optional*):
-            A dictionary of proxy servers to use by protocol or endpoint, e.g.,
-            `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The
-            proxies are used on each request.
-        token (`str` or `bool`, *optional*):
-            The token to use as HTTP bearer authorization for remote files. If
-            `True`, will use the token generated when running `transformers-cli
-            login` (stored in `~/.huggingface`).
-        cache_dir (`Union[str, os.PathLike]`, *optional*):
-            Path to a directory in which a downloaded pretrained model
-            configuration should be cached if the standard cache should not be
-            used.
-        local_files_only(`bool`, *optional*, defaults to `False`):
-            Whether to only look at local files (i.e., do not try to download
-            the model).
-        model_kwargs (`Dict`, *optional*):
-            model_kwargs will be passed to the model during initialization
-
-    <Tip>
-
-    Passing `token=True` is required when you want to use a private
-    model.
-
-    </Tip>
-    """
-    return KerasModelHubMixin.from_pretrained(*args, **kwargs)
-
-
-@validate_hf_hub_args
-def push_to_hub_keras(
-    model,
-    repo_id: str,
-    *,
-    config: Optional[dict] = None,
-    commit_message: str = "Push Keras model using huggingface_hub.",
-    private: bool = False,
-    api_endpoint: Optional[str] = None,
-    token: Optional[str] = None,
-    branch: Optional[str] = None,
-    create_pr: Optional[bool] = None,
-    allow_patterns: Optional[Union[List[str], str]] = None,
-    ignore_patterns: Optional[Union[List[str], str]] = None,
-    delete_patterns: Optional[Union[List[str], str]] = None,
-    log_dir: Optional[str] = None,
-    include_optimizer: bool = False,
-    tags: Optional[Union[list, str]] = None,
-    plot_model: bool = True,
-    **model_save_kwargs,
-):
-    """
-    Upload model checkpoint to the Hub.
-
-    Use `allow_patterns` and `ignore_patterns` to precisely filter which files should be pushed to the hub. Use
-    `delete_patterns` to delete existing remote files in the same commit. See [`upload_folder`] reference for more
-    details.
-
-    Args:
-        model (`Keras.Model`):
-            The [Keras model](`https://www.tensorflow.org/api_docs/python/tf/keras/Model`) you'd like to push to the
-            Hub. The model must be compiled and built.
-        repo_id (`str`):
-            ID of the repository to push to (example: `"username/my-model"`).
-        commit_message (`str`, *optional*, defaults to "Add Keras model"):
-            Message to commit while pushing.
-        private (`bool`, *optional*, defaults to `False`):
-            Whether the repository created should be private.
-        api_endpoint (`str`, *optional*):
-            The API endpoint to use when pushing the model to the hub.
-        token (`str`, *optional*):
-            The token to use as HTTP bearer authorization for remote files. If
-            not set, will use the token set when logging in with
-            `huggingface-cli login` (stored in `~/.huggingface`).
-        branch (`str`, *optional*):
-            The git branch on which to push the model. This defaults to
-            the default branch as specified in your repository, which
-            defaults to `"main"`.
-        create_pr (`boolean`, *optional*):
-            Whether or not to create a Pull Request from `branch` with that commit.
-            Defaults to `False`.
-        config (`dict`, *optional*):
-            Configuration object to be saved alongside the model weights.
-        allow_patterns (`List[str]` or `str`, *optional*):
-            If provided, only files matching at least one pattern are pushed.
-        ignore_patterns (`List[str]` or `str`, *optional*):
-            If provided, files matching any of the patterns are not pushed.
-        delete_patterns (`List[str]` or `str`, *optional*):
-            If provided, remote files matching any of the patterns will be deleted from the repo.
-        log_dir (`str`, *optional*):
-            TensorBoard logging directory to be pushed. The Hub automatically
-            hosts and displays a TensorBoard instance if log files are included
-            in the repository.
-        include_optimizer (`bool`, *optional*, defaults to `False`):
-            Whether or not to include optimizer during serialization.
-        tags (Union[`list`, `str`], *optional*):
-            List of tags that are related to model or string of a single tag. See example tags
-            [here](https://github.com/huggingface/hub-docs/blame/main/modelcard.md).
-        plot_model (`bool`, *optional*, defaults to `True`):
-            Setting this to `True` will plot the model and put it in the model
-            card. Requires graphviz and pydot to be installed.
-        model_save_kwargs(`dict`, *optional*):
-            model_save_kwargs will be passed to
-            [`tf.keras.models.save_model()`](https://www.tensorflow.org/api_docs/python/tf/keras/models/save_model).
-
-    Returns:
-        The url of the commit of your model in the given repository.
-    """
-    api = HfApi(endpoint=api_endpoint)
-    repo_id = api.create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True).repo_id
-
-    # Push the files to the repo in a single commit
-    with SoftTemporaryDirectory() as tmp:
-        saved_path = Path(tmp) / repo_id
-        save_pretrained_keras(
-            model,
-            saved_path,
-            config=config,
-            include_optimizer=include_optimizer,
-            tags=tags,
-            plot_model=plot_model,
-            **model_save_kwargs,
-        )
-
-        # If `log_dir` provided, delete remote logs and upload new ones
-        if log_dir is not None:
-            delete_patterns = (
-                []
-                if delete_patterns is None
-                else (
-                    [delete_patterns]  # convert `delete_patterns` to a list
-                    if isinstance(delete_patterns, str)
-                    else delete_patterns
-                )
-            )
-            delete_patterns.append("logs/*")
-            copytree(log_dir, saved_path / "logs")
-
-        return api.upload_folder(
-            repo_type="model",
-            repo_id=repo_id,
-            folder_path=saved_path,
-            commit_message=commit_message,
-            token=token,
-            revision=branch,
-            create_pr=create_pr,
-            allow_patterns=allow_patterns,
-            ignore_patterns=ignore_patterns,
-            delete_patterns=delete_patterns,
-        )
-
-
-class KerasModelHubMixin(ModelHubMixin):
-    """
-    Implementation of [`ModelHubMixin`] to provide model Hub upload/download
-    capabilities to Keras models.
-
-
-    ```python
-    >>> import tensorflow as tf
-    >>> from huggingface_hub import KerasModelHubMixin
-
-
-    >>> class MyModel(tf.keras.Model, KerasModelHubMixin):
-    ...     def __init__(self, **kwargs):
-    ...         super().__init__()
-    ...         self.config = kwargs.pop("config", None)
-    ...         self.dummy_inputs = ...
-    ...         self.layer = ...
-
-    ...     def call(self, *args):
-    ...         return ...
-
-
-    >>> # Initialize and compile the model as you normally would
-    >>> model = MyModel()
-    >>> model.compile(...)
-    >>> # Build the graph by training it or passing dummy inputs
-    >>> _ = model(model.dummy_inputs)
-    >>> # Save model weights to local directory
-    >>> model.save_pretrained("my-awesome-model")
-    >>> # Push model weights to the Hub
-    >>> model.push_to_hub("my-awesome-model")
-    >>> # Download and initialize weights from the Hub
-    >>> model = MyModel.from_pretrained("username/super-cool-model")
-    ```
-    """
-
-    def _save_pretrained(self, save_directory):
-        save_pretrained_keras(self, save_directory)
-
-    @classmethod
-    def _from_pretrained(
-        cls,
-        model_id,
-        revision,
-        cache_dir,
-        force_download,
-        proxies,
-        resume_download,
-        local_files_only,
-        token,
-        **model_kwargs,
-    ):
-        """Here we just call [`from_pretrained_keras`] function so both the mixin and
-        functional APIs stay in sync.
-
-        TODO - Some args above aren't used since we are calling
-        snapshot_download instead of hf_hub_download.
-        """
-        if is_tf_available():
-            import tensorflow as tf
-        else:
-            raise ImportError("Called a TensorFlow-specific function but could not import it.")
-
-        # TODO - Figure out what to do about these config values. Config is not going to be needed to load model
-        cfg = model_kwargs.pop("config", None)
-
-        # Root is either a local filepath matching model_id or a cached snapshot
-        if not os.path.isdir(model_id):
-            storage_folder = snapshot_download(
-                repo_id=model_id,
-                revision=revision,
-                cache_dir=cache_dir,
-                library_name="keras",
-                library_version=get_tf_version(),
-            )
-        else:
-            storage_folder = model_id
-
-        model = tf.keras.models.load_model(storage_folder, **model_kwargs)
-
-        # For now, we add a new attribute, config, to store the config loaded from the hub/a local dir.
-        model.config = cfg
-
-        return model
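The deleted keras_mixin.py exposed three public entry points: save_pretrained_keras, from_pretrained_keras, and push_to_hub_keras. A minimal usage sketch of that API, assuming a huggingface_hub version that still ships it (it has since been deprecated) and a placeholder repo id:

```python
import tensorflow as tf

from huggingface_hub import from_pretrained_keras, push_to_hub_keras

# save_pretrained_keras/push_to_hub_keras require a model that is both
# compiled and built, as enforced in the code above.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse")

# Saves the model in SavedModel format, generates a model card (with an
# optional architecture plot and hyperparameter table), and uploads it all
# in a single commit. "your-username/toy-keras-model" is a placeholder.
push_to_hub_keras(model, "your-username/toy-keras-model", tags=["demo"])

# Reload the SavedModel from the Hub (or from a local directory path).
reloaded = from_pretrained_keras("your-username/toy-keras-model")
```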
spaces/DVLH/nlpconnect-vit-gpt2-image-captioning/app.py
DELETED
@@ -1,3 +0,0 @@
-import gradio as gr
-
-gr.Interface.load("models/nlpconnect/vit-gpt2-image-captioning").launch()
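The deleted app.py relies on gr.Interface.load, which newer Gradio releases deprecate in favour of gr.load; an equivalent sketch, assuming a recent Gradio version:

```python
import gradio as gr

# gr.load builds a ready-made demo around a Hub-hosted model, replacing the
# deprecated gr.Interface.load call in the deleted file.
gr.load("models/nlpconnect/vit-gpt2-image-captioning").launch()
```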