Commit
·
d045c56
1
Parent(s):
226eba9
Update parquet files (step 4 of 121)
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free The ultimate guide to the most popular game in the series.md +0 -168
- spaces/1gistliPinn/ChatGPT4/Examples/Cars 2 Tamil Dubbed Movie Torrent Download [Extra Quality].md +0 -6
- spaces/1gistliPinn/ChatGPT4/Examples/El Adefesio Rafael Alberti Pdf Free Download.md +0 -6
- spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Crazy Drop A Fun and Addictive Game to Collect Coins and Rewards.md +0 -98
- spaces/1phancelerku/anime-remove-background/Download 5 Nights At Freddy 39s 2 [VERIFIED].md +0 -75
- spaces/1phancelerku/anime-remove-background/Download Chinese FIFA Mobile and Join the 3rd Anniversary Celebration.md +0 -114
- spaces/232labs/VToonify/vtoonify/model/__init__.py +0 -0
- spaces/A00001/bingothoo/src/components/button-scroll-to-bottom.tsx +0 -34
- spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/utils_model.py +0 -66
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/layers/residual_stack.py +0 -75
- spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/tts/ps.py +0 -194
- spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/syntaspeech/syntactic_graph_buider.py +0 -294
- spaces/AILab-CVC/SEED-LLaMA/Dockerfile +0 -80
- spaces/AdVisual/MaskCut/model.py +0 -75
- spaces/Adithedev/Keyword-Extractor/model.py +0 -39
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/scale-plugin.js +0 -35
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/Factory.d.ts +0 -5
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/knob/Knob.d.ts +0 -63
- spaces/AlexZou/Deploy_Restoration/net/CMSFFT.py +0 -377
- spaces/Alpaca233/SadTalker/src/generate_batch.py +0 -120
- spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/latex/attention/model_architecture.tex +0 -155
- spaces/Amrrs/DragGan-Inversion/PTI/editings/ganspace.py +0 -21
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/training/coach.py +0 -242
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/adapter.py +0 -291
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py +0 -376
- spaces/Andy1621/uniformer_image_detection/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py +0 -16
- spaces/Andy1621/uniformer_image_detection/mmdet/utils/logger.py +0 -19
- spaces/Andy1621/uniformer_light/transforms.py +0 -443
- spaces/Artrajz/vits-simple-api/vits/bert/ProsodyModel.py +0 -75
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/resolvelib/compat/collections_abc.py +0 -6
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/file_util.py +0 -249
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/filelist.py +0 -371
- spaces/Awesimo/jojogan/e4e/datasets/inference_dataset.py +0 -25
- spaces/Benson/text-generation/Examples/Asesinato En Los Alpes Mod Apk An1.md +0 -68
- spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/request.py +0 -146
- spaces/BradSegal/Literature-Rating/README.md +0 -37
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/collect_env.py +0 -153
- spaces/CVPR/LIVE/pybind11/tests/conftest.py +0 -200
- spaces/CVPR/LIVE/pybind11/tests/test_embed/external_module.cpp +0 -23
- spaces/CVPR/LIVE/thrust/thrust/detail/range/head_flags.h +0 -230
- spaces/CVPR/LIVE/thrust/thrust/random/xor_combine_engine.h +0 -271
- spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/stable_merge_sort.h +0 -60
- spaces/CVPR/WALT/mmdet/models/backbones/trident_resnet.py +0 -292
- spaces/CVPR/regionclip-demo/detectron2/data/samplers/distributed_sampler.py +0 -200
- spaces/CVPR/regionclip-demo/detectron2/utils/memory.py +0 -84
- spaces/CVPR/time/app.py +0 -38
- spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/index.js +0 -21
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/csrc/ROIPool.h +0 -48
- spaces/DarkyMan/OrangeMixes/README.md +0 -14
- spaces/DeclK/pose/tools/manager.py +0 -72
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free The ultimate guide to the most popular game in the series.md
DELETED
@@ -1,168 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free</h1>
|
3 |
-
<h2>Introduction</h2>
|
4 |
-
<p>If you are a fan of role-playing games, you have probably heard of Final Fantasy VII, one of the most popular and influential titles in the genre. But did you know that you can download and play it for free on your PC, thanks to a lossless repack by RG ILITA? In this article, we will explain what Final Fantasy VII is, what a lossless repack is, who RG ILITA is, how to download and install the game, and why you should play it.</p>
|
5 |
-
<h2>Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free</h2><br /><p><b><b>DOWNLOAD</b> ••• <a href="https://byltly.com/2uKvsO">https://byltly.com/2uKvsO</a></b></p><br /><br />
|
6 |
-
<h3>What is Final Fantasy VII?</h3>
|
7 |
-
<p>Final Fantasy VII is a 1997 role-playing video game developed by Square for the PlayStation console. It is the seventh main installment in the Final Fantasy series, which is known for its epic stories, memorable characters, and innovative gameplay. The game's story follows Cloud Strife, a mercenary who joins an eco-terrorist organization called AVALANCHE to stop a world-controlling megacorporation called Shinra from using the planet's life essence as an energy source. Along the way, he and his allies face Sephiroth, a superhuman who seeks to wound the planet and harness its healing power in order to be reborn as a demigod. The game features 3D graphics, full motion video, and a turn-based combat system with real-time elements. The game has been widely praised for its story, characters, music, and gameplay, and has sold over 13 million copies worldwide. It has also spawned several spin-offs, sequels, and a remake.</p>
|
8 |
-
<h3>What is a lossless repack?</h3>
|
9 |
-
<p>A lossless repack is a compressed version of a game that retains all the original data and quality without any loss or degradation. A lossless repack reduces the size of the game file by removing unnecessary or redundant data, such as languages, videos, or sounds that are not used by the player. A lossless repack also includes a crack that bypasses the copy protection or digital rights management (DRM) of the game, allowing it to be played without a license or activation. A lossless repack is different from a lossy repack, which reduces the size of the game file by lowering the quality or resolution of some data, such as graphics or audio.</p>
|
10 |
-
<h3>Who is RG ILITA?</h3>
|
11 |
-
<p>RG ILITA is a group of hackers and programmers who specialize in creating lossless repacks of games for PC. They are known for their high-quality repacks that are fast to download and easy to install. They also provide detailed instructions and troubleshooting tips for their repacks. RG ILITA stands for Russian Guys In Lossless IT Adventures.</p>
|
12 |
-
<h2>How to download and install Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free</h2>
|
13 |
-
<h3>Requirements</h3>
|
14 |
-
<p>Before you download and install Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free, you need to make sure that your PC meets the minimum requirements for the game. Here are the minimum requirements:</p>
|
15 |
-
<table>
|
16 |
-
<tr>
|
17 |
-
<th>OS</th>
|
18 |
-
<th>Processor</th>
|
19 |
-
<th>Memory</th>
|
20 |
-
<th>Graphics</th>
|
21 |
-
<th>DirectX</th>
|
22 |
-
<th>Hard Drive</th>
|
23 |
-
</tr>
|
24 |
-
<tr>
|
25 |
-
<td>Microsoft Windows XP/Vista/7 (32/64bits)</td>
|
26 |
-
<td>2GHz</td>
|
27 |
-
<td>1 GB RAM</td>
|
28 |
-
<td>DirectX 9.0c-compatible graphic card</td>
|
29 |
-
<td>9.0c</td>
|
30 |
-
<td>3 GB HD space</td>
|
31 |
-
</tr>
|
32 |
-
</table>
|
33 |
-
<p>You also need to have a Square Enix account to play the game.</p>
|
34 |
-
<h3>Steps</h3>
|
35 |
-
<p>To download and install Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free, follow these steps:</p>
|
36 |
-
<ol>
|
37 |
-
<li>Go to <a href="https://rgilita.com/final-fantasy-vii-eng-lossless-repack-by-rg-ilita-crack-free/">https://rgilita.com/final-fantasy-vii-eng-lossless-repack-by-rg-ilita-crack-free/</a>, which is the official website of RG ILITA.</li>
|
38 |
-
<li>Click on the "Download" button and choose a mirror site from where you want to download the game file.</li>
|
39 |
-
<li>The game file is about 1.5 GB in size and will be downloaded as a .rar archive.</li>
|
40 |
-
<li>Extract the .rar archive using WinRAR or any other software that can handle .rar files.</li>
|
41 |
-
<li>You will get a folder named "Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA". Open it and run "setup.exe" as administrator.</li>
|
42 |
-
<li>Follow the instructions on the screen and choose where you want to install the game.</li>
|
43 |
-
<li>The installation process will take some time depending on your PC's performance.</li>
|
44 |
-
<li>After the installation is complete, run "FF7_Launcher.exe" from the game folder as administrator.</li>
|
45 |
-
<li>Login with your Square Enix account or create one if you don't have one.</li>
|
46 |
-
<li>You can now play Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free on your PC!</li>
|
47 |
-
</ol>
|
48 |
-
<h3>Troubleshooting</h3>
|
49 |
-
<p>If you encounter any problems while downloading or installing Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free, here are some possible solutions:</p>
|
50 |
-
<ul>
|
51 |
-
<li>If you get an error message saying "The program can't start because MSVCR100.dll is missing from your computer", you need to install Microsoft Visual C++ 2010 Redistributable Package from <a href="https://www.microsoft.com/en-us/download/details.aspx?id=5555">https://www.microsoft.com/en-us/download/details.aspx?id=5555</a>.</li>
|
52 |
-
<li>If you get an error message saying "The program can't start because MSVCP100.dll is missing from your computer", you need to install Microsoft Visual C++ 2010 SP1 Redistributable Package from <a href="https://www.microsoft.com/en-us/download/details.aspx?id=8328">https://www.microsoft.com/en-us/download/details.aspx?id=8328</a>.</li>
|
53 |
-
<li>If you get an error message saying "The program can't start because MSVCR120.dll is missing from your computer", you need to install Microsoft Visual C++ 2013 Redistributable Package from <a href="https://www.microsoft.com/en-us/download/details.aspx?id=40784">https://www.microsoft.com/en-us/download/details.aspx?id=40784</a>.</li>
|
54 |
-
<li>If you get an error message saying "The program can't start because MSVCP120.dll is missing from your computer", you need to install Microsoft Visual C++ 2013 Redistributable Package from <a href="https://www.microsoft.com/en-us/download/details.aspx?id=40784">https://www.microsoft.com/en-us/download/details.aspx?id=40784</a>.</li>
|
55 |
-
<li>If you get an error message saying "The program can't start because api-ms-win-crt-runtime-l1-1-0.dll is missing from your computer", you need to install Windows Update KB2999226 from <a href="https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows">https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows</a>.</li>
|
56 |
-
<li>If you get an error message saying "The program can't start because api-ms-win-crt-heap-l1-1-0.dll is missing from your computer", you need to install Windows Update KB2999226 from <a href="https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows">https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows</a>.</li>
|
57 |
-
<li>If you Here is the continuation of the article. <h2>Why play Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free</h2>
|
58 |
-
<h3>Benefits of a lossless repack</h3>
|
59 |
-
<p>By downloading and playing Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free, you can enjoy several benefits that a lossless repack offers, such as:</p>
|
60 |
-
<p>Download Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA no virus<br />
|
61 |
-
How to install Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA on PC<br />
|
62 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA gameplay and review<br />
|
63 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA torrent magnet link<br />
|
64 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA system requirements and compatibility<br />
|
65 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA update and patch notes<br />
|
66 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA mods and cheats<br />
|
67 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA best settings and optimization<br />
|
68 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA walkthrough and guide<br />
|
69 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA comparison and benchmark<br />
|
70 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack fix and error solutions<br />
|
71 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA online multiplayer and co-op<br />
|
72 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA DLC and bonus content<br />
|
73 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA secrets and easter eggs<br />
|
74 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA remake and remaster differences<br />
|
75 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA soundtrack and OST download<br />
|
76 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA speedrun and challenge mode<br />
|
77 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA tips and tricks for beginners<br />
|
78 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA characters and story analysis<br />
|
79 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA fan art and cosplay gallery<br />
|
80 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA trivia and fun facts<br />
|
81 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA alternatives and similar games<br />
|
82 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA ranking and rating among RPGs<br />
|
83 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA developer interview and behind the scenes<br />
|
84 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA merchandise and collectibles<br />
|
85 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA emulator and controller support<br />
|
86 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA achievements and trophies guide<br />
|
87 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA save file and backup location<br />
|
88 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA voice actors and cast members<br />
|
89 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA memes and jokes compilation<br />
|
90 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA opinions and feedback from players<br />
|
91 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA history and legacy of the franchise<br />
|
92 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA references and influences in pop culture<br />
|
93 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA theories and speculations about the plot<br />
|
94 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA differences between original and re-release versions<br />
|
95 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA side quests and optional content guide<br />
|
96 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA combat system and mechanics explanation<br />
|
97 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA graphics and performance improvement mods<br />
|
98 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA recommended party members and equipment choices<br />
|
99 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA endings and epilogue scenes video<br />
|
100 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA fan-made projects and spin-offs <br />
|
101 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA cloud saves and cross-play features <br />
|
102 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA localization and translation issues <br />
|
103 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA censorship and controversy topics <br />
|
104 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA VR support and compatibility <br />
|
105 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA modding tools and tutorials <br />
|
106 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA official website and social media links <br />
|
107 |
-
Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA future plans and announcements</p>
|
108 |
-
<ul>
|
109 |
-
<li>Smaller file size: A lossless repack reduces the size of the game file by removing unnecessary or redundant data, such as languages, videos, or sounds that are not used by the player. This means that you can save disk space and bandwidth when downloading and installing the game.</li>
|
110 |
-
<li>Faster download and installation: A smaller file size also means that you can download and install the game faster than the original version. This is especially useful if you have a slow internet connection or a limited data plan.</li>
|
111 |
-
<li>No quality loss: A lossless repack retains all the original data and quality of the game without any loss or degradation. This means that you can enjoy the game as it was intended by the developers, with no compromise on graphics, audio, or gameplay.</li>
|
112 |
-
<li>No license or activation required: A lossless repack includes a crack that bypasses the copy protection or digital rights management (DRM) of the game, allowing it to be played without a license or activation. This means that you can play the game without any restrictions or limitations, such as online verification, region lock, or time limit.</li>
|
113 |
-
</ul>
|
114 |
-
<h3>Features of Final Fantasy VII</h3>
|
115 |
-
<p>Final Fantasy VII is not only a classic role-playing game, but also a masterpiece of storytelling, character development, and gameplay. By playing Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free, you can experience the following features of the game:</p>
|
116 |
-
<ul>
|
117 |
-
<li>A captivating story: Final Fantasy VII has a rich and complex story that spans over four discs and involves multiple plot twists, flashbacks, and revelations. The story explores themes such as environmentalism, identity, love, betrayal, and redemption. The story also features memorable characters, such as Cloud Strife, Aerith Gainsborough, Sephiroth, Tifa Lockhart, Barret Wallace, and many more.</li>
|
118 |
-
<li>A dynamic combat system: Final Fantasy VII has a turn-based combat system with real-time elements. The combat system allows you to control up to three characters in a party and use various commands, such as attack, magic, summon, item, and limit break. The combat system also features the Materia system, which lets you customize your characters' abilities by equipping them with magical orbs called Materia. The Materia system allows you to create different combinations of spells and skills depending on your preference and strategy.</li>
|
119 |
-
<li>A stunning soundtrack: Final Fantasy VII has a beautiful and diverse soundtrack composed by Nobuo Uematsu. The soundtrack consists of over 80 tracks that range from orchestral to rock to techno. The soundtrack also includes iconic themes, such as "One-Winged Angel", "Aerith's Theme", "The Prelude", and "Main Theme of Final Fantasy VII". The soundtrack enhances the mood and atmosphere of the game and creates an emotional connection with the player.</li>
|
120 |
-
<li>A vast world to explore: Final Fantasy VII has a huge and varied world to explore that consists of different locations, such as cities, towns, villages, forests, deserts, mountains, oceans, and even outer space. The world also contains hidden secrets, side quests, mini-games, and optional bosses that add more depth and replay value to the game. The world also changes depending on your actions and decisions in the game.</li>
|
121 |
-
</ul>
|
122 |
-
<h3>Reviews and ratings of Final Fantasy VII</h3>
|
123 |
-
<p>Final Fantasy VII has received widespread critical acclaim and commercial success since its release in 1997. It has won numerous awards and accolades from various publications and organizations. It has also been ranked among the best video games of all time by many critics and fans. Here are some examples of reviews and ratings of Final Fantasy VII:</p>
|
124 |
-
<table>
|
125 |
-
<tr>
|
126 |
-
<th>Publication / Organization</th>
|
127 |
-
<th>Score / Rating</th>
|
128 |
-
<th>Comment</th>
|
129 |
-
</tr>
|
130 |
-
<tr>
|
131 |
-
<td>Metacritic</td>
|
132 |
-
<td>92/100 (based on 20 reviews)</td>
|
133 |
-
<td>"Final Fantasy VII is one of those rare games that manages to transcend its genre and appeal to gamers of all tastes."</td>
|
134 |
-
</tr>
|
135 |
-
<tr>
|
136 |
-
<td>GameSpot</td>
|
137 |
-
<td>9.5/10</td>
|
138 |
-
<td>"Final Fantasy VII is quite simply one of the best games ever made."</td>
|
139 |
-
</tr>
|
140 |
-
<tr>
|
141 |
-
<td>IGN</td>
|
142 |
-
<td>9.5/10</td>
|
143 |
-
<td>"Final Fantasy VII is an experience that everyone should try at least once."</td>
|
144 |
-
</tr>
|
145 |
-
<tr>
|
146 |
-
<td>Famitsu</td>
|
147 |
-
<td>38/40</td>
|
148 |
-
<td>"Final Fantasy VII is a masterpiece that will remain in history."</td>
|
149 |
-
</tr>
|
150 |
-
<tr>
|
151 |
-
<td>Gamerankings</td>
|
152 |
-
<td>92.39% (based on 46 reviews)</td>
|
153 |
-
<td>"Final Fantasy VII is a landmark title that revolutionized the role-playing genre."</td>
|
154 |
-
</tr>
|
155 |
-
<tr Here is the continuation of the article. <h2>Conclusion</h2>
|
156 |
-
<p>Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free is a great way to enjoy one of the best role-playing games of all time on your PC. By downloading and playing this lossless repack, you can save disk space and bandwidth, experience the game in its original quality, and play it without any license or activation issues. You can also enjoy the features of Final Fantasy VII, such as its captivating story, dynamic combat system, stunning soundtrack, and vast world to explore. If you are a fan of Final Fantasy VII or role-playing games in general, you should not miss this opportunity to download and play Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free.</p>
|
157 |
-
<h2>FAQs</h2>
|
158 |
-
<p>Here are some frequently asked questions about Final Fantasy VII - (ENG) [Lossless Repack] by RG ILITA crack free:</p>
|
159 |
-
<ol>
|
160 |
-
<li>Q: Is this repack safe and virus-free?<br>A: Yes, this repack is safe and virus-free. RG ILITA is a reputable group of hackers and programmers who create high-quality lossless repacks of games for PC. They also provide detailed instructions and troubleshooting tips for their repacks. You can trust their repacks and download them without any worries.</li>
|
161 |
-
<li>Q: Is this repack the full game or only a part of it?<br>A: This repack is the full game of Final Fantasy VII. It covers the entire story of the original game, from the beginning to the end. It also includes all the bonus content and features that were added in later releases, such as the International version and the Remaster version.</li>
|
162 |
-
<li>Q: How long does it take to download and install this repack?<br>A: The download and installation time depends on your internet speed and PC performance. The game file is about 1.5 GB in size and will be downloaded as a .rar archive. The installation process will take some time depending on your PC's performance. On average, it may take about 15 minutes to download and 30 minutes to install.</li>
|
163 |
-
<li>Q: Can I play this repack online or with other players?<br>A: No, this repack is only for single-player mode. You cannot play this repack online or with other players. This repack includes a crack that bypasses the copy protection or digital rights management (DRM) of the game, which prevents it from connecting to online servers or multiplayer modes.</li>
|
164 |
-
<li>Q: What are the differences between this repack and the original game?<br>A: The main difference between this repack and the original game is the file size. This repack reduces the size of the game file by removing unnecessary or redundant data, such as languages, videos, or sounds that are not used by the player. This means that you can save disk space and bandwidth when downloading and installing the game. However, there is no quality loss or gameplay change in this repack. You can enjoy the game as it was intended by the developers, with no compromise on graphics, audio, or gameplay.</li>
|
165 |
-
</ol>
|
166 |
-
</p> 0a6ba089eb<br />
|
167 |
-
<br />
|
168 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/Cars 2 Tamil Dubbed Movie Torrent Download [Extra Quality].md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>Cars 2 Tamil Dubbed Movie Torrent Download</h2><br /><p><b><b>Download File</b> ✪ <a href="https://imgfil.com/2uy0Yc">https://imgfil.com/2uy0Yc</a></b></p><br /><br />
|
2 |
-
|
3 |
-
More Tamil Movies, comedy, songs videos ... 2020 Tamil Horror Movie - Bloody Ballet - Fantasma || Hollywood ... 1fdad05405<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1gistliPinn/ChatGPT4/Examples/El Adefesio Rafael Alberti Pdf Free Download.md
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
<h2>el adefesio rafael alberti pdf download</h2><br /><p><b><b>Download</b> ⭐ <a href="https://imgfil.com/2uy1oB">https://imgfil.com/2uy1oB</a></b></p><br /><br />
|
2 |
-
|
3 |
-
Leer la poesÃa Canción a AltaÃr del poeta Rafael Alberti en el sitio ... En nuestro sitio, le ofrecemos descargar el libro Canciones para altair a PDF de ... El trébol florido (1940), El adefesio (1944), La Gallarda (1944-45) y Noche de guerra. 1fdad05405<br />
|
4 |
-
<br />
|
5 |
-
<br />
|
6 |
-
<p></p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Crazy Drop A Fun and Addictive Game to Collect Coins and Rewards.md
DELETED
@@ -1,98 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>Crazy Drop APK: A Fun and Addictive Casual Game for Android</h1>
|
3 |
-
<p>If you are looking for a casual game that can keep you entertained and relaxed, you should try Crazy Drop APK. This is a game that lets you drop balls and hit targets, collect coins and gems, and win rewards. It is simple, fun, and addictive. In this article, we will tell you more about what Crazy Drop APK is, how to play it, why you should play it, and how to download and install it on your Android device.</p>
|
4 |
-
<h2>crazy drop apk</h2><br /><p><b><b>DOWNLOAD</b> ► <a href="https://urlin.us/2uSS67">https://urlin.us/2uSS67</a></b></p><br /><br />
|
5 |
-
<h2>What is Crazy Drop APK?</h2>
|
6 |
-
<p>Crazy Drop APK is a casual game developed by DoraBmonkey, a developer that specializes in creating games that are easy to play but hard to put down. Crazy Drop APK is one of their most popular games, with over 84 downloads on Uptodown.com. Here are some of the features of Crazy Drop APK:</p>
|
7 |
-
<h3>A game that lets you drop balls and win rewards</h3>
|
8 |
-
<p>In Crazy Drop APK, your goal is to drop balls from the top of the screen and hit the targets at the bottom. The targets can be coins, gems, power-ups, or bonuses. The more targets you hit, the more rewards you get. You can use the rewards to unlock more balls and levels in the game.</p>
|
9 |
-
<h3>A game that has simple controls and graphics</h3>
|
10 |
-
<p>Crazy Drop APK is a game that anyone can play, regardless of their skill level or experience. The controls are very simple: just tap the screen to drop a ball. The graphics are also simple but colorful and appealing. The game has a minimalist design that does not distract from the gameplay.</p>
|
11 |
-
<h3>A game that is free to download and play</h3>
|
12 |
-
<p>Crazy Drop APK is a game that does not cost you anything to download or play. You can enjoy the game without spending any money. However, if you want to support the developer or get some extra features, you can also make in-app purchases in the game.</p>
|
13 |
-
<h2>How to play Crazy Drop APK?</h2>
|
14 |
-
<p>Crazy Drop APK is a game that is easy to learn but hard to master. Here are some tips on how to play it:</p>
|
15 |
-
<h3>Tap to drop balls and hit targets</h3>
|
16 |
-
<p>The basic gameplay of Crazy Drop APK is very simple: just tap the screen to drop a ball from the top of the screen. The ball will bounce off the pegs and obstacles on its way down, and hopefully hit some targets at the bottom. The targets can be coins, gems, power-ups, or bonuses. The more targets you hit, the higher your score will be.</p>
|
17 |
-
<h3>Collect coins and gems to unlock more balls and levels</h3>
|
18 |
-
<p>As you play Crazy Drop APK, you will collect coins and gems as rewards. You can use these coins and gems to unlock more balls and levels in the game. Each ball has a different shape, color, size, and speed. Each level has a different layout, difficulty, and goal. You can choose the ball and level that suit your preference and challenge yourself.</p>
|
19 |
-
<p>crazy drop android game download<br />
|
20 |
-
crazy drop mod apk unlimited coins<br />
|
21 |
-
crazy drop game online free<br />
|
22 |
-
crazy drop apk latest version<br />
|
23 |
-
crazy drop game review<br />
|
24 |
-
crazy drop apk for pc<br />
|
25 |
-
crazy drop mod apk download<br />
|
26 |
-
crazy drop game tips and tricks<br />
|
27 |
-
crazy drop apk old version<br />
|
28 |
-
crazy drop game hack<br />
|
29 |
-
crazy drop apk no ads<br />
|
30 |
-
crazy drop game play store<br />
|
31 |
-
crazy drop apk uptodown<br />
|
32 |
-
crazy drop game strategy<br />
|
33 |
-
crazy drop apk modradar<br />
|
34 |
-
crazy drop game cheats<br />
|
35 |
-
crazy drop apk pure<br />
|
36 |
-
crazy drop game features<br />
|
37 |
-
crazy drop apk combo<br />
|
38 |
-
crazy drop game rules<br />
|
39 |
-
crazy drop apk mirror<br />
|
40 |
-
crazy drop game guide<br />
|
41 |
-
crazy drop apk android 1<br />
|
42 |
-
crazy drop game levels<br />
|
43 |
-
crazy drop apk rexdl<br />
|
44 |
-
crazy drop game rewards<br />
|
45 |
-
crazy drop apk mob.org<br />
|
46 |
-
crazy drop game challenges<br />
|
47 |
-
crazy drop apk apkpure<br />
|
48 |
-
crazy drop game coins<br />
|
49 |
-
crazy drop apk happymod<br />
|
50 |
-
crazy drop game screenshots<br />
|
51 |
-
crazy drop apk android oyun club<br />
|
52 |
-
crazy drop game developer<br />
|
53 |
-
crazy drop apk revdl<br />
|
54 |
-
crazy drop game rating<br />
|
55 |
-
crazy drop apk malavida<br />
|
56 |
-
crazy drop game support<br />
|
57 |
-
crazy drop apk apkmirror<br />
|
58 |
-
crazy drop game update<br />
|
59 |
-
crazy drop apk apkmody<br />
|
60 |
-
crazy drop game genre<br />
|
61 |
-
crazy drop apk apknite<br />
|
62 |
-
crazy drop game forum<br />
|
63 |
-
crazy drop apk apktada.com</p>
|
64 |
-
<h3>Use power-ups and bonuses to boost your score and chances</h3>
|
65 |
-
<p>Crazy Drop APK also has some power-ups and bonuses that can help you improve your performance in the game. For example, you can use the magnet power-up to attract more coins and gems, or the bomb power-up to explode more targets. You can also get bonuses such as extra balls, extra time, or extra lives. These power-ups and bonuses can boost your score and chances of completing the level.</p>
|
66 |
-
<h2>Why should you play Crazy Drop APK?</h2>
|
67 |
-
<p>Crazy Drop APK is a game that has many benefits for its players. Here are some of the reasons why you should play it:</p>
|
68 |
-
<h3>It is fun and relaxing to play</h3>
|
69 |
-
<p>Crazy Drop APK is a game that can make you feel happy and relaxed. It is a game that does not require too much thinking or strategy, but rather relies on your intuition and luck. It is a game that can help you relieve stress and boredom, and enjoy some quality time.</p>
|
70 |
-
<h3>It is challenging and rewarding to master</h3>
|
71 |
-
<p>Crazy Drop APK is also a game that can challenge and reward you. It is a game that has many levels and goals to complete, each with different difficulties and obstacles. It is a game that can test your skills and reflexes, and make you feel proud and satisfied when you achieve a high score or clear a level.</p>
|
72 |
-
<h3>It is suitable for all ages and preferences</h3>
|
73 |
-
<p>Crazy Drop APK is a game that can appeal to anyone, regardless of their age or preference. It is a game that has simple but attractive graphics, sound effects, and music. It is a game that has easy but addictive gameplay, with various balls and levels to choose from. It is a game that can be played anytime, anywhere, with or without an internet connection.</p>
|
74 |
-
<h2>How to download and install Crazy Drop APK?</h2>
|
75 |
-
<p>If you are interested in playing Crazy Drop APK, you can download and install it on your Android device easily. Here are the steps to follow:</p>
|
76 |
-
<h3>Download the APK file from Uptodown.com</h3>
|
77 |
-
<p>The first step is to download the APK file of Crazy Drop APK from Uptodown.com, a website that offers safe and verified APK files for Android apps and games. You can use the link below to access the download page of Crazy Drop APK:</p>
|
78 |
-
<p><a href="">Crazy Drop APK 1.0.0 for Android - Download</a></p>
|
79 |
-
<h3>Enable unknown sources on your device settings</h3>
|
80 |
-
<p>The second step is to enable unknown sources on your device settings, which will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security or privacy, then toggle on the option for unknown sources.</p>
|
81 |
-
<h3>Install the APK file and enjoy the game</h3>
|
82 |
-
<p>The final step is to install the APK file of Crazy Drop APK on your device. To do this, locate the downloaded file in your file manager or downloads folder, then tap on it to start the installation process. Follow the instructions on the screen, then wait for the installation to finish. Once done, you can launch the game and enjoy it.</p>
|
83 |
-
<h2>Conclusion</h2>
|
84 |
-
<p>Crazy Drop APK is a fun and addictive casual game for Android that lets you drop balls and hit targets, collect coins and gems, and win rewards. It is a game that has simple controls and graphics, but challenging and rewarding gameplay. It is a game that is free to download and play, but also offers in-app purchases for extra features. It is a game that is suitable for all ages and preferences, and can be played anytime, anywhere. If you are looking for a casual game that can keep you entertained and relaxed, you should try Crazy Drop APK.</p>
|
85 |
-
<h2>FAQs</h2>
|
86 |
-
<p>Here are some of the frequently asked questions about Crazy Drop APK:</p>
|
87 |
-
<h4>Q: How many balls and levels are there in Crazy Drop APK?</h4>
|
88 |
-
<p>A: There are over 100 balls and over 1000 levels in Crazy Drop APK, each with different shapes, colors, sizes, speeds, layouts, difficulties, and goals.</p>
|
89 |
-
<h4>Q: How can I get more coins and gems in Crazy Drop APK?</h4>
|
90 |
-
<p>A: You can get more coins and gems in Crazy Drop APK by hitting more targets, completing more levels, watching ads, or making in-app purchases.</p>
|
91 |
-
<h4>Q: What are the power-ups and bonuses in Crazy Drop APK?</h4>
|
92 |
-
<p>A: The power-ups in Crazy Drop APK are magnet, bomb, fireball, lightning, rainbow, starburst, and shield. The bonuses are extra balls, extra time, extra lives, and double coins.</p>
|
93 |
-
<h4>Q: What are the requirements to play Crazy Drop APK?</h4>
|
94 |
-
<p>A: You need an Android device with Android 5.0 or higher version, and at least 50 MB of free space on your device. You also need an internet connection to download and install the game, and to access some of the features such as ads and in-app purchases.</p>
|
95 |
-
<h4>Q: Is Crazy Drop APK safe and secure to download and play?</h4>
|
96 |
-
<p>A: Yes, Crazy Drop APK is safe and secure to download and play. The APK file is verified by Uptodown.com, a trusted website that offers safe and verified APK files for Android apps and games. The game does not contain any viruses, malware, or spyware that can harm your device or data.</p> 197e85843d<br />
|
97 |
-
<br />
|
98 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download 5 Nights At Freddy 39s 2 [VERIFIED].md
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Five Nights at Freddy's 2</h1>
|
3 |
-
<p>If you are a fan of horror games, you might have heard of Five Nights at Freddy's 2, a sequel to the popular indie game Five Nights at Freddy's. In this game, you play as a night guard at a pizzeria where animatronic characters come alive at night and try to kill you. You have to survive five nights using a mask, a flashlight, cameras, and other tools.</p>
|
4 |
-
<p>Five Nights at Freddy's 2 is a thrilling and challenging game that will keep you on edge with its jumpscares, suspenseful atmosphere, and creepy characters. It has received positive reviews from critics and players alike, and has spawned several spin-offs and adaptations.</p>
|
5 |
-
<h2>download 5 nights at freddy 39;s 2</h2><br /><p><b><b>Download File</b> ✵✵✵ <a href="https://jinyurl.com/2uNOi0">https://jinyurl.com/2uNOi0</a></b></p><br /><br />
|
6 |
-
<p>If you want to download Five Nights at Freddy's 2 on your PC, you will need a few things first. Here are some steps you can follow to get the game safely and easily.</p>
|
7 |
-
<h2>What You Need to Download the Game</h2>
|
8 |
-
<h3>A PC with Windows OS</h3>
|
9 |
-
<p>Five Nights at Freddy's 2 is compatible with Windows XP, Vista, 7, 8, and 10 operating systems. You will also need at least 1 GB of RAM, 250 MB of free disk space, and a 2 GHz processor. You can check your PC specs by right-clicking on My Computer or This PC icon and selecting Properties.</p>
|
10 |
-
<h3>A verified download platform</h3>
|
11 |
-
<p>It is important to use a verified download platform when you want to download games online. This way, you can avoid viruses, malware, fraud, and other risks that might harm your PC or compromise your personal information. Some of the verified platforms that you can use to download Five Nights at Freddy's 2 are Steam, Epic Games Downloader, and Microsoft Store. These platforms have secure payment methods, customer support, and user reviews that can help you make an informed decision.</p>
|
12 |
-
<h3>A stable internet connection</h3>
|
13 |
-
<p>You will also need a stable internet connection to download the game without interruptions or errors. You can check your internet speed by using online tools such as Speedtest or Fast. You will need at least 5 Mbps of download speed and 1 Mbps of upload speed to download the game smoothly. If your internet connection is slow or unstable, you might want to pause other downloads, close unnecessary tabs or apps, or use a wired connection instead of Wi-Fi.</p>
|
14 |
-
<p></p>
|
15 |
-
<h2>How to Download the Game from Steam</h2>
|
16 |
-
<h3>Create a Steam account or log in</h3>
|
17 |
-
<p>Steam is one of the most popular and trusted platforms for downloading games online. It has a large library of games, including Five Nights at Freddy's 2, and offers discounts, achievements, cloud saves, and other features. To download the game from Steam, you will need to create a Steam account or log in to your existing one. You can do this by visiting the Steam website or downloading the Steam app on your PC. You will need to provide your email address, password, username, and other details to create an account.</p>
|
18 |
-
<h3>Search for the game on Steam</h3>
|
19 |
-
<p>Once you have logged in to your Steam account, you can search for Five Nights at Freddy's 2 on the Steam store. You can use the search bar on the top right corner of the screen or browse through the categories and genres. You can also filter your search by price, rating, release date, and other criteria. When you find the game, click on it to see more information, such as screenshots, videos, description, system requirements, and user reviews.</p>
|
20 |
-
<h3>Purchase and install the game on Steam</h3>
|
21 |
-
<p>If you decide to buy the game, you will need to add it to your cart and proceed to checkout. You will need to choose your payment method, such as credit card, PayPal, or Steam Wallet. You will also need to agree to the terms and conditions and confirm your purchase. The game costs $7.99 on Steam as of June 2023.</p>
|
22 |
-
<p>After you have purchased the game, you can download and install it on your PC. You can do this by going to your library and clicking on the game. You will see a button that says "Install" or "Play" depending on whether you have installed it before or not. Click on it and follow the instructions to start the download and installation process. The game size is about 215 MB on Steam.</p>
|
23 |
-
<h2>How to Download the Game from Epic Games Downloader</h2>
|
24 |
-
<h3>Download and install Epic Games Downloader</h3>
|
25 |
-
<p>Epic Games Downloader is another platform that you can use to download games online. It is the official launcher for games developed by Epic Games, such as Fortnite and Unreal Engine, but it also offers other games from different developers and publishers. To download Five Nights at Freddy's 2 from Epic Games Downloader, you will need to download and install the Epic Games Downloader app on your PC first. You can do this by visiting the Epic Games website and clicking on "Get Epic Games" on the top right corner of the screen.</p> <p>After you have downloaded the app, you will need to run it and follow the instructions to install it on your PC. You will also need to create an Epic Games account or log in to your existing one. You can do this by clicking on "Sign In" or "Sign Up" on the top right corner of the app. You will need to provide your email address, password, username, and other details to create an account.</p>
|
26 |
-
<h3>Search for the game on Epic Games Downloader</h3>
|
27 |
-
<p>Once you have logged in to your Epic Games account, you can search for Five Nights at Freddy's 2 on the Epic Games Downloader app. You can use the search bar on the top left corner of the screen or browse through the categories and genres. You can also filter your search by price, rating, release date, and other criteria. When you find the game, click on it to see more information, such as screenshots, videos, description, system requirements, and user reviews.</p>
|
28 |
-
<h3>Purchase and install the game on Epic Games Downloader</h3>
|
29 |
-
<p>If you decide to buy the game, you will need to add it to your cart and proceed to checkout. You will need to choose your payment method, such as credit card, PayPal, or Epic Games Wallet. You will also need to agree to the terms and conditions and confirm your purchase. The game costs $7.99 on Epic Games Downloader as of June 2023.</p>
|
30 |
-
<p>After you have purchased the game, you can download and install it on your PC. You can do this by going to your library and clicking on the game. You will see a button that says "Install" or "Play" depending on whether you have installed it before or not. Click on it and follow the instructions to start the download and installation process. The game size is about 215 MB on Epic Games Downloader.</p>
|
31 |
-
<h2>How to Download the Game from Microsoft Store</h2>
|
32 |
-
<h3>Open the Microsoft Store app or website</h3>
|
33 |
-
<p>Microsoft Store is another platform that you can use to download games online. It is the official store for Windows apps and games, and it also offers movies, music, books, and other content. To download Five Nights at Freddy's 2 from Microsoft Store, you will need to open the Microsoft Store app or website on your PC. You can do this by clicking on the Microsoft Store icon on your taskbar or Start menu, or by visiting the Microsoft Store website on your browser.</p>
|
34 |
-
<h3>Search for the game on Microsoft Store</h3>
|
35 |
-
<p>Once you have opened the Microsoft Store app or website, you can search for Five Nights at Freddy's 2 on the Microsoft Store. You can use the search bar on the top right corner of the screen or browse through the categories and genres. You can also filter your search by price, rating, release date, and other criteria. When you find the game, click on it to see more information, such as screenshots, videos, description, system requirements, and user reviews.</p>
|
36 |
-
<h3>Purchase and install the game on Microsoft Store</h3>
|
37 |
-
<p>If you decide to buy the game, you will need to add it to your cart and proceed to checkout. You will need to choose your payment method, such as credit card, PayPal, or Microsoft account balance. You will also need to agree to the terms and conditions and confirm your purchase. The game costs $7.99 on Microsoft Store as of June 2023.</p>
|
38 |
-
<p>After you have purchased the game, you can download and install it on your PC. You can do this by going to your library and clicking on the game. You will see a button that says "Install" or "Play" depending on whether you have installed it before or not. Click on it and follow the instructions to start the download and installation process. The game size is about 215 MB on Microsoft Store.</p>
|
39 |
-
<h2>How to Play the Game Safely and Enjoyably</h2>
|
40 |
-
<h3>Check your antivirus software and firewall settings</h3>
|
41 |
-
<p>Before you start playing Five Nights at Freddy's 2, you should make sure that your antivirus software and firewall settings are up to date and allow you to play the game safely. Some antivirus software or firewall settings might block or interfere with the game's files or functions, causing errors or crashes. To avoid this, you should whitelist or exclude the game's folder or executable file from your antivirus software or firewall settings. You can also temporarily disable them while playing the game if necessary.</p>
|
42 |
-
<h3>Adjust your graphics and sound settings</h3>
|
43 |
-
<p>To enjoy Five Nights at Freddy's 2 fully, you should also adjust your graphics and sound settings according to your preferences and PC capabilities. You can access these settings by clicking on "Options" on the main menu of the game. You can change the resolution, fullscreen mode, quality, and brightness of the graphics, and the volume and mute options of the sound. You can also enable or disable subtitles and hints. You should choose the settings that suit your taste and make the game run smoothly on your PC.</p>
|
44 |
-
<h3>Follow the game rules and tips</h3>
|
45 |
-
<p>Finally, you should follow the game rules and tips to play the game effectively and survive the five nights. The game rules are explained by a phone call at the beginning of each night, but you can also read them on the main menu by clicking on "How to Play". Some of the basic rules are:</p>
|
46 |
-
<ul>
|
47 |
-
<li>You have to monitor the cameras to keep track of the animatronics' movements and locations.</li>
|
48 |
-
<li>You have to use the mask to fool the animatronics that come into your office, except for Foxy and The Puppet.</li>
|
49 |
-
<li>You have to use the flashlight to check the vents and the hallway, and to ward off Foxy.</li>
|
50 |
-
<li>You have to wind up the music box to keep The Puppet from escaping.</li>
|
51 |
-
<li>You have a limited power supply for your flashlight and cameras, so use them wisely.</li>
|
52 |
-
</ul>
|
53 |
-
<p>Some of the tips that can help you are:</p>
|
54 |
-
<ul>
|
55 |
-
<li>Listen carefully to the sounds and noises that indicate the presence or movement of the animatronics.</li>
|
56 |
-
<li>Check the cameras frequently, but not too much, to avoid wasting power and missing important events.</li>
|
57 |
-
<li>Use the mask as soon as you see an animatronic in your office or in the vents, and take it off as soon as they leave.</li>
|
58 |
-
<li>Wind up the music box regularly, but not too much, to avoid wasting time and power.</li>
|
59 |
-
<li>Don't panic or lose focus when you encounter a jumpscare or a hallucination.</li>
|
60 |
-
</ul>
|
61 |
-
<h2>Conclusion</h2>
|
62 |
-
<p>Five Nights at Freddy's 2 is a fun and scary game that will test your nerves and skills. It is easy to download and play on your PC, as long as you have the right equipment and platform. You can choose from Steam, Epic Games Downloader, or Microsoft Store to get the game safely and easily. You can also adjust your settings and follow some tips to play the game optimally. If you are ready to face the horror of Freddy Fazbear's Pizza, download Five Nights at Freddy's 2 today and see if you can survive!</p>
|
63 |
-
<h2>Frequently Asked Questions</h2>
|
64 |
-
<h3>Is Five Nights at Freddy's 2 a free game?</h3>
|
65 |
-
<p>No, Five Nights at Freddy's 2 is not a free game. It costs $7.99 on Steam, Epic Games Downloader, and Microsoft Store as of June 2023. However, you might find some discounts or offers on these platforms from time to time.</p>
|
66 |
-
<h3>Is Five Nights at Freddy's 2 a multiplayer game?</h3>
|
67 |
-
<p>No, Five Nights at Freddy's 2 is not a multiplayer game. It is a single-player game that requires you to play alone against the animatronics. However, you can share your gameplay videos or screenshots with other players online or watch other players' gameplay videos or streams.</p>
|
68 |
-
<h3>Is Five Nights at Freddy's 2 a scary game?</h3>
|
69 |
-
<p>Yes, Five Nights at Freddy's 2 is a scary game. It is a horror game that features jumpscares, suspenseful atmosphere, creepy characters, and disturbing events. It is not recommended for people who are easily scared or have heart problems. However, some people might find it funny or amusing instead of scary.</p>
|
70 |
-
<h3>Is Five Nights at Freddy's 2 a sequel or a prequel?</h3>
|
71 |
-
<p>Five Nights at Freddy's 2 is a prequel to Five Nights at Freddy's. It takes place in 1987, before the events of Five Nights at Freddy's, which takes place in 1993. It reveals some backstory and lore about the animatronics, the pizzeria, and the phone guy.</p>
|
72 |
-
<h3>Is Five Nights at Freddy's 2 a safe game?</h3>
|
73 |
-
<p>Yes, Five Nights at Freddy's 2 is a safe game. It does not contain any viruses, malware, fraud, or other risks that might harm your PC or compromise your personal information. However, you should always use a verified download platform and check your antivirus software and firewall settings before playing the game.</p> 401be4b1e0<br />
|
74 |
-
<br />
|
75 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/1phancelerku/anime-remove-background/Download Chinese FIFA Mobile and Join the 3rd Anniversary Celebration.md
DELETED
@@ -1,114 +0,0 @@
|
|
1 |
-
|
2 |
-
<h1>How to Download Chinese FIFA Mobile</h1>
|
3 |
-
<p>If you are a fan of FIFA Mobile, you might be interested in trying out the Chinese version of the game. The Chinese version, also known as FIFA World of Football, is developed by Tencent and authorized by FIFA. It offers some unique features and benefits that are not available in other versions of the game. In this article, we will show you how to download and install Chinese FIFA Mobile on your Android or iOS device, and how to play it with English language settings.</p>
|
4 |
-
<h2>What is Chinese FIFA Mobile?</h2>
|
5 |
-
<p>Chinese FIFA Mobile is the first football-themed competitive mobile game to land in the Chinese market. It is the only independent mobile game authorized by FIFA in China. It features more than 10,000 real stars, more than 30 real leagues, and major clubs from all over the world. You can create your own team, compete with other players, and participate in various events and modes.</p>
|
6 |
-
<h2>download chinese fifa mobile</h2><br /><p><b><b>DOWNLOAD</b> ✏ <a href="https://jinyurl.com/2uNO3A">https://jinyurl.com/2uNO3A</a></b></p><br /><br />
|
7 |
-
<h3>The features and benefits of playing Chinese FIFA Mobile</h3>
|
8 |
-
<p>Some of the features and benefits of playing Chinese FIFA Mobile are:</p>
|
9 |
-
<ul>
|
10 |
-
<li>Smooth and realistic gameplay, inherited from the classic terminal game operation. You can choose between two different operating modes: traditional roulette operation and dot and stroke operation. The former is more suitable for competitive players who want to perform difficult operations and skills, while the latter is more user-friendly for casual players who want to enjoy the touch screen advantages.</li>
|
11 |
-
<li>Original development, allowing you to create exclusive superstars freely. You can develop any player you like, even if he is not a superstar in reality, into a top player comparable to Cristiano Ronaldo and Messi. You can also customize your team's formation, tactics, style, and appearance.</li>
|
12 |
-
<li>Rich and diverse content, including various events, modes, challenges, and rewards. You can play in the World Cup mode, where you can represent your country and compete with other players from different regions. You can also play in the Eternal Icon mode, where you can collect legendary players from different eras and form your dream team.</li>
|
13 |
-
</ul>
|
14 |
-
<h3>The differences between Chinese FIFA Mobile and other versions</h3>
|
15 |
-
<p>Some of the differences between Chinese FIFA Mobile and other versions are:</p>
|
16 |
-
<ul>
|
17 |
-
<li>Chinese FIFA Mobile is developed by Tencent, while other versions are developed by EA Sports.</li>
|
18 |
-
<li>Chinese FIFA Mobile requires you to log in with a QQ or WeChat account, while other versions require you to log in with an EA account or a Facebook account.</li>
|
19 |
-
<li>Chinese FIFA Mobile has some exclusive events, modes, players, and items that are not available in other versions.</li>
|
20 |
-
<li>Chinese FIFA Mobile has a different currency system than other versions. The game token "point coupon" purchased by the user in the game can only be used in this application. Virtual currencies on the Tencent platform, such as Q coins and Q points, cannot be used in the game.</li>
|
21 |
-
</ul>
|
22 |
-
<h2>How to download and install Chinese FIFA Mobile on Android devices</h2>
|
23 |
-
<p>If you want to play Chinese FIFA Mobile on your Android device, you need to download and install the APK file from a reliable source. Here are the steps you need to follow:</p>
|
24 |
-
<h3>Step 1: Find a reliable source for the APK file</h3>
|
25 |
-
<p>Since Chinese FIFA Mobile is not available on the Google Play Store, you need to find a trustworthy website that provides the APK file for the game. You can use a search engine or a third-party app store to look for the APK file. Some of the popular websites that offer the APK file are APKPure, APKCombo, and TapTap.</p>
|
26 |
-
<h3>Step 2: Enable unknown sources on your device settings</h3>
|
27 |
-
<p>Before you can install the APK file, you need to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then toggle on the option for unknown sources. You may see a warning message that installing apps from unknown sources may harm your device. Tap OK to proceed.</p>
|
28 |
-
<p>How to download chinese fifa mobile on android<br />
|
29 |
-
Chinese fifa mobile apk download for free<br />
|
30 |
-
Download fifa mobile world cn - fifa足球世界 apk<br />
|
31 |
-
Chinese fifa mobile 3rd anniversary update download<br />
|
32 |
-
Download fifa mobile with eternal icon class and transfer market<br />
|
33 |
-
Chinese version of fifa mobile download for pc<br />
|
34 |
-
Download fifa mobile with new free kick and penalty kick animations<br />
|
35 |
-
Chinese fifa mobile mod apk download unlimited coins and gems<br />
|
36 |
-
Download fifa mobile with 32 qualified national teams and 15 non-qualified national teams<br />
|
37 |
-
Chinese fifa mobile hack apk download no root<br />
|
38 |
-
Download fifa mobile with improved set piece camera and gameplay experience<br />
|
39 |
-
Chinese fifa mobile ios download without jailbreak<br />
|
40 |
-
Download fifa mobile with korea superconducting tokamak advanced research facility<br />
|
41 |
-
Chinese fifa mobile online download play with friends<br />
|
42 |
-
Download fifa mobile with latest players and events<br />
|
43 |
-
Chinese fifa mobile offline download no internet required<br />
|
44 |
-
Download fifa mobile with nexon company and tencent games<br />
|
45 |
-
Chinese fifa mobile 2023 download new season update<br />
|
46 |
-
Download fifa mobile with google play store and aptoide<br />
|
47 |
-
Chinese fifa mobile cheats download get free resources and tips<br />
|
48 |
-
Download fifa mobile with game guardian and aurora store<br />
|
49 |
-
Chinese fifa mobile english patch download change language settings<br />
|
50 |
-
Download fifa mobile with uptodown and apkcombo installer<br />
|
51 |
-
Chinese fifa mobile reddit download join the community and discussion<br />
|
52 |
-
Download fifa mobile with ea sports and electronic arts<br />
|
53 |
-
Chinese fifa mobile facebook download connect with social media and friends<br />
|
54 |
-
Download fifa mobile with google account manager and google play services<br />
|
55 |
-
Chinese fifa mobile youtube download watch videos and tutorials<br />
|
56 |
-
Download fifa mobile with tradutor inglês para português apk<br />
|
57 |
-
Chinese fifa mobile twitter download follow the official account and news<br />
|
58 |
-
Download fifa mobile with lensa dawn ai and wink ai mirror<br />
|
59 |
-
Chinese fifa mobile instagram download share your screenshots and stories<br />
|
60 |
-
Download fifa mobile with geforce now and nintendo switch online<br />
|
61 |
-
Chinese fifa mobile discord download chat with other players and join servers<br />
|
62 |
-
Download fifa mobile with outline vpn and toffee vpn<br />
|
63 |
-
Chinese fifa mobile tiktok download create and watch short videos<br />
|
64 |
-
Download fifa mobile with meitu and faceplay apps<br />
|
65 |
-
Chinese fifa mobile quora download ask and answer questions about the game<br />
|
66 |
-
Download fifa mobile with digiwards and rumble apps<br />
|
67 |
-
Chinese fifa mobile wikipedia download learn more about the game and its history</p>
|
68 |
-
<h3>Step 3: Download and install the APK file</h3>
|
69 |
-
<p>Once you have enabled unknown sources, you can download and install the APK file from the website you chose. Tap on the download button and wait for the file to be downloaded. Then, tap on the file and follow the instructions to install it. You may need to grant some permissions to the app during the installation process.</p>
|
70 |
-
<h3>Step 4: Launch the game and log in with your QQ or WeChat account</h3>
|
71 |
-
<p>After you have installed the game, you can launch it from your app drawer or home screen. You will see a splash screen with the Tencent and FIFA logos, followed by a login screen. You need to log in with your QQ or WeChat account to play the game. If you don't have one, you can create one for free by following the prompts on the screen.</p>
|
72 |
-
<h2>How to download and install Chinese FIFA Mobile on iOS devices</h2>
|
73 |
-
<p>If you want to play Chinese FIFA Mobile on your iOS device, you need to create a Chinese Apple ID and change your region to China. Here are the steps you need to follow:</p>
|
74 |
-
<h3>Step 1: Create a Chinese Apple ID</h3>
|
75 |
-
<p>To create a Chinese Apple ID, you need to have a valid phone number and email address from China. You can use a friend's or a relative's phone number and email address if you have one, or you can use a virtual phone number and email address service such as Dingtone or Temp Mail.</p>
|
76 |
-
<p>Once you have a phone number and email address from China, you can go to appleid.apple.com and click on Create Your Apple ID. Fill in your personal information, including your name, birthday, country or region (choose China), phone number, email address, password, and security questions. Then, verify your phone number and email address by entering the codes sent to them. Finally, agree to the terms and conditions and click on Create Apple ID.</p>
|
77 |
-
<h3>Step 2: Log in with your Chinese Apple ID and change your region to China</h3>
|
78 |
-
<p>After you have created your Chinese Apple ID, you need to log in with it on your iOS device and change your region to China. To do this, go to your device settings, then iTunes & App Store, then tap on your Apple ID and sign out. Then, sign in with your Chinese Apple ID and password.</p>
|
79 |
-
<p>Next, go back to your device settings, then General, then Language & Region, then tap on Region and choose China. You may see a message that changing your region will affect some features of your device. Tap Continue to confirm.</p>
|
80 |
-
<h3>Step 3: Search for FIFA Mobile in the App Store and download it</h3>
|
81 |
-
<p>Once you have logged in with your Chinese Apple ID and changed your region to China, you can search for FIFA Mobile in the App Store and download it. You may need to enter your password or use Touch ID or Face ID to confirm the download.</p>
|
82 |
-
<h3>Step 4: Launch the game and log in with your QQ or WeChat account</h3>
|
83 |
-
<p>After you have downloaded the game, you can launch it from your home screen. You will see a splash screen with the Tencent and FIFA logos, followed by a login screen. You need to log in with your QQ or WeChat account to play the game. If you don't have one, you can create one for free by following the prompts on the screen.</p>
|
84 |
-
<h2>How to play Chinese FIFA Mobile with English language settings</h 2></h2>
|
85 |
-
<p>Unfortunately, Chinese FIFA Mobile does not have an option to change the language settings to English or any other language. The game is only available in Simplified Chinese. However, there are some ways to play the game with English language settings, such as using a third-party app to translate the game interface or using a guide or a video tutorial to navigate the game menus.</p>
|
86 |
-
<h3>Option 1: Use a third-party app to translate the game interface</h3>
|
87 |
-
<p>One way to play Chinese FIFA Mobile with English language settings is to use a third-party app that can translate the game interface in real time. For example, you can use Google Translate, which has a feature called Tap to Translate that allows you to copy and paste any text from any app and get an instant translation.</p>
|
88 |
-
<p>To use this feature, you need to download and install Google Translate from the Google Play Store or the App Store. Then, go to the app settings and enable Tap to Translate. You will see a floating icon on your screen that you can tap to access the translation feature.</p>
|
89 |
-
<p>When you launch Chinese FIFA Mobile, you can use Tap to Translate to copy and paste any text from the game interface and get an English translation. For example, you can copy and paste the text from the game menus, buttons, dialogs, instructions, and notifications. You can also use the camera mode to scan and translate any text on your screen.</p>
|
90 |
-
<p>However, this method has some limitations and drawbacks. First, it may not be very accurate or reliable, as some translations may be incorrect or incomplete. Second, it may be inconvenient or time-consuming, as you have to switch between the game and the translation app frequently. Third, it may affect your game performance or battery life, as you have to run two apps at the same time.</p>
|
91 |
-
<h3>Option 2: Use a guide or a video tutorial to navigate the game menus</h3>
|
92 |
-
<p>Another way to play Chinese FIFA Mobile with English language settings is to use a guide or a video tutorial that can help you navigate the game menus and understand the game features. For example, you can use this guide by FIFA Mobile Reddit user u/antwansturdivant, which explains the basic functions and icons of the game interface.</p>
|
93 |
-
<p>To use this guide, you need to open it on your browser or download it as a PDF file. Then, you can refer to it whenever you need help with the game menus or features. You can also use the screenshots and illustrations provided in the guide to compare them with your game screen.</p>
|
94 |
-
<p>Alternatively, you can use this video tutorial by YouTube user FIFAMobileVN TV, which shows you how to download and install Chinese FIFA Mobile on your Android device and how to play it with English language settings.</p>
|
95 |
-
<p>To use this video tutorial, you need to watch it on your browser or download it as a video file. Then, you can follow the steps and instructions shown in the video while playing the game. You can also pause or rewind the video if you need more time or clarification.</p>
|
96 |
-
<p>However, this method also has some limitations and drawbacks. First, it may not be very comprehensive or updated, as some guides or videos may not cover all aspects of the game or reflect the latest changes or updates. Second, it may be difficult or confusing, as some guides or videos may not be clear or easy to follow. Third, it may require internet access or storage space, as you have to open or download the guides or videos.</p>
|
97 |
-
<h2>Conclusion</h2>
|
98 |
-
<p>In conclusion, Chinese FIFA Mobile is a popular and exciting mobile game that offers some unique features and benefits that are not available in other versions of the game. However, it is only available in Simplified Chinese and requires a QQ or WeChat account to play. If you want to play Chinese FIFA Mobile on your Android or iOS device with English language settings, you can either use a third-party app to translate the game interface or use a guide or a video tutorial to navigate the game menus.</p>
|
99 |
-
<p>We hope this article has helped you learn how to download and install Chinese FIFA Mobile on your device and how to play it with English language settings. If you have any questions or feedback, please feel free to leave a comment below.</p>
|
100 |
-
<h2>FAQs</h2>
|
101 |
-
<ul>
|
102 |
-
<li><b>Q: Is Chinese FIFA Mobile free to play?</b></li>
|
103 |
-
<li>A: Yes, Chinese FIFA Mobile is free to play. However, it may contain some in-app purchases that require real money.</li>
|
104 |
-
<li><b>Q: Is Chinese FIFA Mobile safe to play?</b></li>
|
105 |
-
<li>A: Yes, Chinese FIFA Mobile is safe to play. However, you should always download and install it from a reliable source and scan it for viruses or malware before installing it. You should also be careful about sharing your personal information or payment details with the game or any third-party app.</li>
|
106 |
-
<li><b>Q: Can I play Chinese FIFA Mobile with other players from different regions?</b></li>
|
107 |
-
<li>A: Yes, you can play Chinese FIFA Mobile with other players from different regions. However, you may experience some lag or connection issues due to the distance or the server. You may also encounter some language barriers or cultural differences with other players.</li>
|
108 |
-
<li><b>Q: Can I transfer my progress or data from other versions of FIFA Mobile to Chinese FIFA Mobile?</b></li>
|
109 |
-
<li>A: No, you cannot transfer your progress or data from other versions of FIFA Mobile to Chinese FIFA Mobile. They are separate and independent games that have different developers, accounts, currencies, and systems. You have to start from scratch if you want to play Chinese FIFA Mobile.</li>
|
110 |
-
<li><b>Q: Can I play Chinese FIFA Mobile offline?</b></li>
|
111 |
-
<li>A: No, you cannot play Chinese FIFA Mobile offline. You need an internet connection to play the game, as it is an online multiplayer game that requires constant communication with the server and other players.</li>
|
112 |
-
</ul></p> 401be4b1e0<br />
|
113 |
-
<br />
|
114 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/232labs/VToonify/vtoonify/model/__init__.py
DELETED
File without changes
|
spaces/A00001/bingothoo/src/components/button-scroll-to-bottom.tsx
DELETED
@@ -1,34 +0,0 @@
|
|
1 |
-
'use client'
|
2 |
-
|
3 |
-
import * as React from 'react'
|
4 |
-
|
5 |
-
import { cn } from '@/lib/utils'
|
6 |
-
import { useAtBottom } from '@/lib/hooks/use-at-bottom'
|
7 |
-
import { Button, type ButtonProps } from '@/components/ui/button'
|
8 |
-
import { IconArrowDown } from '@/components/ui/icons'
|
9 |
-
|
10 |
-
export function ButtonScrollToBottom({ className, ...props }: ButtonProps) {
|
11 |
-
const isAtBottom = useAtBottom()
|
12 |
-
|
13 |
-
return (
|
14 |
-
<Button
|
15 |
-
variant="outline"
|
16 |
-
size="icon"
|
17 |
-
className={cn(
|
18 |
-
'fixed right-4 bottom-24 z-50 bg-background transition-opacity duration-300 sm:right-20',
|
19 |
-
isAtBottom ? 'opacity-0' : 'opacity-100',
|
20 |
-
className
|
21 |
-
)}
|
22 |
-
onClick={() =>
|
23 |
-
window.scrollTo({
|
24 |
-
top: document.body.offsetHeight,
|
25 |
-
behavior: 'smooth'
|
26 |
-
})
|
27 |
-
}
|
28 |
-
{...props}
|
29 |
-
>
|
30 |
-
<IconArrowDown />
|
31 |
-
<span className="sr-only">Scroll to bottom</span>
|
32 |
-
</Button>
|
33 |
-
)
|
34 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/utils_model.py
DELETED
@@ -1,66 +0,0 @@
|
|
1 |
-
import numpy as np
|
2 |
-
import torch
|
3 |
-
import torch.optim as optim
|
4 |
-
import logging
|
5 |
-
import os
|
6 |
-
import sys
|
7 |
-
|
8 |
-
def getCi(accLog):
|
9 |
-
|
10 |
-
mean = np.mean(accLog)
|
11 |
-
std = np.std(accLog)
|
12 |
-
ci95 = 1.96*std/np.sqrt(len(accLog))
|
13 |
-
|
14 |
-
return mean, ci95
|
15 |
-
|
16 |
-
def get_logger(out_dir):
|
17 |
-
logger = logging.getLogger('Exp')
|
18 |
-
logger.setLevel(logging.INFO)
|
19 |
-
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
|
20 |
-
|
21 |
-
file_path = os.path.join(out_dir, "run.log")
|
22 |
-
file_hdlr = logging.FileHandler(file_path)
|
23 |
-
file_hdlr.setFormatter(formatter)
|
24 |
-
|
25 |
-
strm_hdlr = logging.StreamHandler(sys.stdout)
|
26 |
-
strm_hdlr.setFormatter(formatter)
|
27 |
-
|
28 |
-
logger.addHandler(file_hdlr)
|
29 |
-
logger.addHandler(strm_hdlr)
|
30 |
-
return logger
|
31 |
-
|
32 |
-
## Optimizer
|
33 |
-
def initial_optim(decay_option, lr, weight_decay, net, optimizer) :
|
34 |
-
|
35 |
-
if optimizer == 'adamw' :
|
36 |
-
optimizer_adam_family = optim.AdamW
|
37 |
-
elif optimizer == 'adam' :
|
38 |
-
optimizer_adam_family = optim.Adam
|
39 |
-
if decay_option == 'all':
|
40 |
-
#optimizer = optimizer_adam_family(net.parameters(), lr=lr, betas=(0.9, 0.999), weight_decay=weight_decay)
|
41 |
-
optimizer = optimizer_adam_family(net.parameters(), lr=lr, betas=(0.5, 0.9), weight_decay=weight_decay)
|
42 |
-
|
43 |
-
elif decay_option == 'noVQ':
|
44 |
-
all_params = set(net.parameters())
|
45 |
-
no_decay = set([net.vq_layer])
|
46 |
-
|
47 |
-
decay = all_params - no_decay
|
48 |
-
optimizer = optimizer_adam_family([
|
49 |
-
{'params': list(no_decay), 'weight_decay': 0},
|
50 |
-
{'params': list(decay), 'weight_decay' : weight_decay}], lr=lr)
|
51 |
-
|
52 |
-
return optimizer
|
53 |
-
|
54 |
-
|
55 |
-
def get_motion_with_trans(motion, velocity) :
|
56 |
-
'''
|
57 |
-
motion : torch.tensor, shape (batch_size, T, 72), with the global translation = 0
|
58 |
-
velocity : torch.tensor, shape (batch_size, T, 3), contain the information of velocity = 0
|
59 |
-
|
60 |
-
'''
|
61 |
-
trans = torch.cumsum(velocity, dim=1)
|
62 |
-
trans = trans - trans[:, :1] ## the first root is initialized at 0 (just for visualization)
|
63 |
-
trans = trans.repeat((1, 1, 21))
|
64 |
-
motion_with_trans = motion + trans
|
65 |
-
return motion_with_trans
|
66 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/NeuralSeq/modules/parallel_wavegan/layers/residual_stack.py
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
|
3 |
-
# Copyright 2020 Tomoki Hayashi
|
4 |
-
# MIT License (https://opensource.org/licenses/MIT)
|
5 |
-
|
6 |
-
"""Residual stack module in MelGAN."""
|
7 |
-
|
8 |
-
import torch
|
9 |
-
|
10 |
-
from . import CausalConv1d
|
11 |
-
|
12 |
-
|
13 |
-
class ResidualStack(torch.nn.Module):
|
14 |
-
"""Residual stack module introduced in MelGAN."""
|
15 |
-
|
16 |
-
def __init__(self,
|
17 |
-
kernel_size=3,
|
18 |
-
channels=32,
|
19 |
-
dilation=1,
|
20 |
-
bias=True,
|
21 |
-
nonlinear_activation="LeakyReLU",
|
22 |
-
nonlinear_activation_params={"negative_slope": 0.2},
|
23 |
-
pad="ReflectionPad1d",
|
24 |
-
pad_params={},
|
25 |
-
use_causal_conv=False,
|
26 |
-
):
|
27 |
-
"""Initialize ResidualStack module.
|
28 |
-
|
29 |
-
Args:
|
30 |
-
kernel_size (int): Kernel size of dilation convolution layer.
|
31 |
-
channels (int): Number of channels of convolution layers.
|
32 |
-
dilation (int): Dilation factor.
|
33 |
-
bias (bool): Whether to add bias parameter in convolution layers.
|
34 |
-
nonlinear_activation (str): Activation function module name.
|
35 |
-
nonlinear_activation_params (dict): Hyperparameters for activation function.
|
36 |
-
pad (str): Padding function module name before dilated convolution layer.
|
37 |
-
pad_params (dict): Hyperparameters for padding function.
|
38 |
-
use_causal_conv (bool): Whether to use causal convolution.
|
39 |
-
|
40 |
-
"""
|
41 |
-
super(ResidualStack, self).__init__()
|
42 |
-
|
43 |
-
# defile residual stack part
|
44 |
-
if not use_causal_conv:
|
45 |
-
assert (kernel_size - 1) % 2 == 0, "Not support even number kernel size."
|
46 |
-
self.stack = torch.nn.Sequential(
|
47 |
-
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
|
48 |
-
getattr(torch.nn, pad)((kernel_size - 1) // 2 * dilation, **pad_params),
|
49 |
-
torch.nn.Conv1d(channels, channels, kernel_size, dilation=dilation, bias=bias),
|
50 |
-
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
|
51 |
-
torch.nn.Conv1d(channels, channels, 1, bias=bias),
|
52 |
-
)
|
53 |
-
else:
|
54 |
-
self.stack = torch.nn.Sequential(
|
55 |
-
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
|
56 |
-
CausalConv1d(channels, channels, kernel_size, dilation=dilation,
|
57 |
-
bias=bias, pad=pad, pad_params=pad_params),
|
58 |
-
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
|
59 |
-
torch.nn.Conv1d(channels, channels, 1, bias=bias),
|
60 |
-
)
|
61 |
-
|
62 |
-
# defile extra layer for skip connection
|
63 |
-
self.skip_layer = torch.nn.Conv1d(channels, channels, 1, bias=bias)
|
64 |
-
|
65 |
-
def forward(self, c):
|
66 |
-
"""Calculate forward propagation.
|
67 |
-
|
68 |
-
Args:
|
69 |
-
c (Tensor): Input tensor (B, channels, T).
|
70 |
-
|
71 |
-
Returns:
|
72 |
-
Tensor: Output tensor (B, chennels, T).
|
73 |
-
|
74 |
-
"""
|
75 |
-
return self.stack(c) + self.skip_layer(c)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/NeuralSeq/tasks/tts/ps.py
DELETED
@@ -1,194 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import torch
|
3 |
-
import torch.nn.functional as F
|
4 |
-
from torch import nn
|
5 |
-
|
6 |
-
from modules.portaspeech.portaspeech import PortaSpeech
|
7 |
-
from tasks.tts.fs2 import FastSpeech2Task
|
8 |
-
from utils.tts_utils import mel2token_to_dur
|
9 |
-
from utils.hparams import hparams
|
10 |
-
from utils.tts_utils import get_focus_rate, get_phone_coverage_rate, get_diagonal_focus_rate
|
11 |
-
from utils import num_params
|
12 |
-
import numpy as np
|
13 |
-
|
14 |
-
from utils.plot import spec_to_figure
|
15 |
-
from data_gen.tts.data_gen_utils import build_token_encoder
|
16 |
-
|
17 |
-
|
18 |
-
class PortaSpeechTask(FastSpeech2Task):
|
19 |
-
def __init__(self):
|
20 |
-
super().__init__()
|
21 |
-
data_dir = hparams['binary_data_dir']
|
22 |
-
self.word_encoder = build_token_encoder(f'{data_dir}/word_set.json')
|
23 |
-
|
24 |
-
def build_tts_model(self):
|
25 |
-
ph_dict_size = len(self.token_encoder)
|
26 |
-
word_dict_size = len(self.word_encoder)
|
27 |
-
self.model = PortaSpeech(ph_dict_size, word_dict_size, hparams)
|
28 |
-
|
29 |
-
def on_train_start(self):
|
30 |
-
super().on_train_start()
|
31 |
-
for n, m in self.model.named_children():
|
32 |
-
num_params(m, model_name=n)
|
33 |
-
if hasattr(self.model, 'fvae'):
|
34 |
-
for n, m in self.model.fvae.named_children():
|
35 |
-
num_params(m, model_name=f'fvae.{n}')
|
36 |
-
|
37 |
-
def run_model(self, sample, infer=False, *args, **kwargs):
|
38 |
-
txt_tokens = sample['txt_tokens']
|
39 |
-
word_tokens = sample['word_tokens']
|
40 |
-
spk_embed = sample.get('spk_embed')
|
41 |
-
spk_id = sample.get('spk_ids')
|
42 |
-
if not infer:
|
43 |
-
output = self.model(txt_tokens, word_tokens,
|
44 |
-
ph2word=sample['ph2word'],
|
45 |
-
mel2word=sample['mel2word'],
|
46 |
-
mel2ph=sample['mel2ph'],
|
47 |
-
word_len=sample['word_lengths'].max(),
|
48 |
-
tgt_mels=sample['mels'],
|
49 |
-
pitch=sample.get('pitch'),
|
50 |
-
spk_embed=spk_embed,
|
51 |
-
spk_id=spk_id,
|
52 |
-
infer=False,
|
53 |
-
global_step=self.global_step)
|
54 |
-
losses = {}
|
55 |
-
losses['kl_v'] = output['kl'].detach()
|
56 |
-
losses_kl = output['kl']
|
57 |
-
losses_kl = torch.clamp(losses_kl, min=hparams['kl_min'])
|
58 |
-
losses_kl = min(self.global_step / hparams['kl_start_steps'], 1) * losses_kl
|
59 |
-
losses_kl = losses_kl * hparams['lambda_kl']
|
60 |
-
losses['kl'] = losses_kl
|
61 |
-
self.add_mel_loss(output['mel_out'], sample['mels'], losses)
|
62 |
-
if hparams['dur_level'] == 'word':
|
63 |
-
self.add_dur_loss(
|
64 |
-
output['dur'], sample['mel2word'], sample['word_lengths'], sample['txt_tokens'], losses)
|
65 |
-
self.get_attn_stats(output['attn'], sample, losses)
|
66 |
-
else:
|
67 |
-
super(PortaSpeechTask, self).add_dur_loss(output['dur'], sample['mel2ph'], sample['txt_tokens'], losses)
|
68 |
-
return losses, output
|
69 |
-
else:
|
70 |
-
use_gt_dur = kwargs.get('infer_use_gt_dur', hparams['use_gt_dur'])
|
71 |
-
output = self.model(
|
72 |
-
txt_tokens, word_tokens,
|
73 |
-
ph2word=sample['ph2word'],
|
74 |
-
word_len=sample['word_lengths'].max(),
|
75 |
-
pitch=sample.get('pitch'),
|
76 |
-
mel2ph=sample['mel2ph'] if use_gt_dur else None,
|
77 |
-
mel2word=sample['mel2word'] if use_gt_dur else None,
|
78 |
-
tgt_mels=sample['mels'],
|
79 |
-
infer=True,
|
80 |
-
spk_embed=spk_embed,
|
81 |
-
spk_id=spk_id,
|
82 |
-
)
|
83 |
-
return output
|
84 |
-
|
85 |
-
def add_dur_loss(self, dur_pred, mel2token, word_len, txt_tokens, losses=None):
|
86 |
-
T = word_len.max()
|
87 |
-
dur_gt = mel2token_to_dur(mel2token, T).float()
|
88 |
-
nonpadding = (torch.arange(T).to(dur_pred.device)[None, :] < word_len[:, None]).float()
|
89 |
-
dur_pred = dur_pred * nonpadding
|
90 |
-
dur_gt = dur_gt * nonpadding
|
91 |
-
wdur = F.l1_loss((dur_pred + 1).log(), (dur_gt + 1).log(), reduction='none')
|
92 |
-
wdur = (wdur * nonpadding).sum() / nonpadding.sum()
|
93 |
-
if hparams['lambda_word_dur'] > 0:
|
94 |
-
losses['wdur'] = wdur * hparams['lambda_word_dur']
|
95 |
-
if hparams['lambda_sent_dur'] > 0:
|
96 |
-
sent_dur_p = dur_pred.sum(-1)
|
97 |
-
sent_dur_g = dur_gt.sum(-1)
|
98 |
-
sdur_loss = F.l1_loss(sent_dur_p, sent_dur_g, reduction='mean')
|
99 |
-
losses['sdur'] = sdur_loss.mean() * hparams['lambda_sent_dur']
|
100 |
-
|
101 |
-
def validation_step(self, sample, batch_idx):
|
102 |
-
return super().validation_step(sample, batch_idx)
|
103 |
-
|
104 |
-
def save_valid_result(self, sample, batch_idx, model_out):
|
105 |
-
super(PortaSpeechTask, self).save_valid_result(sample, batch_idx, model_out)
|
106 |
-
if self.global_step > 0 and hparams['dur_level'] == 'word':
|
107 |
-
self.logger.add_figure(f'attn_{batch_idx}', spec_to_figure(model_out['attn'][0]), self.global_step)
|
108 |
-
|
109 |
-
def get_attn_stats(self, attn, sample, logging_outputs, prefix=''):
|
110 |
-
# diagonal_focus_rate
|
111 |
-
txt_lengths = sample['txt_lengths'].float()
|
112 |
-
mel_lengths = sample['mel_lengths'].float()
|
113 |
-
src_padding_mask = sample['txt_tokens'].eq(0)
|
114 |
-
target_padding_mask = sample['mels'].abs().sum(-1).eq(0)
|
115 |
-
src_seg_mask = sample['txt_tokens'].eq(self.seg_idx)
|
116 |
-
attn_ks = txt_lengths.float() / mel_lengths.float()
|
117 |
-
|
118 |
-
focus_rate = get_focus_rate(attn, src_padding_mask, target_padding_mask).mean().data
|
119 |
-
phone_coverage_rate = get_phone_coverage_rate(
|
120 |
-
attn, src_padding_mask, src_seg_mask, target_padding_mask).mean()
|
121 |
-
diagonal_focus_rate, diag_mask = get_diagonal_focus_rate(
|
122 |
-
attn, attn_ks, mel_lengths, src_padding_mask, target_padding_mask)
|
123 |
-
logging_outputs[f'{prefix}fr'] = focus_rate.mean().data
|
124 |
-
logging_outputs[f'{prefix}pcr'] = phone_coverage_rate.mean().data
|
125 |
-
logging_outputs[f'{prefix}dfr'] = diagonal_focus_rate.mean().data
|
126 |
-
|
127 |
-
def get_plot_dur_info(self, sample, model_out):
|
128 |
-
if hparams['dur_level'] == 'word':
|
129 |
-
T_txt = sample['word_lengths'].max()
|
130 |
-
dur_gt = mel2token_to_dur(sample['mel2word'], T_txt)[0]
|
131 |
-
dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
|
132 |
-
txt = sample['ph_words'][0].split(" ")
|
133 |
-
else:
|
134 |
-
T_txt = sample['txt_tokens'].shape[1]
|
135 |
-
dur_gt = mel2token_to_dur(sample['mel2ph'], T_txt)[0]
|
136 |
-
dur_pred = model_out['dur'] if 'dur' in model_out else dur_gt
|
137 |
-
txt = self.token_encoder.decode(sample['txt_tokens'][0].cpu().numpy())
|
138 |
-
txt = txt.split(" ")
|
139 |
-
return {'dur_gt': dur_gt, 'dur_pred': dur_pred, 'txt': txt}
|
140 |
-
|
141 |
-
def build_optimizer(self, model):
|
142 |
-
self.optimizer = torch.optim.AdamW(
|
143 |
-
self.model.parameters(),
|
144 |
-
lr=hparams['lr'],
|
145 |
-
betas=(hparams['optimizer_adam_beta1'], hparams['optimizer_adam_beta2']),
|
146 |
-
weight_decay=hparams['weight_decay'])
|
147 |
-
return self.optimizer
|
148 |
-
|
149 |
-
def build_scheduler(self, optimizer):
|
150 |
-
return FastSpeechTask.build_scheduler(self, optimizer)
|
151 |
-
|
152 |
-
############
|
153 |
-
# infer
|
154 |
-
############
|
155 |
-
def test_start(self):
|
156 |
-
super().test_start()
|
157 |
-
if hparams.get('save_attn', False):
|
158 |
-
os.makedirs(f'{self.gen_dir}/attn', exist_ok=True)
|
159 |
-
self.model.store_inverse_all()
|
160 |
-
|
161 |
-
def test_step(self, sample, batch_idx):
|
162 |
-
assert sample['txt_tokens'].shape[0] == 1, 'only support batch_size=1 in inference'
|
163 |
-
outputs = self.run_model(sample, infer=True)
|
164 |
-
text = sample['text'][0]
|
165 |
-
item_name = sample['item_name'][0]
|
166 |
-
tokens = sample['txt_tokens'][0].cpu().numpy()
|
167 |
-
mel_gt = sample['mels'][0].cpu().numpy()
|
168 |
-
mel_pred = outputs['mel_out'][0].cpu().numpy()
|
169 |
-
mel2ph = sample['mel2ph'][0].cpu().numpy()
|
170 |
-
mel2ph_pred = None
|
171 |
-
str_phs = self.token_encoder.decode(tokens, strip_padding=True)
|
172 |
-
base_fn = f'[{batch_idx:06d}][{item_name.replace("%", "_")}][%s]'
|
173 |
-
if text is not None:
|
174 |
-
base_fn += text.replace(":", "$3A")[:80]
|
175 |
-
base_fn = base_fn.replace(' ', '_')
|
176 |
-
gen_dir = self.gen_dir
|
177 |
-
wav_pred = self.vocoder.spec2wav(mel_pred)
|
178 |
-
self.saving_result_pool.add_job(self.save_result, args=[
|
179 |
-
wav_pred, mel_pred, base_fn % 'P', gen_dir, str_phs, mel2ph_pred])
|
180 |
-
if hparams['save_gt']:
|
181 |
-
wav_gt = self.vocoder.spec2wav(mel_gt)
|
182 |
-
self.saving_result_pool.add_job(self.save_result, args=[
|
183 |
-
wav_gt, mel_gt, base_fn % 'G', gen_dir, str_phs, mel2ph])
|
184 |
-
if hparams.get('save_attn', False):
|
185 |
-
attn = outputs['attn'][0].cpu().numpy()
|
186 |
-
np.save(f'{gen_dir}/attn/{item_name}.npy', attn)
|
187 |
-
print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}")
|
188 |
-
return {
|
189 |
-
'item_name': item_name,
|
190 |
-
'text': text,
|
191 |
-
'ph_tokens': self.token_encoder.decode(tokens.tolist()),
|
192 |
-
'wav_fn_pred': base_fn % 'P',
|
193 |
-
'wav_fn_gt': base_fn % 'G',
|
194 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/syntaspeech/syntactic_graph_buider.py
DELETED
@@ -1,294 +0,0 @@
|
|
1 |
-
from copy import deepcopy
|
2 |
-
import torch
|
3 |
-
import dgl
|
4 |
-
import stanza
|
5 |
-
import networkx as nx
|
6 |
-
|
7 |
-
class Sentence2GraphParser:
|
8 |
-
def __init__(self, language='zh', use_gpu=False, download=False):
|
9 |
-
self.language = language
|
10 |
-
if download:
|
11 |
-
self.stanza_parser = stanza.Pipeline(lang=language, use_gpu=use_gpu)
|
12 |
-
else:
|
13 |
-
self.stanza_parser = stanza.Pipeline(lang=language, use_gpu=use_gpu, download_method=None)
|
14 |
-
|
15 |
-
def parse(self, clean_sentence=None, words=None, ph_words=None):
|
16 |
-
if self.language == 'zh':
|
17 |
-
assert words is not None and ph_words is not None
|
18 |
-
ret = self._parse_zh(words, ph_words)
|
19 |
-
elif self.language == 'en':
|
20 |
-
assert clean_sentence is not None
|
21 |
-
ret = self._parse_en(clean_sentence)
|
22 |
-
else:
|
23 |
-
raise NotImplementedError
|
24 |
-
return ret
|
25 |
-
|
26 |
-
def _parse_zh(self, words, ph_words, enable_backward_edge=True, enable_recur_edge=True,
|
27 |
-
enable_inter_sentence_edge=True, sequential_edge=False):
|
28 |
-
"""
|
29 |
-
words: <List of str>, each character in chinese is one item
|
30 |
-
ph_words: <List of str>, each character in chinese is one item, represented by the phoneme
|
31 |
-
Example:
|
32 |
-
text1 = '宝马配挂跛骡鞍,貂蝉怨枕董翁榻.'
|
33 |
-
words = ['<BOS>', '宝', '马', '配', '挂', '跛', '骡', '鞍', ','
|
34 |
-
, '貂', '蝉', '怨', '枕', '董', '翁', '榻', '<EOS>']
|
35 |
-
ph_words = ['<BOS>', 'b_ao3_|', 'm_a3_#', 'p_ei4_|', 'g_ua4_#',
|
36 |
-
'b_o3_#', 'l_uo2_|', 'an1', ',', 'd_iao1_|',
|
37 |
-
'ch_an2_#', 'van4_#', 'zh_en3_#', 'd_ong3_|', 'ueng1_#', 't_a4', '<EOS>']
|
38 |
-
"""
|
39 |
-
words, ph_words = words[1:-1], ph_words[1:-1] # delete <BOS> and <EOS>
|
40 |
-
for i, p_w in enumerate(ph_words):
|
41 |
-
if p_w == ',':
|
42 |
-
# change english ',' into chinese
|
43 |
-
# we found it necessary in stanza's dependency parsing
|
44 |
-
words[i], ph_words[i] = ',', ','
|
45 |
-
tmp_words = deepcopy(words)
|
46 |
-
num_added_space = 0
|
47 |
-
for i, p_w in enumerate(ph_words):
|
48 |
-
if p_w.endswith("#"):
|
49 |
-
# add a blank after the p_w with '#', to separate words
|
50 |
-
tmp_words.insert(num_added_space + i + 1, " ")
|
51 |
-
num_added_space += 1
|
52 |
-
if p_w in [',', ',']:
|
53 |
-
# add one blank before and after ', ', respectively
|
54 |
-
tmp_words.insert(num_added_space + i + 1, " ") # insert behind ',' first
|
55 |
-
tmp_words.insert(num_added_space + i, " ") # insert before
|
56 |
-
num_added_space += 2
|
57 |
-
clean_text = ''.join(tmp_words).strip()
|
58 |
-
parser_out = self.stanza_parser(clean_text)
|
59 |
-
|
60 |
-
idx_to_word = {i + 1: w for i, w in enumerate(words)}
|
61 |
-
|
62 |
-
vocab_nodes = {}
|
63 |
-
vocab_idx_offset = 0
|
64 |
-
for sentence in parser_out.sentences:
|
65 |
-
num_nodes_in_current_sentence = 0
|
66 |
-
for vocab_node in sentence.words:
|
67 |
-
num_nodes_in_current_sentence += 1
|
68 |
-
vocab_idx = vocab_node.id + vocab_idx_offset
|
69 |
-
vocab_text = vocab_node.text.replace(" ", "") # delete blank in vocab
|
70 |
-
vocab_nodes[vocab_idx] = vocab_text
|
71 |
-
vocab_idx_offset += num_nodes_in_current_sentence
|
72 |
-
|
73 |
-
# start vocab-to-word alignment
|
74 |
-
vocab_to_word = {}
|
75 |
-
current_word_idx = 1
|
76 |
-
for vocab_i in vocab_nodes.keys():
|
77 |
-
vocab_to_word[vocab_i] = []
|
78 |
-
for w_in_vocab_i in vocab_nodes[vocab_i]:
|
79 |
-
if w_in_vocab_i != idx_to_word[current_word_idx]:
|
80 |
-
raise ValueError("Word Mismatch!")
|
81 |
-
vocab_to_word[vocab_i].append(current_word_idx) # add a path (vocab_node_idx, word_global_idx)
|
82 |
-
current_word_idx += 1
|
83 |
-
|
84 |
-
# then we compute the vocab-level edges
|
85 |
-
if len(parser_out.sentences) > 5:
|
86 |
-
print("Detect more than 5 input sentence! pls check whether the sentence is too long!")
|
87 |
-
vocab_level_source_id, vocab_level_dest_id = [], []
|
88 |
-
vocab_level_edge_types = []
|
89 |
-
sentences_heads = []
|
90 |
-
vocab_id_offset = 0
|
91 |
-
# get forward edges
|
92 |
-
for s in parser_out.sentences:
|
93 |
-
for w in s.words:
|
94 |
-
w_idx = w.id + vocab_id_offset # it starts from 1, just same as binarizer
|
95 |
-
w_dest_idx = w.head + vocab_id_offset
|
96 |
-
if w.head == 0:
|
97 |
-
sentences_heads.append(w_idx)
|
98 |
-
continue
|
99 |
-
vocab_level_source_id.append(w_idx)
|
100 |
-
vocab_level_dest_id.append(w_dest_idx)
|
101 |
-
vocab_id_offset += len(s.words)
|
102 |
-
vocab_level_edge_types += [0] * len(vocab_level_source_id)
|
103 |
-
num_vocab = vocab_id_offset
|
104 |
-
|
105 |
-
# optional: get backward edges
|
106 |
-
if enable_backward_edge:
|
107 |
-
back_source, back_dest = deepcopy(vocab_level_dest_id), deepcopy(vocab_level_source_id)
|
108 |
-
vocab_level_source_id += back_source
|
109 |
-
vocab_level_dest_id += back_dest
|
110 |
-
vocab_level_edge_types += [1] * len(back_source)
|
111 |
-
|
112 |
-
# optional: get inter-sentence edges if num_sentences > 1
|
113 |
-
inter_sentence_source, inter_sentence_dest = [], []
|
114 |
-
if enable_inter_sentence_edge and len(sentences_heads) > 1:
|
115 |
-
def get_full_graph_edges(nodes):
|
116 |
-
tmp_edges = []
|
117 |
-
for i, node_i in enumerate(nodes):
|
118 |
-
for j, node_j in enumerate(nodes):
|
119 |
-
if i == j:
|
120 |
-
continue
|
121 |
-
tmp_edges.append((node_i, node_j))
|
122 |
-
return tmp_edges
|
123 |
-
|
124 |
-
tmp_edges = get_full_graph_edges(sentences_heads)
|
125 |
-
for (source, dest) in tmp_edges:
|
126 |
-
inter_sentence_source.append(source)
|
127 |
-
inter_sentence_dest.append(dest)
|
128 |
-
vocab_level_source_id += inter_sentence_source
|
129 |
-
vocab_level_dest_id += inter_sentence_dest
|
130 |
-
vocab_level_edge_types += [3] * len(inter_sentence_source)
|
131 |
-
|
132 |
-
if sequential_edge:
|
133 |
-
seq_source, seq_dest = list(range(1, num_vocab)) + list(range(num_vocab, 0, -1)), \
|
134 |
-
list(range(2, num_vocab + 1)) + list(range(num_vocab - 1, -1, -1))
|
135 |
-
vocab_level_source_id += seq_source
|
136 |
-
vocab_level_dest_id += seq_dest
|
137 |
-
vocab_level_edge_types += [4] * (num_vocab - 1) + [5] * (num_vocab - 1)
|
138 |
-
|
139 |
-
# Then, we use the vocab-level edges and the vocab-to-word path, to construct the word-level graph
|
140 |
-
num_word = len(words)
|
141 |
-
source_id, dest_id, edge_types = [], [], []
|
142 |
-
for (vocab_start, vocab_end, vocab_edge_type) in zip(vocab_level_source_id, vocab_level_dest_id,
|
143 |
-
vocab_level_edge_types):
|
144 |
-
# connect the first word in the vocab
|
145 |
-
word_start = min(vocab_to_word[vocab_start])
|
146 |
-
word_end = min(vocab_to_word[vocab_end])
|
147 |
-
source_id.append(word_start)
|
148 |
-
dest_id.append(word_end)
|
149 |
-
edge_types.append(vocab_edge_type)
|
150 |
-
|
151 |
-
# sequential connection in words
|
152 |
-
for word_indices_in_v in vocab_to_word.values():
|
153 |
-
for i, word_idx in enumerate(word_indices_in_v):
|
154 |
-
if i + 1 < len(word_indices_in_v):
|
155 |
-
source_id.append(word_idx)
|
156 |
-
dest_id.append(word_idx + 1)
|
157 |
-
edge_types.append(4)
|
158 |
-
if i - 1 >= 0:
|
159 |
-
source_id.append(word_idx)
|
160 |
-
dest_id.append(word_idx - 1)
|
161 |
-
edge_types.append(5)
|
162 |
-
|
163 |
-
# optional: get recurrent edges
|
164 |
-
if enable_recur_edge:
|
165 |
-
recur_source, recur_dest = list(range(1, num_word + 1)), list(range(1, num_word + 1))
|
166 |
-
source_id += recur_source
|
167 |
-
dest_id += recur_dest
|
168 |
-
edge_types += [2] * len(recur_source)
|
169 |
-
|
170 |
-
# add <BOS> and <EOS>
|
171 |
-
source_id += [0, num_word + 1, 1, num_word]
|
172 |
-
dest_id += [1, num_word, 0, num_word + 1]
|
173 |
-
edge_types += [4, 4, 5, 5] # 4 represents sequentially forward, 5 is sequential backward
|
174 |
-
|
175 |
-
edges = (torch.LongTensor(source_id), torch.LongTensor(dest_id))
|
176 |
-
dgl_graph = dgl.graph(edges)
|
177 |
-
assert dgl_graph.num_edges() == len(edge_types)
|
178 |
-
return dgl_graph, torch.LongTensor(edge_types)
|
179 |
-
|
180 |
-
def _parse_en(self, clean_sentence, enable_backward_edge=True, enable_recur_edge=True,
|
181 |
-
enable_inter_sentence_edge=True, sequential_edge=False, consider_bos_for_index=True):
|
182 |
-
"""
|
183 |
-
clean_sentence: <str>, each word or punctuation should be separated by one blank.
|
184 |
-
"""
|
185 |
-
edge_types = [] # required for gated graph neural network
|
186 |
-
clean_sentence = clean_sentence.strip()
|
187 |
-
if clean_sentence.endswith((" .", " ,", " ;", " :", " ?", " !")):
|
188 |
-
clean_sentence = clean_sentence[:-2]
|
189 |
-
if clean_sentence.startswith(". "):
|
190 |
-
clean_sentence = clean_sentence[2:]
|
191 |
-
parser_out = self.stanza_parser(clean_sentence)
|
192 |
-
if len(parser_out.sentences) > 5:
|
193 |
-
print("Detect more than 5 input sentence! pls check whether the sentence is too long!")
|
194 |
-
print(clean_sentence)
|
195 |
-
source_id, dest_id = [], []
|
196 |
-
sentences_heads = []
|
197 |
-
word_id_offset = 0
|
198 |
-
# get forward edges
|
199 |
-
for s in parser_out.sentences:
|
200 |
-
for w in s.words:
|
201 |
-
w_idx = w.id + word_id_offset # it starts from 1, just same as binarizer
|
202 |
-
w_dest_idx = w.head + word_id_offset
|
203 |
-
if w.head == 0:
|
204 |
-
sentences_heads.append(w_idx)
|
205 |
-
continue
|
206 |
-
source_id.append(w_idx)
|
207 |
-
dest_id.append(w_dest_idx)
|
208 |
-
word_id_offset += len(s.words)
|
209 |
-
num_word = word_id_offset
|
210 |
-
edge_types += [0] * len(source_id)
|
211 |
-
|
212 |
-
# optional: get backward edges
|
213 |
-
if enable_backward_edge:
|
214 |
-
back_source, back_dest = deepcopy(dest_id), deepcopy(source_id)
|
215 |
-
source_id += back_source
|
216 |
-
dest_id += back_dest
|
217 |
-
edge_types += [1] * len(back_source)
|
218 |
-
|
219 |
-
# optional: get recurrent edges
|
220 |
-
if enable_recur_edge:
|
221 |
-
recur_source, recur_dest = list(range(1, num_word + 1)), list(range(1, num_word + 1))
|
222 |
-
source_id += recur_source
|
223 |
-
dest_id += recur_dest
|
224 |
-
edge_types += [2] * len(recur_source)
|
225 |
-
|
226 |
-
# optional: get inter-sentence edges if num_sentences > 1
|
227 |
-
inter_sentence_source, inter_sentence_dest = [], []
|
228 |
-
if enable_inter_sentence_edge and len(sentences_heads) > 1:
|
229 |
-
def get_full_graph_edges(nodes):
|
230 |
-
tmp_edges = []
|
231 |
-
for i, node_i in enumerate(nodes):
|
232 |
-
for j, node_j in enumerate(nodes):
|
233 |
-
if i == j:
|
234 |
-
continue
|
235 |
-
tmp_edges.append((node_i, node_j))
|
236 |
-
return tmp_edges
|
237 |
-
|
238 |
-
tmp_edges = get_full_graph_edges(sentences_heads)
|
239 |
-
for (source, dest) in tmp_edges:
|
240 |
-
inter_sentence_source.append(source)
|
241 |
-
inter_sentence_dest.append(dest)
|
242 |
-
source_id += inter_sentence_source
|
243 |
-
dest_id += inter_sentence_dest
|
244 |
-
edge_types += [3] * len(inter_sentence_source)
|
245 |
-
|
246 |
-
# add <BOS> and <EOS>
|
247 |
-
source_id += [0, num_word + 1, 1, num_word]
|
248 |
-
dest_id += [1, num_word, 0, num_word + 1]
|
249 |
-
edge_types += [4, 4, 5, 5] # 4 represents sequentially forward, 5 is sequential backward
|
250 |
-
|
251 |
-
# optional: sequential edge
|
252 |
-
if sequential_edge:
|
253 |
-
seq_source, seq_dest = list(range(1, num_word)) + list(range(num_word, 0, -1)), \
|
254 |
-
list(range(2, num_word + 1)) + list(range(num_word - 1, -1, -1))
|
255 |
-
source_id += seq_source
|
256 |
-
dest_id += seq_dest
|
257 |
-
edge_types += [4] * (num_word - 1) + [5] * (num_word - 1)
|
258 |
-
if consider_bos_for_index:
|
259 |
-
edges = (torch.LongTensor(source_id), torch.LongTensor(dest_id))
|
260 |
-
else:
|
261 |
-
edges = (torch.LongTensor(source_id) - 1, torch.LongTensor(dest_id) - 1)
|
262 |
-
dgl_graph = dgl.graph(edges)
|
263 |
-
assert dgl_graph.num_edges() == len(edge_types)
|
264 |
-
return dgl_graph, torch.LongTensor(edge_types)
|
265 |
-
|
266 |
-
|
267 |
-
def plot_dgl_sentence_graph(dgl_graph, labels):
|
268 |
-
"""
|
269 |
-
labels = {idx: word for idx,word in enumerate(sentence.split(" ")) }
|
270 |
-
"""
|
271 |
-
import matplotlib.pyplot as plt
|
272 |
-
nx_graph = dgl_graph.to_networkx()
|
273 |
-
pos = nx.random_layout(nx_graph)
|
274 |
-
nx.draw(nx_graph, pos, with_labels=False)
|
275 |
-
nx.draw_networkx_labels(nx_graph, pos, labels)
|
276 |
-
plt.show()
|
277 |
-
|
278 |
-
if __name__ == '__main__':
|
279 |
-
|
280 |
-
# Unit Test for Chinese Graph Builder
|
281 |
-
parser = Sentence2GraphParser("zh")
|
282 |
-
text1 = '宝马配挂跛骡鞍,貂蝉怨枕董翁榻.'
|
283 |
-
words = ['<BOS>', '宝', '马', '配', '挂', '跛', '骡', '鞍', ',', '貂', '蝉', '怨', '枕', '董', '翁', '榻', '<EOS>']
|
284 |
-
ph_words = ['<BOS>', 'b_ao3_|', 'm_a3_#', 'p_ei4_|', 'g_ua4_#', 'b_o3_#', 'l_uo2_|', 'an1', ',', 'd_iao1_|',
|
285 |
-
'ch_an2_#', 'van4_#', 'zh_en3_#', 'd_ong3_|', 'ueng1_#', 't_a4', '<EOS>']
|
286 |
-
graph1, etypes1 = parser.parse(text1, words, ph_words)
|
287 |
-
plot_dgl_sentence_graph(graph1, {i: w for i, w in enumerate(ph_words)})
|
288 |
-
|
289 |
-
# Unit Test for English Graph Builder
|
290 |
-
parser = Sentence2GraphParser("en")
|
291 |
-
text2 = "I love you . You love me . Mixue ice-scream and tea ."
|
292 |
-
graph2, etypes2 = parser.parse(text2)
|
293 |
-
plot_dgl_sentence_graph(graph2, {i: w for i, w in enumerate(("<BOS> " + text2 + " <EOS>").split(" "))})
|
294 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AILab-CVC/SEED-LLaMA/Dockerfile
DELETED
@@ -1,80 +0,0 @@
|
|
1 |
-
# Use the official Python 3.11 image
|
2 |
-
FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04
|
3 |
-
|
4 |
-
ENV DEBIAN_FRONTEND=noninteractive
|
5 |
-
|
6 |
-
RUN apt-get update && \
|
7 |
-
apt-get upgrade -y && \
|
8 |
-
apt-get install -y --no-install-recommends \
|
9 |
-
git \
|
10 |
-
git-lfs \
|
11 |
-
wget \
|
12 |
-
curl \
|
13 |
-
# python build dependencies \
|
14 |
-
build-essential \
|
15 |
-
libssl-dev \
|
16 |
-
zlib1g-dev \
|
17 |
-
libbz2-dev \
|
18 |
-
libreadline-dev \
|
19 |
-
libsqlite3-dev \
|
20 |
-
libncursesw5-dev \
|
21 |
-
xz-utils \
|
22 |
-
tk-dev \
|
23 |
-
libxml2-dev \
|
24 |
-
libxmlsec1-dev \
|
25 |
-
libffi-dev \
|
26 |
-
liblzma-dev \
|
27 |
-
# gradio dependencies \
|
28 |
-
ffmpeg && \
|
29 |
-
apt-get clean && \
|
30 |
-
rm -rf /var/lib/apt/lists/*
|
31 |
-
|
32 |
-
|
33 |
-
# Copy the current directory contents into the container at /code
|
34 |
-
# COPY ./requirements.txt /code/requirements.txt
|
35 |
-
# RUN cat /code/requirements.txt
|
36 |
-
|
37 |
-
# Install requirements.txt
|
38 |
-
# RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
|
39 |
-
# RUN pip install -r /code/requirements.txt
|
40 |
-
|
41 |
-
# Set up a new user named "user" with user ID 1000
|
42 |
-
RUN useradd -m -u 1000 user
|
43 |
-
# Switch to the "user" user
|
44 |
-
USER user
|
45 |
-
# Set home to the user's home directory
|
46 |
-
ENV HOME=/home/user \
|
47 |
-
PATH=/home/user/.local/bin:$PATH \
|
48 |
-
HF_HOME=/data/.huggingface \
|
49 |
-
TORCH_HOME=/data/.huggingface
|
50 |
-
|
51 |
-
# Set the working directory to the user's home directory
|
52 |
-
WORKDIR $HOME/app
|
53 |
-
|
54 |
-
|
55 |
-
RUN curl https://pyenv.run | bash
|
56 |
-
ENV PATH=${HOME}/.pyenv/shims:${HOME}/.pyenv/bin:${PATH}
|
57 |
-
ARG PYTHON_VERSION=3.10.12
|
58 |
-
RUN pyenv install ${PYTHON_VERSION} && \
|
59 |
-
pyenv global ${PYTHON_VERSION} && \
|
60 |
-
pyenv rehash && \
|
61 |
-
pip install --no-cache-dir -U pip setuptools wheel
|
62 |
-
|
63 |
-
# Copy the current directory contents into the container at $HOME/app setting the owner to the user
|
64 |
-
COPY --chown=user . $HOME/app
|
65 |
-
|
66 |
-
RUN git lfs install
|
67 |
-
|
68 |
-
RUN pip install -r requirements.txt
|
69 |
-
|
70 |
-
# RUN ls -lh
|
71 |
-
|
72 |
-
# WORKDIR $HOME/app/pretrained
|
73 |
-
# RUN git lfs install
|
74 |
-
# RUN git clone https://huggingface.co/stabilityai/stable-diffusion-2-1-unclip
|
75 |
-
# RUN git clone https://huggingface.co/AILab-CVC/seed-tokenizer-2
|
76 |
-
# RUn git clone https://huggingface.co/AILab-CVC/seed-llama-14b-sft
|
77 |
-
|
78 |
-
# WORKDIR $HOME/app
|
79 |
-
CMD ["bash", "start.sh"]
|
80 |
-
# CMD ["python", 'start.py']
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AdVisual/MaskCut/model.py
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
# This file is adapted from https://github.com/facebookresearch/CutLER/blob/077938c626341723050a1971107af552a6ca6697/maskcut/demo.py
|
2 |
-
# The original license file is the file named LICENSE.CutLER in this repo.
|
3 |
-
|
4 |
-
import os
|
5 |
-
import sys
|
6 |
-
sys.path.append('./CutLER/')
|
7 |
-
sys.path.append('./CutLER/maskcut/')
|
8 |
-
|
9 |
-
import numpy as np
|
10 |
-
import PIL.Image as Image
|
11 |
-
import torch
|
12 |
-
from scipy import ndimage
|
13 |
-
|
14 |
-
from CutLER.maskcut.dino import ViTFeat # model
|
15 |
-
from CutLER.maskcut.crf import densecrf
|
16 |
-
from CutLER.maskcut.maskcut import maskcut
|
17 |
-
from CutLER.third_party.TokenCut.unsupervised_saliency_detection import metric
|
18 |
-
|
19 |
-
|
20 |
-
class Model:
|
21 |
-
def __init__(self, device: str):
|
22 |
-
self.device = torch.device(device)
|
23 |
-
self.backbone = self.load_backbone()
|
24 |
-
|
25 |
-
def load_backbone(self):
|
26 |
-
# DINO hyperparameters
|
27 |
-
vit_arch = 'base'
|
28 |
-
vit_feat = 'k'
|
29 |
-
patch_size = 8
|
30 |
-
# DINO pre-trained model
|
31 |
-
url = 'https://dl.fbaipublicfiles.com/dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth'
|
32 |
-
feat_dim = 768
|
33 |
-
|
34 |
-
# extract patch features with a pretrained DINO model
|
35 |
-
backbone = ViTFeat(url, feat_dim, vit_arch, vit_feat, patch_size)
|
36 |
-
backbone.eval()
|
37 |
-
backbone.to(self.device)
|
38 |
-
return backbone
|
39 |
-
|
40 |
-
def __call__(self, image, tau, n, fixed_size=480):
|
41 |
-
# get pseudo-masks with MaskCut
|
42 |
-
bipartitions, _, I_new = maskcut(image,
|
43 |
-
self.backbone,
|
44 |
-
self.backbone.patch_size,
|
45 |
-
tau,
|
46 |
-
N=n,
|
47 |
-
fixed_size=fixed_size,
|
48 |
-
cpu=self.device.type == 'cpu')
|
49 |
-
I = image.convert('RGB')
|
50 |
-
width, height = I.size
|
51 |
-
pseudo_mask_list = []
|
52 |
-
for idx, bipartition in enumerate(bipartitions):
|
53 |
-
# post-process pseudo-masks with CRF
|
54 |
-
pseudo_mask = densecrf(np.array(I_new), bipartition)
|
55 |
-
pseudo_mask = ndimage.binary_fill_holes(pseudo_mask >= 0.5)
|
56 |
-
|
57 |
-
# filter out the mask that have a very different pseudo-mask after the CRF
|
58 |
-
mask1 = torch.from_numpy(bipartition).to(self.device)
|
59 |
-
mask2 = torch.from_numpy(pseudo_mask).to(self.device)
|
60 |
-
if metric.IoU(mask1, mask2) < 0.5:
|
61 |
-
pseudo_mask = pseudo_mask * -1
|
62 |
-
|
63 |
-
# construct binary pseudo-masks
|
64 |
-
pseudo_mask[pseudo_mask < 0] = 0
|
65 |
-
pseudo_mask = Image.fromarray(np.uint8(pseudo_mask * 255))
|
66 |
-
pseudo_mask = np.asarray(pseudo_mask.resize((width, height)))
|
67 |
-
|
68 |
-
pseudo_mask = pseudo_mask.astype(np.uint8)
|
69 |
-
upper = np.max(pseudo_mask)
|
70 |
-
lower = np.min(pseudo_mask)
|
71 |
-
thresh = upper / 2.0
|
72 |
-
pseudo_mask[pseudo_mask > thresh] = upper
|
73 |
-
pseudo_mask[pseudo_mask <= thresh] = lower
|
74 |
-
pseudo_mask_list.append(pseudo_mask)
|
75 |
-
return pseudo_mask_list
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Adithedev/Keyword-Extractor/model.py
DELETED
@@ -1,39 +0,0 @@
|
|
1 |
-
import re
|
2 |
-
import nltk
|
3 |
-
import numpy as np
|
4 |
-
import pandas as pd
|
5 |
-
from nltk.corpus import stopwords
|
6 |
-
from nltk.stem import WordNetLemmatizer
|
7 |
-
from nltk.tokenize import word_tokenize
|
8 |
-
from sklearn.feature_extraction.text import TfidfTransformer,CountVectorizer,TfidfVectorizer
|
9 |
-
|
10 |
-
nltk.download('stopwords')
|
11 |
-
nltk.download('wordnet')
|
12 |
-
nltk.download('punkt')
|
13 |
-
|
14 |
-
class KeywordExtraction:
|
15 |
-
|
16 |
-
@staticmethod
|
17 |
-
def fit(text):
|
18 |
-
wnl = WordNetLemmatizer()
|
19 |
-
processed_re_text = re.sub("[^\s\w]"," ",text).lower()
|
20 |
-
stopwords_corpus = stopwords.words("english")
|
21 |
-
tokenized_text = word_tokenize(processed_re_text)
|
22 |
-
processed_text = []
|
23 |
-
|
24 |
-
for i in tokenized_text:
|
25 |
-
if i not in stopwords_corpus:
|
26 |
-
processed_text.append(wnl.lemmatize(i))
|
27 |
-
return " ".join(processed_text)
|
28 |
-
|
29 |
-
@staticmethod
|
30 |
-
def train(processed_text, top_n=5):
|
31 |
-
tfidf_vectorizer = TfidfVectorizer()
|
32 |
-
tfidf_matrix = tfidf_vectorizer.fit_transform(processed_text)
|
33 |
-
feature_names = tfidf_vectorizer.get_feature_names_out()
|
34 |
-
idf_scores = tfidf_vectorizer.idf_
|
35 |
-
|
36 |
-
idf_df = pd.DataFrame({'Word': feature_names, 'IDF': idf_scores})
|
37 |
-
idf_df = idf_df.sort_values(by='IDF', ascending=False).head(top_n)
|
38 |
-
|
39 |
-
return idf_df
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/scale-plugin.js
DELETED
@@ -1,35 +0,0 @@
|
|
1 |
-
import Scale from './scale.js';
|
2 |
-
import ScaleDown from './behaviors/scale/ScaleDown.js';
|
3 |
-
import ScaleDownDestroy from './scale-down-destroy.js';
|
4 |
-
import Popup from './popup.js';
|
5 |
-
import Yoyo from './behaviors/scale/Yoyo.js';
|
6 |
-
|
7 |
-
class ScalePlugin extends Phaser.Plugins.BasePlugin {
|
8 |
-
|
9 |
-
constructor(pluginManager) {
|
10 |
-
super(pluginManager);
|
11 |
-
}
|
12 |
-
|
13 |
-
start() {
|
14 |
-
var eventEmitter = this.game.events;
|
15 |
-
eventEmitter.on('destroy', this.destroy, this);
|
16 |
-
}
|
17 |
-
|
18 |
-
add(gameObject, config) {
|
19 |
-
return new Scale(gameObject, config);
|
20 |
-
}
|
21 |
-
}
|
22 |
-
|
23 |
-
// mixin
|
24 |
-
var methods = {
|
25 |
-
scaleDown: ScaleDown,
|
26 |
-
scaleDownDestroy: ScaleDownDestroy,
|
27 |
-
popup: Popup,
|
28 |
-
yoyo: Yoyo,
|
29 |
-
}
|
30 |
-
Object.assign(
|
31 |
-
ScalePlugin.prototype,
|
32 |
-
methods
|
33 |
-
);
|
34 |
-
|
35 |
-
export default ScalePlugin;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorinput/Factory.d.ts
DELETED
@@ -1,5 +0,0 @@
|
|
1 |
-
import ColorInput from './ColorInput';
|
2 |
-
|
3 |
-
export default function (
|
4 |
-
config?: ColorInput.IConfig
|
5 |
-
): ColorInput;
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/knob/Knob.d.ts
DELETED
@@ -1,63 +0,0 @@
|
|
1 |
-
// import * as Phaser from 'phaser';
|
2 |
-
import OverlapSizer from '../overlapsizer/OverlapSizer';
|
3 |
-
|
4 |
-
|
5 |
-
export default Knob;
|
6 |
-
|
7 |
-
declare namespace Knob {
|
8 |
-
|
9 |
-
type InputTypes = 0 | 1 | -1 | 'drag' | 'pan' | 'click' | 'none';
|
10 |
-
|
11 |
-
interface IConfig extends OverlapSizer.IConfig {
|
12 |
-
background?: Phaser.GameObjects.GameObject,
|
13 |
-
|
14 |
-
color?: number | string,
|
15 |
-
trackColor?: number | string,
|
16 |
-
centerColor?: number | string,
|
17 |
-
thickness?: number,
|
18 |
-
startAngle?: number,
|
19 |
-
anticlockwise?: boolean,
|
20 |
-
knobDepth?: number,
|
21 |
-
|
22 |
-
text?: Phaser.GameObjects.GameObject,
|
23 |
-
textFormatCallback?: (value: number) => string,
|
24 |
-
textFormatCallbackScope?: object,
|
25 |
-
|
26 |
-
input?: InputTypes,
|
27 |
-
|
28 |
-
value?: number,
|
29 |
-
|
30 |
-
gap?: number,
|
31 |
-
|
32 |
-
easeValue?: {
|
33 |
-
duration?: number,
|
34 |
-
ease?: string
|
35 |
-
},
|
36 |
-
|
37 |
-
valuechangeCallback: (newValue: number, oldValue: number, knob: Knob) => void,
|
38 |
-
|
39 |
-
enable?: boolean,
|
40 |
-
|
41 |
-
}
|
42 |
-
|
43 |
-
}
|
44 |
-
|
45 |
-
declare class Knob extends OverlapSizer {
|
46 |
-
constructor(
|
47 |
-
scene: Phaser.Scene,
|
48 |
-
config?: Knob.IConfig
|
49 |
-
);
|
50 |
-
|
51 |
-
value: number;
|
52 |
-
getValue(min?: number, max?: number): number;
|
53 |
-
setValue(value?: number, min?: number, max?: number): this;
|
54 |
-
addValue(inc?: number, min?: number, max?: number): this;
|
55 |
-
|
56 |
-
easeValueTo(value?: number, min?: number, max?: number): this;
|
57 |
-
stopEaseValue(): this;
|
58 |
-
setEaseValueDuration(duration: number): this;
|
59 |
-
setEaseValueFunction(ease: string): this;
|
60 |
-
|
61 |
-
setEnable(enable?: boolean): this;
|
62 |
-
enable: boolean;
|
63 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/AlexZou/Deploy_Restoration/net/CMSFFT.py
DELETED
@@ -1,377 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
# @Author : Lintao Peng
|
3 |
-
# @File : CMSFFT.py
|
4 |
-
# coding=utf-8
|
5 |
-
# Design based on the CTrans
|
6 |
-
from __future__ import absolute_import
|
7 |
-
from __future__ import division
|
8 |
-
from __future__ import print_function
|
9 |
-
import copy
|
10 |
-
import logging
|
11 |
-
import math
|
12 |
-
import torch
|
13 |
-
import torch.nn as nn
|
14 |
-
import numpy as np
|
15 |
-
from torch.nn import Dropout, Softmax, Conv2d, LayerNorm
|
16 |
-
from torch.nn.modules.utils import _pair
|
17 |
-
|
18 |
-
|
19 |
-
#KV_size = 480
|
20 |
-
#transformer.num_heads = 4
|
21 |
-
#transformer.num_layers = 4
|
22 |
-
#expand_ratio = 4
|
23 |
-
|
24 |
-
|
25 |
-
|
26 |
-
#线性编码
|
27 |
-
class Channel_Embeddings(nn.Module):
|
28 |
-
"""Construct the embeddings from patch, position embeddings.
|
29 |
-
"""
|
30 |
-
def __init__(self, patchsize, img_size, in_channels):
|
31 |
-
super().__init__()
|
32 |
-
img_size = _pair(img_size)
|
33 |
-
patch_size = _pair(patchsize)
|
34 |
-
n_patches = (img_size[0] // patch_size[0]) * (img_size[1] // patch_size[1])
|
35 |
-
|
36 |
-
self.patch_embeddings = Conv2d(in_channels=in_channels,
|
37 |
-
out_channels=in_channels,
|
38 |
-
kernel_size=patch_size,
|
39 |
-
stride=patch_size)
|
40 |
-
self.position_embeddings = nn.Parameter(torch.zeros(1, n_patches, in_channels))
|
41 |
-
self.dropout = Dropout(0.1)
|
42 |
-
|
43 |
-
def forward(self, x):
|
44 |
-
if x is None:
|
45 |
-
return None
|
46 |
-
x = self.patch_embeddings(x) # (B, hidden,n_patches^(1/2), n_patches^(1/2))
|
47 |
-
x = x.flatten(2)
|
48 |
-
x = x.transpose(-1, -2) # (B, n_patches, hidden)
|
49 |
-
embeddings = x + self.position_embeddings
|
50 |
-
embeddings = self.dropout(embeddings)
|
51 |
-
return embeddings
|
52 |
-
|
53 |
-
|
54 |
-
#特征重组
|
55 |
-
class Reconstruct(nn.Module):
    """Fold a token sequence back into a feature map, upsample it, then
    apply conv + BatchNorm + ReLU."""

    def __init__(self, in_channels, out_channels, kernel_size, scale_factor):
        super(Reconstruct, self).__init__()
        # 3x3 convs keep spatial size with padding 1; 1x1 needs none.
        padding = 1 if kernel_size == 3 else 0
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding)
        self.norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU(inplace=True)
        self.scale_factor = scale_factor

    def forward(self, x):
        """Map (B, n_patch, hidden) to an upsampled (B, C, H, W) feature map.

        Assumes n_patch is a perfect square (square patch grid); None passes
        straight through.
        """
        if x is None:
            return None

        batch, n_patch, hidden = x.size()
        side = int(np.sqrt(n_patch))
        grid = x.permute(0, 2, 1).contiguous().view(batch, hidden, side, side)
        upsampled = nn.Upsample(scale_factor=self.scale_factor)(grid)
        return self.activation(self.norm(self.conv(upsampled)))
|
82 |
-
|
83 |
-
class Attention_org(nn.Module):
    """Multi-head channel-wise cross attention over four encoder scales.

    Each scale keeps its own per-head query projection; keys and values are
    projected from the channel-wise concatenation of all scales, whose width
    is ``KV_size`` (must equal sum(channel_num)).  Any of the four inputs may
    be None and is then skipped end to end.

    Args:
        vis: if True, forward() also returns per-scale attention maps
            averaged over heads.
        channel_num: per-scale channel widths (length 4).
        KV_size: width of the concatenated key/value features.
        num_heads: number of attention heads.
    """

    def __init__(self, vis, channel_num, KV_size=480, num_heads=4):
        super(Attention_org, self).__init__()
        self.vis = vis
        self.KV_size = KV_size
        self.channel_num = channel_num
        self.num_attention_heads = num_heads

        self.query1 = nn.ModuleList()
        self.query2 = nn.ModuleList()
        self.query3 = nn.ModuleList()
        self.query4 = nn.ModuleList()
        self.key = nn.ModuleList()
        self.value = nn.ModuleList()

        # One projection per head.  A fresh module is constructed on every
        # iteration, so appending it directly is sufficient; the original
        # wrapped each in copy.deepcopy() but never imported ``copy``, which
        # raised NameError at construction time.
        for _ in range(num_heads):
            self.query1.append(nn.Linear(channel_num[0], channel_num[0], bias=False))
            self.query2.append(nn.Linear(channel_num[1], channel_num[1], bias=False))
            self.query3.append(nn.Linear(channel_num[2], channel_num[2], bias=False))
            self.query4.append(nn.Linear(channel_num[3], channel_num[3], bias=False))
            self.key.append(nn.Linear(self.KV_size, self.KV_size, bias=False))
            self.value.append(nn.Linear(self.KV_size, self.KV_size, bias=False))

        # InstanceNorm over the head dimension before the softmax.
        self.psi = nn.InstanceNorm2d(self.num_attention_heads)
        self.softmax = Softmax(dim=3)
        self.out1 = nn.Linear(channel_num[0], channel_num[0], bias=False)
        self.out2 = nn.Linear(channel_num[1], channel_num[1], bias=False)
        self.out3 = nn.Linear(channel_num[2], channel_num[2], bias=False)
        self.out4 = nn.Linear(channel_num[3], channel_num[3], bias=False)
        self.attn_dropout = Dropout(0.1)
        self.proj_dropout = Dropout(0.1)

    def forward(self, emb1, emb2, emb3, emb4, emb_all):
        """Attend each scale's channels to the concatenated features.

        Args:
            emb1..emb4: (B, n_patch, C_i) token embeddings, or None.
            emb_all: (B, n_patch, KV_size) concatenation of the present scales.

        Returns:
            (O1, O2, O3, O4, weights): per-scale outputs with the same shapes
            as the inputs (None where the input was None); ``weights`` holds
            the head-averaged attention maps when ``self.vis`` is set, else None.
        """
        multi_head_Q1_list = []
        multi_head_Q2_list = []
        multi_head_Q3_list = []
        multi_head_Q4_list = []
        multi_head_K_list = []
        multi_head_V_list = []
        if emb1 is not None:
            for query1 in self.query1:
                multi_head_Q1_list.append(query1(emb1))
        if emb2 is not None:
            for query2 in self.query2:
                multi_head_Q2_list.append(query2(emb2))
        if emb3 is not None:
            for query3 in self.query3:
                multi_head_Q3_list.append(query3(emb3))
        if emb4 is not None:
            for query4 in self.query4:
                multi_head_Q4_list.append(query4(emb4))
        for key in self.key:
            multi_head_K_list.append(key(emb_all))
        for value in self.value:
            multi_head_V_list.append(value(emb_all))

        # Stack the heads: (B, heads, n_patch, C) / (B, heads, n_patch, KV).
        multi_head_Q1 = torch.stack(multi_head_Q1_list, dim=1) if emb1 is not None else None
        multi_head_Q2 = torch.stack(multi_head_Q2_list, dim=1) if emb2 is not None else None
        multi_head_Q3 = torch.stack(multi_head_Q3_list, dim=1) if emb3 is not None else None
        multi_head_Q4 = torch.stack(multi_head_Q4_list, dim=1) if emb4 is not None else None
        multi_head_K = torch.stack(multi_head_K_list, dim=1)
        multi_head_V = torch.stack(multi_head_V_list, dim=1)

        # Channel attention: transpose Q so scores are (B, heads, C_i, KV_size).
        multi_head_Q1 = multi_head_Q1.transpose(-1, -2) if emb1 is not None else None
        multi_head_Q2 = multi_head_Q2.transpose(-1, -2) if emb2 is not None else None
        multi_head_Q3 = multi_head_Q3.transpose(-1, -2) if emb3 is not None else None
        multi_head_Q4 = multi_head_Q4.transpose(-1, -2) if emb4 is not None else None

        attention_scores1 = torch.matmul(multi_head_Q1, multi_head_K) if emb1 is not None else None
        attention_scores2 = torch.matmul(multi_head_Q2, multi_head_K) if emb2 is not None else None
        attention_scores3 = torch.matmul(multi_head_Q3, multi_head_K) if emb3 is not None else None
        attention_scores4 = torch.matmul(multi_head_Q4, multi_head_K) if emb4 is not None else None

        attention_scores1 = attention_scores1 / math.sqrt(self.KV_size) if emb1 is not None else None
        attention_scores2 = attention_scores2 / math.sqrt(self.KV_size) if emb2 is not None else None
        attention_scores3 = attention_scores3 / math.sqrt(self.KV_size) if emb3 is not None else None
        attention_scores4 = attention_scores4 / math.sqrt(self.KV_size) if emb4 is not None else None

        attention_probs1 = self.softmax(self.psi(attention_scores1)) if emb1 is not None else None
        attention_probs2 = self.softmax(self.psi(attention_scores2)) if emb2 is not None else None
        attention_probs3 = self.softmax(self.psi(attention_scores3)) if emb3 is not None else None
        attention_probs4 = self.softmax(self.psi(attention_scores4)) if emb4 is not None else None

        if self.vis:
            weights = []
            # Guard against absent scales (the original crashed on .mean(1)
            # of None when vis was set and a scale was missing).
            if attention_probs1 is not None:
                weights.append(attention_probs1.mean(1))
            if attention_probs2 is not None:
                weights.append(attention_probs2.mean(1))
            if attention_probs3 is not None:
                weights.append(attention_probs3.mean(1))
            if attention_probs4 is not None:
                weights.append(attention_probs4.mean(1))
        else:
            weights = None

        attention_probs1 = self.attn_dropout(attention_probs1) if emb1 is not None else None
        attention_probs2 = self.attn_dropout(attention_probs2) if emb2 is not None else None
        attention_probs3 = self.attn_dropout(attention_probs3) if emb3 is not None else None
        attention_probs4 = self.attn_dropout(attention_probs4) if emb4 is not None else None

        multi_head_V = multi_head_V.transpose(-1, -2)
        context_layer1 = torch.matmul(attention_probs1, multi_head_V) if emb1 is not None else None
        context_layer2 = torch.matmul(attention_probs2, multi_head_V) if emb2 is not None else None
        context_layer3 = torch.matmul(attention_probs3, multi_head_V) if emb3 is not None else None
        context_layer4 = torch.matmul(attention_probs4, multi_head_V) if emb4 is not None else None

        # Back to (B, n_patch, C_i, heads), then average over heads.
        context_layer1 = context_layer1.permute(0, 3, 2, 1).contiguous() if emb1 is not None else None
        context_layer2 = context_layer2.permute(0, 3, 2, 1).contiguous() if emb2 is not None else None
        context_layer3 = context_layer3.permute(0, 3, 2, 1).contiguous() if emb3 is not None else None
        context_layer4 = context_layer4.permute(0, 3, 2, 1).contiguous() if emb4 is not None else None
        context_layer1 = context_layer1.mean(dim=3) if emb1 is not None else None
        context_layer2 = context_layer2.mean(dim=3) if emb2 is not None else None
        context_layer3 = context_layer3.mean(dim=3) if emb3 is not None else None
        context_layer4 = context_layer4.mean(dim=3) if emb4 is not None else None

        O1 = self.out1(context_layer1) if emb1 is not None else None
        O2 = self.out2(context_layer2) if emb2 is not None else None
        O3 = self.out3(context_layer3) if emb3 is not None else None
        O4 = self.out4(context_layer4) if emb4 is not None else None
        O1 = self.proj_dropout(O1) if emb1 is not None else None
        O2 = self.proj_dropout(O2) if emb2 is not None else None
        O3 = self.proj_dropout(O3) if emb3 is not None else None
        O4 = self.proj_dropout(O4) if emb4 is not None else None
        return O1, O2, O3, O4, weights
|
219 |
-
|
220 |
-
|
221 |
-
|
222 |
-
|
223 |
-
class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> GELU -> Linear, with dropout."""

    def __init__(self, in_channel, mlp_channel):
        super(Mlp, self).__init__()
        self.fc1 = nn.Linear(in_channel, mlp_channel)
        self.fc2 = nn.Linear(mlp_channel, in_channel)
        self.act_fn = nn.GELU()
        self.dropout = Dropout(0.0)
        self._init_weights()

    def _init_weights(self):
        # Xavier-uniform weights with near-zero biases, as in the ViT
        # reference implementation.
        for fc in (self.fc1, self.fc2):
            nn.init.xavier_uniform_(fc.weight)
            nn.init.normal_(fc.bias, std=1e-6)

    def forward(self, x):
        hidden = self.dropout(self.act_fn(self.fc1(x)))
        return self.dropout(self.fc2(hidden))
|
245 |
-
|
246 |
-
class Block_ViT(nn.Module):
    """One channel-transformer layer: channel-wise cross attention followed
    by per-scale feed-forward blocks, each wrapped in a residual connection.
    Inputs that arrive as None stay None throughout.
    """

    def __init__(self, vis, channel_num, expand_ratio=4, KV_size=480):
        super(Block_ViT, self).__init__()
        # NOTE: the original reassigned expand_ratio = 4 here, silently
        # discarding the constructor argument; the parameter is honored now
        # (its default of 4 preserves the old behavior).
        self.attn_norm1 = LayerNorm(channel_num[0], eps=1e-6)
        self.attn_norm2 = LayerNorm(channel_num[1], eps=1e-6)
        self.attn_norm3 = LayerNorm(channel_num[2], eps=1e-6)
        self.attn_norm4 = LayerNorm(channel_num[3], eps=1e-6)
        self.attn_norm = LayerNorm(KV_size, eps=1e-6)
        self.channel_attn = Attention_org(vis, channel_num)

        self.ffn_norm1 = LayerNorm(channel_num[0], eps=1e-6)
        self.ffn_norm2 = LayerNorm(channel_num[1], eps=1e-6)
        self.ffn_norm3 = LayerNorm(channel_num[2], eps=1e-6)
        self.ffn_norm4 = LayerNorm(channel_num[3], eps=1e-6)
        self.ffn1 = Mlp(channel_num[0], channel_num[0] * expand_ratio)
        self.ffn2 = Mlp(channel_num[1], channel_num[1] * expand_ratio)
        self.ffn3 = Mlp(channel_num[2], channel_num[2] * expand_ratio)
        self.ffn4 = Mlp(channel_num[3], channel_num[3] * expand_ratio)

    def forward(self, emb1, emb2, emb3, emb4):
        """Pre-norm attention and FFN, each with a residual add per scale.

        Returns (x1, x2, x3, x4, weights); weights come from the attention
        module (only populated when it was built with vis=True).
        """
        org1 = emb1
        org2 = emb2
        org3 = emb3
        org4 = emb4
        # Concatenate the present scales along the channel axis.  This
        # replaces the original locals()["emb"+str(i)] lookup, which is
        # fragile and not guaranteed to work across Python implementations.
        embcat = [emb for emb in (emb1, emb2, emb3, emb4) if emb is not None]

        emb_all = torch.cat(embcat, dim=2)
        cx1 = self.attn_norm1(emb1) if emb1 is not None else None
        cx2 = self.attn_norm2(emb2) if emb2 is not None else None
        cx3 = self.attn_norm3(emb3) if emb3 is not None else None
        cx4 = self.attn_norm4(emb4) if emb4 is not None else None
        emb_all = self.attn_norm(emb_all)
        cx1, cx2, cx3, cx4, weights = self.channel_attn(cx1, cx2, cx3, cx4, emb_all)
        # Residual around attention.
        cx1 = org1 + cx1 if emb1 is not None else None
        cx2 = org2 + cx2 if emb2 is not None else None
        cx3 = org3 + cx3 if emb3 is not None else None
        cx4 = org4 + cx4 if emb4 is not None else None

        org1 = cx1
        org2 = cx2
        org3 = cx3
        org4 = cx4
        x1 = self.ffn_norm1(cx1) if emb1 is not None else None
        x2 = self.ffn_norm2(cx2) if emb2 is not None else None
        x3 = self.ffn_norm3(cx3) if emb3 is not None else None
        x4 = self.ffn_norm4(cx4) if emb4 is not None else None
        x1 = self.ffn1(x1) if emb1 is not None else None
        x2 = self.ffn2(x2) if emb2 is not None else None
        x3 = self.ffn3(x3) if emb3 is not None else None
        x4 = self.ffn4(x4) if emb4 is not None else None
        # Residual around the feed-forward block.
        x1 = x1 + org1 if emb1 is not None else None
        x2 = x2 + org2 if emb2 is not None else None
        x3 = x3 + org3 if emb3 is not None else None
        x4 = x4 + org4 if emb4 is not None else None

        return x1, x2, x3, x4, weights
|
311 |
-
|
312 |
-
|
313 |
-
class Encoder(nn.Module):
    """Stack of ``num_layers`` Block_ViT layers plus a final per-scale LayerNorm."""

    def __init__(self, vis, channel_num, num_layers=4):
        super(Encoder, self).__init__()
        self.vis = vis
        self.layer = nn.ModuleList()
        self.encoder_norm1 = LayerNorm(channel_num[0], eps=1e-6)
        self.encoder_norm2 = LayerNorm(channel_num[1], eps=1e-6)
        self.encoder_norm3 = LayerNorm(channel_num[2], eps=1e-6)
        self.encoder_norm4 = LayerNorm(channel_num[3], eps=1e-6)
        # A fresh Block_ViT is built on every iteration, so it can be
        # appended directly; the original wrapped it in copy.deepcopy()
        # without ever importing ``copy``, raising NameError at build time.
        for _ in range(num_layers):
            self.layer.append(Block_ViT(vis, channel_num))

    def forward(self, emb1, emb2, emb3, emb4):
        """Apply every layer in sequence; collect attention maps when vis is set.

        None inputs stay None; the final norms are skipped for absent scales.
        """
        attn_weights = []
        for layer_block in self.layer:
            emb1, emb2, emb3, emb4, weights = layer_block(emb1, emb2, emb3, emb4)
            if self.vis:
                attn_weights.append(weights)
        emb1 = self.encoder_norm1(emb1) if emb1 is not None else None
        emb2 = self.encoder_norm2(emb2) if emb2 is not None else None
        emb3 = self.encoder_norm3(emb3) if emb3 is not None else None
        emb4 = self.encoder_norm4(emb4) if emb4 is not None else None
        return emb1, emb2, emb3, emb4, attn_weights
|
337 |
-
|
338 |
-
|
339 |
-
class ChannelTransformer(nn.Module):
    """Multi-scale channel transformer (CCT): per-scale patch embeddings,
    a shared channel-attention encoder, and per-scale reconstruction back to
    feature maps with a skip connection from each input.
    """

    def __init__(self, vis=False, img_size=256, channel_num=[64, 128, 256, 512], patchSize=[32, 16, 8, 4]):
        super().__init__()

        self.patchSize_1 = patchSize[0]
        self.patchSize_2 = patchSize[1]
        self.patchSize_3 = patchSize[2]
        self.patchSize_4 = patchSize[3]
        # Patch sizes shrink with the image so every scale yields the same
        # token count.
        self.embeddings_1 = Channel_Embeddings(self.patchSize_1, img_size=img_size, in_channels=channel_num[0])
        self.embeddings_2 = Channel_Embeddings(self.patchSize_2, img_size=img_size // 2, in_channels=channel_num[1])
        self.embeddings_3 = Channel_Embeddings(self.patchSize_3, img_size=img_size // 4, in_channels=channel_num[2])
        self.embeddings_4 = Channel_Embeddings(self.patchSize_4, img_size=img_size // 8, in_channels=channel_num[3])
        self.encoder = Encoder(vis, channel_num)

        self.reconstruct_1 = Reconstruct(channel_num[0], channel_num[0], kernel_size=1, scale_factor=(self.patchSize_1, self.patchSize_1))
        self.reconstruct_2 = Reconstruct(channel_num[1], channel_num[1], kernel_size=1, scale_factor=(self.patchSize_2, self.patchSize_2))
        self.reconstruct_3 = Reconstruct(channel_num[2], channel_num[2], kernel_size=1, scale_factor=(self.patchSize_3, self.patchSize_3))
        self.reconstruct_4 = Reconstruct(channel_num[3], channel_num[3], kernel_size=1, scale_factor=(self.patchSize_4, self.patchSize_4))

    def forward(self, en1, en2, en3, en4):
        """Transform four encoder feature maps; None scales pass through as None."""
        emb1 = self.embeddings_1(en1)
        emb2 = self.embeddings_2(en2)
        emb3 = self.embeddings_3(en3)
        emb4 = self.embeddings_4(en4)

        encoded1, encoded2, encoded3, encoded4, attn_weights = self.encoder(emb1, emb2, emb3, emb4)  # (B, n_patch, hidden)

        def fuse(reconstruct, encoded, skip):
            # Reconstruct the tokens to the skip's resolution and add it.
            return reconstruct(encoded) + skip if skip is not None else None

        x1 = fuse(self.reconstruct_1, encoded1, en1)
        x2 = fuse(self.reconstruct_2, encoded2, en2)
        x3 = fuse(self.reconstruct_3, encoded3, en3)
        x4 = fuse(self.reconstruct_4, encoded4, en4)

        return x1, x2, x3, x4, attn_weights
|
377 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Alpaca233/SadTalker/src/generate_batch.py
DELETED
@@ -1,120 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
|
3 |
-
from tqdm import tqdm
|
4 |
-
import torch
|
5 |
-
import numpy as np
|
6 |
-
import random
|
7 |
-
import scipy.io as scio
|
8 |
-
import src.utils.audio as audio
|
9 |
-
|
10 |
-
def crop_pad_audio(wav, audio_length):
    """Force ``wav`` to exactly ``audio_length`` samples.

    Longer input is truncated; shorter input is zero-padded at the end;
    exact-length input is returned unchanged.
    """
    if len(wav) > audio_length:
        return wav[:audio_length]
    if len(wav) < audio_length:
        return np.pad(wav, [0, audio_length - len(wav)], mode='constant', constant_values=0)
    return wav
|
16 |
-
|
17 |
-
def parse_audio_length(audio_length, sr, fps):
    """Trim a sample count to a whole number of video frames.

    Returns (trimmed_length, num_frames): the largest sample count not
    exceeding ``audio_length`` that covers an integer number of frames at
    the given sample rate and frame rate, and that frame count.
    """
    samples_per_frame = sr / fps

    num_frames = int(audio_length / samples_per_frame)
    return int(num_frames * samples_per_frame), num_frames
|
24 |
-
|
25 |
-
def generate_blink_seq(num_frames):
    """Fixed blink schedule: a 9-frame close/open eyelid ramp every 80 frames.

    Returns a (num_frames, 1) array of eyelid-closure ratios in [0, 1];
    frames outside a blink stay 0.
    """
    ratio = np.zeros((num_frames, 1))
    blink = [0.5, 0.6, 0.7, 0.9, 1, 0.9, 0.7, 0.6, 0.5]
    pos = 0
    while pos < num_frames:
        nxt = pos + 80
        # Only place a blink if the full ramp fits before the clip end.
        if nxt + 9 > num_frames - 1:
            break
        ratio[nxt:nxt + 9, 0] = blink
        pos = nxt + 9
    return ratio
|
36 |
-
|
37 |
-
def generate_blink_seq_randomly(num_frames):
    """Random blink schedule: 5-frame blink ramps at random intervals.

    Clips of 20 frames or fewer get no blinks.  Returns a (num_frames, 1)
    array of eyelid-closure ratios in [0, 1].
    """
    ratio = np.zeros((num_frames, 1))
    if num_frames <= 20:
        return ratio
    blink = [0.5, 0.9, 1.0, 0.9, 0.5]
    frame_id = 0
    while frame_id < num_frames:
        # Random gap before the next blink, capped at 70 frames.
        start = random.choice(range(min(10, num_frames), min(int(num_frames / 2), 70)))
        if frame_id + start + 5 > num_frames - 1:
            break
        ratio[frame_id + start:frame_id + start + 5, 0] = blink
        frame_id = frame_id + start + 5
    return ratio
|
50 |
-
|
51 |
-
def get_data(first_coeff_path, audio_path, device, ref_eyeblink_coeff_path, still=False, idlemode=False, length_of_audio=False, use_blink=True):
    """Build the audio-conditioned input batch for coefficient generation.

    Args:
        first_coeff_path: .mat file holding the source image's 3DMM
            coefficients under key 'coeff_3dmm'.
        audio_path: driving audio file (loaded at 16 kHz).
        device: torch device the returned tensors are moved to.
        ref_eyeblink_coeff_path: optional .mat with reference eye-blink
            coefficients; when given, the random blink schedule is zeroed
            and the first 64 coefficients are copied from the reference.
        still: unused here; presumably kept for caller interface
            compatibility — TODO confirm against callers.
        idlemode: if True, skip audio processing and feed zero mels for
            ``length_of_audio`` seconds.
        length_of_audio: duration in seconds; only read in idle mode.
        use_blink: if False, the blink ratio tensor is zeroed out.

    Returns:
        dict with 'indiv_mels' (1, T, 1, 80, 16), 'ref' (1, T, 70),
        'num_frames', 'ratio_gt' and the audio/picture file stems.
    """

    # 16 mel frames per window — the window size used downstream.
    syncnet_mel_step_size = 16
    fps = 25

    pic_name = os.path.splitext(os.path.split(first_coeff_path)[-1])[0]
    audio_name = os.path.splitext(os.path.split(audio_path)[-1])[0]

    if idlemode:
        # No audio: one zero mel window per frame at 25 fps.
        num_frames = int(length_of_audio * 25)
        indiv_mels = np.zeros((num_frames, 80, 16))
    else:
        wav = audio.load_wav(audio_path, 16000)
        # Trim the waveform to a whole number of 25 fps video frames.
        wav_length, num_frames = parse_audio_length(len(wav), 16000, 25)
        wav = crop_pad_audio(wav, wav_length)
        orig_mel = audio.melspectrogram(wav).T
        spec = orig_mel.copy()         # nframes 80
        indiv_mels = []

        for i in tqdm(range(num_frames), 'mel:'):
            # Window starts two video frames before the current one.
            start_frame_num = i-2
            start_idx = int(80. * (start_frame_num / float(fps)))
            end_idx = start_idx + syncnet_mel_step_size
            seq = list(range(start_idx, end_idx))
            # Clamp indices so windows near the clip edges stay in range.
            seq = [ min(max(item, 0), orig_mel.shape[0]-1) for item in seq ]
            m = spec[seq, :]
            indiv_mels.append(m.T)
        indiv_mels = np.asarray(indiv_mels)         # T 80 16

    ratio = generate_blink_seq_randomly(num_frames)      # T
    source_semantics_path = first_coeff_path
    source_semantics_dict = scio.loadmat(source_semantics_path)
    # First 70 coefficients of the first frame, repeated for every frame.
    ref_coeff = source_semantics_dict['coeff_3dmm'][:1,:70]         #1 70
    ref_coeff = np.repeat(ref_coeff, num_frames, axis=0)

    if ref_eyeblink_coeff_path is not None:
        # A reference blink is provided: suppress the synthetic schedule.
        ratio[:num_frames] = 0
        refeyeblink_coeff_dict = scio.loadmat(ref_eyeblink_coeff_path)
        refeyeblink_coeff = refeyeblink_coeff_dict['coeff_3dmm'][:,:64]
        refeyeblink_num_frames = refeyeblink_coeff.shape[0]
        if refeyeblink_num_frames<num_frames:
            # Tile the reference clip until it covers num_frames.
            div = num_frames//refeyeblink_num_frames
            re = num_frames%refeyeblink_num_frames
            refeyeblink_coeff_list = [refeyeblink_coeff for i in range(div)]
            refeyeblink_coeff_list.append(refeyeblink_coeff[:re, :64])
            refeyeblink_coeff = np.concatenate(refeyeblink_coeff_list, axis=0)
            print(refeyeblink_coeff.shape[0])

        # Overwrite the first 64 dims with the reference blink coefficients.
        ref_coeff[:, :64] = refeyeblink_coeff[:num_frames, :64]

    indiv_mels = torch.FloatTensor(indiv_mels).unsqueeze(1).unsqueeze(0) # bs T 1 80 16

    if use_blink:
        ratio = torch.FloatTensor(ratio).unsqueeze(0)                       # bs T
    else:
        # Blink disabled: keep the tensor shape but zero every entry.
        ratio = torch.FloatTensor(ratio).unsqueeze(0).fill_(0.)             # bs T
    ref_coeff = torch.FloatTensor(ref_coeff).unsqueeze(0)                        # bs 1 70

    indiv_mels = indiv_mels.to(device)
    ratio = ratio.to(device)
    ref_coeff = ref_coeff.to(device)

    return {'indiv_mels': indiv_mels,
            'ref': ref_coeff,
            'num_frames': num_frames,
            'ratio_gt': ratio,
            'audio_name': audio_name, 'pic_name': pic_name}
|
120 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/latex/attention/model_architecture.tex
DELETED
@@ -1,155 +0,0 @@
|
|
1 |
-
|
2 |
-
\begin{figure}
|
3 |
-
\centering
|
4 |
-
\includegraphics[scale=0.6]{Figures/ModalNet-21}
|
5 |
-
\caption{The Transformer - model architecture.}
|
6 |
-
\label{fig:model-arch}
|
7 |
-
\end{figure}
|
8 |
-
|
9 |
-
% Although the primary workhorse of our model is attention,
|
10 |
-
%Our model maintains the encoder-decoder structure that is common to many so-called sequence-to-sequence models \citep{bahdanau2014neural,sutskever14}. As in all such architectures, the encoder computes a representation of the input sequence, and the decoder consumes these representations along with the output tokens to autoregressively produce the output sequence. Where, traditionally, the encoder and decoder contain stacks of recurrent or convolutional layers, our encoder and decoder stacks are composed of attention layers and position-wise feed-forward layers (Figure~\ref{fig:model-arch}). The following sections describe the gross architecture and these particular components in detail.
|
11 |
-
|
12 |
-
Most competitive neural sequence transduction models have an encoder-decoder structure \citep{cho2014learning,bahdanau2014neural,sutskever14}. Here, the encoder maps an input sequence of symbol representations $(x_1, ..., x_n)$ to a sequence of continuous representations $\mathbf{z} = (z_1, ..., z_n)$. Given $\mathbf{z}$, the decoder then generates an output sequence $(y_1,...,y_m)$ of symbols one element at a time. At each step the model is auto-regressive \citep{graves2013generating}, consuming the previously generated symbols as additional input when generating the next.
|
13 |
-
|
14 |
-
The Transformer follows this overall architecture using stacked self-attention and point-wise, fully connected layers for both the encoder and decoder, shown in the left and right halves of Figure~\ref{fig:model-arch}, respectively.
|
15 |
-
|
16 |
-
\subsection{Encoder and Decoder Stacks}
|
17 |
-
|
18 |
-
\paragraph{Encoder:}The encoder is composed of a stack of $N=6$ identical layers. Each layer has two sub-layers. The first is a multi-head self-attention mechanism, and the second is a simple, position-wise fully connected feed-forward network. We employ a residual connection \citep{he2016deep} around each of the two sub-layers, followed by layer normalization \cite{layernorm2016}. That is, the output of each sub-layer is $\mathrm{LayerNorm}(x + \mathrm{Sublayer}(x))$, where $\mathrm{Sublayer}(x)$ is the function implemented by the sub-layer itself. To facilitate these residual connections, all sub-layers in the model, as well as the embedding layers, produce outputs of dimension $\dmodel=512$.
|
19 |
-
|
20 |
-
\paragraph{Decoder:}The decoder is also composed of a stack of $N=6$ identical layers. In addition to the two sub-layers in each encoder layer, the decoder inserts a third sub-layer, which performs multi-head attention over the output of the encoder stack. Similar to the encoder, we employ residual connections around each of the sub-layers, followed by layer normalization. We also modify the self-attention sub-layer in the decoder stack to prevent positions from attending to subsequent positions. This masking, combined with fact that the output embeddings are offset by one position, ensures that the predictions for position $i$ can depend only on the known outputs at positions less than $i$.
|
21 |
-
|
22 |
-
% In our model (Figure~\ref{fig:model-arch}), the encoder and decoder are composed of stacks of alternating self-attention layers (for cross-positional communication) and position-wise feed-forward layers (for in-place computation). In addition, the decoder stack contains encoder-decoder attention layers. Since attention is agnostic to the distances between words, our model requires a "positional encoding" to be added to the encoder and decoder input. The following sections describe all of these components in detail.
|
23 |
-
|
24 |
-
\subsection{Attention} \label{sec:attention}
|
25 |
-
An attention function can be described as mapping a query and a set of key-value pairs to an output, where the query, keys, values, and output are all vectors. The output is computed as a weighted sum of the values, where the weight assigned to each value is computed by a compatibility function of the query with the corresponding key.
|
26 |
-
|
27 |
-
\subsubsection{Scaled Dot-Product Attention} \label{sec:scaled-dot-prod}
|
28 |
-
|
29 |
-
% \begin{figure}
|
30 |
-
% \centering
|
31 |
-
% \includegraphics[scale=0.6]{Figures/ModalNet-19}
|
32 |
-
% \caption{Scaled Dot-Product Attention.}
|
33 |
-
% \label{fig:multi-head-att}
|
34 |
-
% \end{figure}
|
35 |
-
|
36 |
-
We call our particular attention "Scaled Dot-Product Attention" (Figure~\ref{fig:multi-head-att}). The input consists of queries and keys of dimension $d_k$, and values of dimension $d_v$. We compute the dot products of the query with all keys, divide each by $\sqrt{d_k}$, and apply a softmax function to obtain the weights on the values.
|
37 |
-
|
38 |
-
In practice, we compute the attention function on a set of queries simultaneously, packed together into a matrix $Q$. The keys and values are also packed together into matrices $K$ and $V$. We compute the matrix of outputs as:
|
39 |
-
|
40 |
-
\begin{equation}
|
41 |
-
\mathrm{Attention}(Q, K, V) = \mathrm{softmax}(\frac{QK^T}{\sqrt{d_k}})V
|
42 |
-
\end{equation}
|
43 |
-
|
44 |
-
The two most commonly used attention functions are additive attention \citep{bahdanau2014neural}, and dot-product (multiplicative) attention. Dot-product attention is identical to our algorithm, except for the scaling factor of $\frac{1}{\sqrt{d_k}}$. Additive attention computes the compatibility function using a feed-forward network with a single hidden layer. While the two are similar in theoretical complexity, dot-product attention is much faster and more space-efficient in practice, since it can be implemented using highly optimized matrix multiplication code.
|
45 |
-
|
46 |
-
%We scale the dot products by $1/\sqrt{d_k}$ to limit the magnitude of the dot products, which works well in practice. Otherwise, we found applying the softmax to often result in weights very close to 0 or 1, and hence minuscule gradients.
|
47 |
-
|
48 |
-
% Already described in the subsequent section
|
49 |
-
%When used as part of decoder self-attention, an optional mask function is applied just before the softmax to prevent positions from attending to subsequent positions. This mask simply sets the logits corresponding to all illegal connections (those outside of the lower triangle) to $-\infty$.
|
50 |
-
|
51 |
-
%\paragraph{Comparison to Additive Attention: } We choose dot product attention over additive attention \citep{bahdanau2014neural} since it can be computed using highly optimized matrix multiplication code. This optimization is particularly important to us, as we employ many attention layers in our model.
|
52 |
-
|
53 |
-
While for small values of $d_k$ the two mechanisms perform similarly, additive attention outperforms dot product attention without scaling for larger values of $d_k$ \citep{DBLP:journals/corr/BritzGLL17}. We suspect that for large values of $d_k$, the dot products grow large in magnitude, pushing the softmax function into regions where it has extremely small gradients \footnote{To illustrate why the dot products get large, assume that the components of $q$ and $k$ are independent random variables with mean $0$ and variance $1$. Then their dot product, $q \cdot k = \sum_{i=1}^{d_k} q_ik_i$, has mean $0$ and variance $d_k$.}. To counteract this effect, we scale the dot products by $\frac{1}{\sqrt{d_k}}$.
|
54 |
-
|
55 |
-
|
56 |
-
%We suspect this to be caused by the dot products growing too large in magnitude to result in useful gradients after applying the softmax function. To counteract this, we scale the dot product by $1/\sqrt{d_k}$.
|
57 |
-
|
58 |
-
|
59 |
-
\subsubsection{Multi-Head Attention} \label{sec:multihead}
|
60 |
-
|
61 |
-
\begin{figure}
|
62 |
-
\begin{minipage}[t]{0.5\textwidth}
|
63 |
-
\centering
|
64 |
-
Scaled Dot-Product Attention \\
|
65 |
-
\vspace{0.5cm}
|
66 |
-
\includegraphics[scale=0.6]{Figures/ModalNet-19}
|
67 |
-
\end{minipage}
|
68 |
-
\begin{minipage}[t]{0.5\textwidth}
|
69 |
-
\centering
|
70 |
-
Multi-Head Attention \\
|
71 |
-
\vspace{0.1cm}
|
72 |
-
\includegraphics[scale=0.6]{Figures/ModalNet-20}
|
73 |
-
\end{minipage}
|
74 |
-
|
75 |
-
|
76 |
-
% \centering
|
77 |
-
|
78 |
-
\caption{(left) Scaled Dot-Product Attention. (right) Multi-Head Attention consists of several attention layers running in parallel.}
|
79 |
-
\label{fig:multi-head-att}
|
80 |
-
\end{figure}
|
81 |
-
|
82 |
-
Instead of performing a single attention function with $\dmodel$-dimensional keys, values and queries, we found it beneficial to linearly project the queries, keys and values $h$ times with different, learned linear projections to $d_k$, $d_k$ and $d_v$ dimensions, respectively.
|
83 |
-
On each of these projected versions of queries, keys and values we then perform the attention function in parallel, yielding $d_v$-dimensional output values. These are concatenated and once again projected, resulting in the final values, as depicted in Figure~\ref{fig:multi-head-att}.
|
84 |
-
|
85 |
-
Multi-head attention allows the model to jointly attend to information from different representation subspaces at different positions. With a single attention head, averaging inhibits this.
|
86 |
-
|
87 |
-
\begin{align*}
|
88 |
-
\mathrm{MultiHead}(Q, K, V) &= \mathrm{Concat}(\mathrm{head_1}, ..., \mathrm{head_h})W^O\\
|
89 |
-
% \mathrm{where} \mathrm{head_i} &= \mathrm{Attention}(QW_Q_i^{\dmodel \times d_q}, KW_K_i^{\dmodel \times d_k}, VW^V_i^{\dmodel \times d_v})\\
|
90 |
-
\text{where}~\mathrm{head_i} &= \mathrm{Attention}(QW^Q_i, KW^K_i, VW^V_i)\\
|
91 |
-
\end{align*}
|
92 |
-
|
93 |
-
Where the projections are parameter matrices $W^Q_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^K_i \in \mathbb{R}^{\dmodel \times d_k}$, $W^V_i \in \mathbb{R}^{\dmodel \times d_v}$ and $W^O \in \mathbb{R}^{hd_v \times \dmodel}$.
|
94 |
-
|
95 |
-
|
96 |
-
%find it better (and no more expensive) to have multiple parallel attention layers (each over the full set of positions) with proportionally lower-dimensional keys, values and queries. We call this "Multi-Head Attention" (Figure~\ref{fig:multi-head-att}). The keys, values, and queries for each of these parallel attention layers are computed by learned linear transformations of the inputs to the multi-head attention. We use different linear transformations across different parallel attention layers. The output of the parallel attention layers are concatenated, and then passed through a final learned linear transformation.
|
97 |
-
|
98 |
-
In this work we employ $h=8$ parallel attention layers, or heads. For each of these we use $d_k=d_v=\dmodel/h=64$.
|
99 |
-
Due to the reduced dimension of each head, the total computational cost is similar to that of single-head attention with full dimensionality.
|
100 |
-
|
101 |
-
\subsubsection{Applications of Attention in our Model}
|
102 |
-
|
103 |
-
The Transformer uses multi-head attention in three different ways:
|
104 |
-
\begin{itemize}
|
105 |
-
\item In "encoder-decoder attention" layers, the queries come from the previous decoder layer, and the memory keys and values come from the output of the encoder. This allows every position in the decoder to attend over all positions in the input sequence. This mimics the typical encoder-decoder attention mechanisms in sequence-to-sequence models such as \citep{wu2016google, bahdanau2014neural,JonasFaceNet2017}.
|
106 |
-
|
107 |
-
\item The encoder contains self-attention layers. In a self-attention layer all of the keys, values and queries come from the same place, in this case, the output of the previous layer in the encoder. Each position in the encoder can attend to all positions in the previous layer of the encoder.
|
108 |
-
|
109 |
-
\item Similarly, self-attention layers in the decoder allow each position in the decoder to attend to all positions in the decoder up to and including that position. We need to prevent leftward information flow in the decoder to preserve the auto-regressive property. We implement this inside of scaled dot-product attention by masking out (setting to $-\infty$) all values in the input of the softmax which correspond to illegal connections. See Figure~\ref{fig:multi-head-att}.
|
110 |
-
|
111 |
-
\end{itemize}
|
112 |
-
|
113 |
-
\subsection{Position-wise Feed-Forward Networks}\label{sec:ffn}
|
114 |
-
|
115 |
-
In addition to attention sub-layers, each of the layers in our encoder and decoder contains a fully connected feed-forward network, which is applied to each position separately and identically. This consists of two linear transformations with a ReLU activation in between.
|
116 |
-
|
117 |
-
\begin{equation}
|
118 |
-
\mathrm{FFN}(x)=\max(0, xW_1 + b_1) W_2 + b_2
|
119 |
-
\end{equation}
|
120 |
-
|
121 |
-
While the linear transformations are the same across different positions, they use different parameters from layer to layer. Another way of describing this is as two convolutions with kernel size 1. The dimensionality of input and output is $\dmodel=512$, and the inner-layer has dimensionality $d_{ff}=2048$.
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
%In the appendix, we describe how the position-wise feed-forward network can also be seen as a form of attention.
|
126 |
-
|
127 |
-
%from Jakob: The number of operations required for the model to relate signals from two arbitrary input or output positions grows in the distance between positions in input or output, linearly for ConvS2S and logarithmically for ByteNet, making it harder to learn dependencies between these positions \citep{hochreiter2001gradient}. In the transformer this is reduced to a constant number of operations, albeit at the cost of effective resolution caused by averaging attention-weighted positions, an effect we aim to counteract with multi-headed attention.
|
128 |
-
|
129 |
-
|
130 |
-
%Figure~\ref{fig:simple-att} presents a simple attention function, $A$, with a single head, that forms the basis of our multi-head attention. $A$ takes a query key vector $\kq$, matrices of memory keys $\km$ and memory values $\vm$ ,and produces a query value vector $\vq$ as
|
131 |
-
%\begin{equation*} \label{eq:attention}
|
132 |
-
% A(\kq, \km, \vm) = {\vm}^T (Softmax(\km \kq).
|
133 |
-
%\end{equation*}
|
134 |
-
%We linearly transform $\kq,\,\km$, and $\vm$ with learned matrices ${\Wkq \text{,} \, \Wkm}$, and ${\Wvm}$ before calling the attention function, and transform the output query with $\Wvq$ before handing it to the feed forward layer. Each attention layer has it's own set of transformation matrices, which are shared across all query positions. $A$ is applied in parallel for each query position, and is implemented very efficiently as a batch of matrix multiplies. The self-attention and encoder-decoder attention layers use $A$, but with different arguments. For example, in encdoder self-attention, queries in encoder layer $i$ attention to memories in encoder layer $i-1$. To ensure that decoder self-attention layers do not look at future words, we add $- \inf$ to the softmax logits in positions $j+1$ to query length for query position $l$.
|
135 |
-
|
136 |
-
%In simple attention, the query value is a weighted combination of the memory values where the attention weights sum to one. Although this function performs well in practice, the constraint on attention weights can restrict the amount of information that flows from memories to queries because the query cannot focus on multiple memory positions at once, which might be desirable when translating long sequences. \marginpar{@usz, could you think of an example of this ?} We remedy this by maintaining multiple attention heads at each query position that attend to all memory positions in parallel, with a different set of parameters per attention head $h$.
|
137 |
-
%\marginpar{}
|
138 |
-
|
139 |
-
\subsection{Embeddings and Softmax}
|
140 |
-
Similarly to other sequence transduction models, we use learned embeddings to convert the input tokens and output tokens to vectors of dimension $\dmodel$. We also use the usual learned linear transformation and softmax function to convert the decoder output to predicted next-token probabilities. In our model, we share the same weight matrix between the two embedding layers and the pre-softmax linear transformation, similar to \citep{press2016using}. In the embedding layers, we multiply those weights by $\sqrt{\dmodel}$.
|
141 |
-
|
142 |
-
|
143 |
-
\subsection{Positional Encoding}
|
144 |
-
Since our model contains no recurrence and no convolution, in order for the model to make use of the order of the sequence, we must inject some information about the relative or absolute position of the tokens in the sequence. To this end, we add "positional encodings" to the input embeddings at the bottoms of the encoder and decoder stacks. The positional encodings have the same dimension $\dmodel$ as the embeddings, so that the two can be summed. There are many choices of positional encodings, learned and fixed \citep{JonasFaceNet2017}.
|
145 |
-
|
146 |
-
In this work, we use sine and cosine functions of different frequencies:
|
147 |
-
|
148 |
-
\begin{align*}
|
149 |
-
PE_{(pos,2i)} = sin(pos / 10000^{2i/\dmodel}) \\
|
150 |
-
PE_{(pos,2i+1)} = cos(pos / 10000^{2i/\dmodel})
|
151 |
-
\end{align*}
|
152 |
-
|
153 |
-
where $pos$ is the position and $i$ is the dimension. That is, each dimension of the positional encoding corresponds to a sinusoid. The wavelengths form a geometric progression from $2\pi$ to $10000 \cdot 2\pi$. We chose this function because we hypothesized it would allow the model to easily learn to attend by relative positions, since for any fixed offset $k$, $PE_{pos+k}$ can be represented as a linear function of $PE_{pos}$.
|
154 |
-
|
155 |
-
We also experimented with using learned positional embeddings \citep{JonasFaceNet2017} instead, and found that the two versions produced nearly identical results (see Table~\ref{tab:variations} row (E)). We chose the sinusoidal version because it may allow the model to extrapolate to sequence lengths longer than the ones encountered during training.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/editings/ganspace.py
DELETED
@@ -1,21 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
|
3 |
-
|
4 |
-
def edit(latents, pca, edit_directions):
|
5 |
-
edit_latents = []
|
6 |
-
for latent in latents:
|
7 |
-
for pca_idx, start, end, strength in edit_directions:
|
8 |
-
delta = get_delta(pca, latent, pca_idx, strength)
|
9 |
-
delta_padded = torch.zeros(latent.shape).to('cuda')
|
10 |
-
delta_padded[start:end] += delta.repeat(end - start, 1)
|
11 |
-
edit_latents.append(latent + delta_padded)
|
12 |
-
return torch.stack(edit_latents)
|
13 |
-
|
14 |
-
|
15 |
-
def get_delta(pca, latent, idx, strength):
|
16 |
-
w_centered = latent - pca['mean'].to('cuda')
|
17 |
-
lat_comp = pca['comp'].to('cuda')
|
18 |
-
lat_std = pca['std'].to('cuda')
|
19 |
-
w_coord = torch.sum(w_centered[0].reshape(-1)*lat_comp[idx].reshape(-1)) / lat_std[idx]
|
20 |
-
delta = (strength - w_coord)*lat_comp[idx]*lat_std[idx]
|
21 |
-
return delta
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/mapper/training/coach.py
DELETED
@@ -1,242 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
|
3 |
-
import clip
|
4 |
-
import torch
|
5 |
-
import torchvision
|
6 |
-
from torch import nn
|
7 |
-
from torch.utils.data import DataLoader
|
8 |
-
from torch.utils.tensorboard import SummaryWriter
|
9 |
-
|
10 |
-
import criteria.clip_loss as clip_loss
|
11 |
-
from criteria import id_loss
|
12 |
-
from mapper.datasets.latents_dataset import LatentsDataset
|
13 |
-
from mapper.styleclip_mapper import StyleCLIPMapper
|
14 |
-
from mapper.training.ranger import Ranger
|
15 |
-
from mapper.training import train_utils
|
16 |
-
|
17 |
-
|
18 |
-
class Coach:
|
19 |
-
def __init__(self, opts):
|
20 |
-
self.opts = opts
|
21 |
-
|
22 |
-
self.global_step = 0
|
23 |
-
|
24 |
-
self.device = 'cuda:0'
|
25 |
-
self.opts.device = self.device
|
26 |
-
|
27 |
-
# Initialize network
|
28 |
-
self.net = StyleCLIPMapper(self.opts).to(self.device)
|
29 |
-
|
30 |
-
# Initialize loss
|
31 |
-
if self.opts.id_lambda > 0:
|
32 |
-
self.id_loss = id_loss.IDLoss(self.opts).to(self.device).eval()
|
33 |
-
if self.opts.clip_lambda > 0:
|
34 |
-
self.clip_loss = clip_loss.CLIPLoss(opts)
|
35 |
-
if self.opts.latent_l2_lambda > 0:
|
36 |
-
self.latent_l2_loss = nn.MSELoss().to(self.device).eval()
|
37 |
-
|
38 |
-
# Initialize optimizer
|
39 |
-
self.optimizer = self.configure_optimizers()
|
40 |
-
|
41 |
-
# Initialize dataset
|
42 |
-
self.train_dataset, self.test_dataset = self.configure_datasets()
|
43 |
-
self.train_dataloader = DataLoader(self.train_dataset,
|
44 |
-
batch_size=self.opts.batch_size,
|
45 |
-
shuffle=True,
|
46 |
-
num_workers=int(self.opts.workers),
|
47 |
-
drop_last=True)
|
48 |
-
self.test_dataloader = DataLoader(self.test_dataset,
|
49 |
-
batch_size=self.opts.test_batch_size,
|
50 |
-
shuffle=False,
|
51 |
-
num_workers=int(self.opts.test_workers),
|
52 |
-
drop_last=True)
|
53 |
-
|
54 |
-
self.text_inputs = torch.cat([clip.tokenize(self.opts.description)]).cuda()
|
55 |
-
|
56 |
-
# Initialize logger
|
57 |
-
log_dir = os.path.join(opts.exp_dir, 'logs')
|
58 |
-
os.makedirs(log_dir, exist_ok=True)
|
59 |
-
self.log_dir = log_dir
|
60 |
-
self.logger = SummaryWriter(log_dir=log_dir)
|
61 |
-
|
62 |
-
# Initialize checkpoint dir
|
63 |
-
self.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints')
|
64 |
-
os.makedirs(self.checkpoint_dir, exist_ok=True)
|
65 |
-
self.best_val_loss = None
|
66 |
-
if self.opts.save_interval is None:
|
67 |
-
self.opts.save_interval = self.opts.max_steps
|
68 |
-
|
69 |
-
def train(self):
|
70 |
-
self.net.train()
|
71 |
-
while self.global_step < self.opts.max_steps:
|
72 |
-
for batch_idx, batch in enumerate(self.train_dataloader):
|
73 |
-
self.optimizer.zero_grad()
|
74 |
-
w = batch
|
75 |
-
w = w.to(self.device)
|
76 |
-
with torch.no_grad():
|
77 |
-
x, _ = self.net.decoder([w], input_is_latent=True, randomize_noise=False, truncation=1)
|
78 |
-
w_hat = w + 0.1 * self.net.mapper(w)
|
79 |
-
x_hat, w_hat = self.net.decoder([w_hat], input_is_latent=True, return_latents=True, randomize_noise=False, truncation=1)
|
80 |
-
loss, loss_dict = self.calc_loss(w, x, w_hat, x_hat)
|
81 |
-
loss.backward()
|
82 |
-
self.optimizer.step()
|
83 |
-
|
84 |
-
# Logging related
|
85 |
-
if self.global_step % self.opts.image_interval == 0 or (
|
86 |
-
self.global_step < 1000 and self.global_step % 1000 == 0):
|
87 |
-
self.parse_and_log_images(x, x_hat, title='images_train')
|
88 |
-
if self.global_step % self.opts.board_interval == 0:
|
89 |
-
self.print_metrics(loss_dict, prefix='train')
|
90 |
-
self.log_metrics(loss_dict, prefix='train')
|
91 |
-
|
92 |
-
# Validation related
|
93 |
-
val_loss_dict = None
|
94 |
-
if self.global_step % self.opts.val_interval == 0 or self.global_step == self.opts.max_steps:
|
95 |
-
val_loss_dict = self.validate()
|
96 |
-
if val_loss_dict and (self.best_val_loss is None or val_loss_dict['loss'] < self.best_val_loss):
|
97 |
-
self.best_val_loss = val_loss_dict['loss']
|
98 |
-
self.checkpoint_me(val_loss_dict, is_best=True)
|
99 |
-
|
100 |
-
if self.global_step % self.opts.save_interval == 0 or self.global_step == self.opts.max_steps:
|
101 |
-
if val_loss_dict is not None:
|
102 |
-
self.checkpoint_me(val_loss_dict, is_best=False)
|
103 |
-
else:
|
104 |
-
self.checkpoint_me(loss_dict, is_best=False)
|
105 |
-
|
106 |
-
if self.global_step == self.opts.max_steps:
|
107 |
-
print('OMG, finished training!')
|
108 |
-
break
|
109 |
-
|
110 |
-
self.global_step += 1
|
111 |
-
|
112 |
-
def validate(self):
|
113 |
-
self.net.eval()
|
114 |
-
agg_loss_dict = []
|
115 |
-
for batch_idx, batch in enumerate(self.test_dataloader):
|
116 |
-
if batch_idx > 200:
|
117 |
-
break
|
118 |
-
|
119 |
-
w = batch
|
120 |
-
|
121 |
-
with torch.no_grad():
|
122 |
-
w = w.to(self.device).float()
|
123 |
-
x, _ = self.net.decoder([w], input_is_latent=True, randomize_noise=True, truncation=1)
|
124 |
-
w_hat = w + 0.1 * self.net.mapper(w)
|
125 |
-
x_hat, _ = self.net.decoder([w_hat], input_is_latent=True, randomize_noise=True, truncation=1)
|
126 |
-
loss, cur_loss_dict = self.calc_loss(w, x, w_hat, x_hat)
|
127 |
-
agg_loss_dict.append(cur_loss_dict)
|
128 |
-
|
129 |
-
# Logging related
|
130 |
-
self.parse_and_log_images(x, x_hat, title='images_val', index=batch_idx)
|
131 |
-
|
132 |
-
# For first step just do sanity test on small amount of data
|
133 |
-
if self.global_step == 0 and batch_idx >= 4:
|
134 |
-
self.net.train()
|
135 |
-
return None # Do not log, inaccurate in first batch
|
136 |
-
|
137 |
-
loss_dict = train_utils.aggregate_loss_dict(agg_loss_dict)
|
138 |
-
self.log_metrics(loss_dict, prefix='test')
|
139 |
-
self.print_metrics(loss_dict, prefix='test')
|
140 |
-
|
141 |
-
self.net.train()
|
142 |
-
return loss_dict
|
143 |
-
|
144 |
-
def checkpoint_me(self, loss_dict, is_best):
|
145 |
-
save_name = 'best_model.pt' if is_best else 'iteration_{}.pt'.format(self.global_step)
|
146 |
-
save_dict = self.__get_save_dict()
|
147 |
-
checkpoint_path = os.path.join(self.checkpoint_dir, save_name)
|
148 |
-
torch.save(save_dict, checkpoint_path)
|
149 |
-
with open(os.path.join(self.checkpoint_dir, 'timestamp.txt'), 'a') as f:
|
150 |
-
if is_best:
|
151 |
-
f.write('**Best**: Step - {}, Loss - {:.3f} \n{}\n'.format(self.global_step, self.best_val_loss, loss_dict))
|
152 |
-
else:
|
153 |
-
f.write('Step - {}, \n{}\n'.format(self.global_step, loss_dict))
|
154 |
-
|
155 |
-
def configure_optimizers(self):
|
156 |
-
params = list(self.net.mapper.parameters())
|
157 |
-
if self.opts.optim_name == 'adam':
|
158 |
-
optimizer = torch.optim.Adam(params, lr=self.opts.learning_rate)
|
159 |
-
else:
|
160 |
-
optimizer = Ranger(params, lr=self.opts.learning_rate)
|
161 |
-
return optimizer
|
162 |
-
|
163 |
-
def configure_datasets(self):
|
164 |
-
if self.opts.latents_train_path:
|
165 |
-
train_latents = torch.load(self.opts.latents_train_path)
|
166 |
-
else:
|
167 |
-
train_latents_z = torch.randn(self.opts.train_dataset_size, 512).cuda()
|
168 |
-
train_latents = []
|
169 |
-
for b in range(self.opts.train_dataset_size // self.opts.batch_size):
|
170 |
-
with torch.no_grad():
|
171 |
-
_, train_latents_b = self.net.decoder([train_latents_z[b: b + self.opts.batch_size]],
|
172 |
-
truncation=0.7, truncation_latent=self.net.latent_avg, return_latents=True)
|
173 |
-
train_latents.append(train_latents_b)
|
174 |
-
train_latents = torch.cat(train_latents)
|
175 |
-
|
176 |
-
if self.opts.latents_test_path:
|
177 |
-
test_latents = torch.load(self.opts.latents_test_path)
|
178 |
-
else:
|
179 |
-
test_latents_z = torch.randn(self.opts.train_dataset_size, 512).cuda()
|
180 |
-
test_latents = []
|
181 |
-
for b in range(self.opts.test_dataset_size // self.opts.test_batch_size):
|
182 |
-
with torch.no_grad():
|
183 |
-
_, test_latents_b = self.net.decoder([test_latents_z[b: b + self.opts.test_batch_size]],
|
184 |
-
truncation=0.7, truncation_latent=self.net.latent_avg, return_latents=True)
|
185 |
-
test_latents.append(test_latents_b)
|
186 |
-
test_latents = torch.cat(test_latents)
|
187 |
-
|
188 |
-
train_dataset_celeba = LatentsDataset(latents=train_latents.cpu(),
|
189 |
-
opts=self.opts)
|
190 |
-
test_dataset_celeba = LatentsDataset(latents=test_latents.cpu(),
|
191 |
-
opts=self.opts)
|
192 |
-
train_dataset = train_dataset_celeba
|
193 |
-
test_dataset = test_dataset_celeba
|
194 |
-
print("Number of training samples: {}".format(len(train_dataset)))
|
195 |
-
print("Number of test samples: {}".format(len(test_dataset)))
|
196 |
-
return train_dataset, test_dataset
|
197 |
-
|
198 |
-
def calc_loss(self, w, x, w_hat, x_hat):
|
199 |
-
loss_dict = {}
|
200 |
-
loss = 0.0
|
201 |
-
if self.opts.id_lambda > 0:
|
202 |
-
loss_id, sim_improvement = self.id_loss(x_hat, x)
|
203 |
-
loss_dict['loss_id'] = float(loss_id)
|
204 |
-
loss_dict['id_improve'] = float(sim_improvement)
|
205 |
-
loss = loss_id * self.opts.id_lambda
|
206 |
-
if self.opts.clip_lambda > 0:
|
207 |
-
loss_clip = self.clip_loss(x_hat, self.text_inputs).mean()
|
208 |
-
loss_dict['loss_clip'] = float(loss_clip)
|
209 |
-
loss += loss_clip * self.opts.clip_lambda
|
210 |
-
if self.opts.latent_l2_lambda > 0:
|
211 |
-
loss_l2_latent = self.latent_l2_loss(w_hat, w)
|
212 |
-
loss_dict['loss_l2_latent'] = float(loss_l2_latent)
|
213 |
-
loss += loss_l2_latent * self.opts.latent_l2_lambda
|
214 |
-
loss_dict['loss'] = float(loss)
|
215 |
-
return loss, loss_dict
|
216 |
-
|
217 |
-
def log_metrics(self, metrics_dict, prefix):
|
218 |
-
for key, value in metrics_dict.items():
|
219 |
-
#pass
|
220 |
-
print(f"step: {self.global_step} \t metric: {prefix}/{key} \t value: {value}")
|
221 |
-
self.logger.add_scalar('{}/{}'.format(prefix, key), value, self.global_step)
|
222 |
-
|
223 |
-
def print_metrics(self, metrics_dict, prefix):
|
224 |
-
print('Metrics for {}, step {}'.format(prefix, self.global_step))
|
225 |
-
for key, value in metrics_dict.items():
|
226 |
-
print('\t{} = '.format(key), value)
|
227 |
-
|
228 |
-
def parse_and_log_images(self, x, x_hat, title, index=None):
|
229 |
-
if index is None:
|
230 |
-
path = os.path.join(self.log_dir, title, f'{str(self.global_step).zfill(5)}.jpg')
|
231 |
-
else:
|
232 |
-
path = os.path.join(self.log_dir, title, f'{str(self.global_step).zfill(5)}_{str(index).zfill(5)}.jpg')
|
233 |
-
os.makedirs(os.path.dirname(path), exist_ok=True)
|
234 |
-
torchvision.utils.save_image(torch.cat([x.detach().cpu(), x_hat.detach().cpu()]), path,
|
235 |
-
normalize=True, scale_each=True, range=(-1, 1), nrow=self.opts.batch_size)
|
236 |
-
|
237 |
-
def __get_save_dict(self):
|
238 |
-
save_dict = {
|
239 |
-
'state_dict': self.net.state_dict(),
|
240 |
-
'opts': vars(self.opts)
|
241 |
-
}
|
242 |
-
return save_dict
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/adapter.py
DELETED
@@ -1,291 +0,0 @@
|
|
1 |
-
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
2 |
-
#
|
3 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
-
# you may not use this file except in compliance with the License.
|
5 |
-
# You may obtain a copy of the License at
|
6 |
-
#
|
7 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
-
#
|
9 |
-
# Unless required by applicable law or agreed to in writing, software
|
10 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
-
# See the License for the specific language governing permissions and
|
13 |
-
# limitations under the License.
|
14 |
-
|
15 |
-
from typing import List, Optional
|
16 |
-
|
17 |
-
import torch
|
18 |
-
import torch.nn as nn
|
19 |
-
|
20 |
-
from ..configuration_utils import ConfigMixin, register_to_config
|
21 |
-
from .modeling_utils import ModelMixin
|
22 |
-
from .resnet import Downsample2D
|
23 |
-
|
24 |
-
|
25 |
-
class MultiAdapter(ModelMixin):
|
26 |
-
r"""
|
27 |
-
MultiAdapter is a wrapper model that contains multiple adapter models and merges their outputs according to
|
28 |
-
user-assigned weighting.
|
29 |
-
|
30 |
-
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
|
31 |
-
implements for all the model (such as downloading or saving, etc.)
|
32 |
-
|
33 |
-
Parameters:
|
34 |
-
adapters (`List[T2IAdapter]`, *optional*, defaults to None):
|
35 |
-
A list of `T2IAdapter` model instances.
|
36 |
-
"""
|
37 |
-
|
38 |
-
def __init__(self, adapters: List["T2IAdapter"]):
|
39 |
-
super(MultiAdapter, self).__init__()
|
40 |
-
|
41 |
-
self.num_adapter = len(adapters)
|
42 |
-
self.adapters = nn.ModuleList(adapters)
|
43 |
-
|
44 |
-
def forward(self, xs: torch.Tensor, adapter_weights: Optional[List[float]] = None) -> List[torch.Tensor]:
|
45 |
-
r"""
|
46 |
-
Args:
|
47 |
-
xs (`torch.Tensor`):
|
48 |
-
(batch, channel, height, width) input images for multiple adapter models concated along dimension 1,
|
49 |
-
`channel` should equal to `num_adapter` * "number of channel of image".
|
50 |
-
adapter_weights (`List[float]`, *optional*, defaults to None):
|
51 |
-
List of floats representing the weight which will be multiply to each adapter's output before adding
|
52 |
-
them together.
|
53 |
-
"""
|
54 |
-
if adapter_weights is None:
|
55 |
-
adapter_weights = torch.tensor([1 / self.num_adapter] * self.num_adapter)
|
56 |
-
else:
|
57 |
-
adapter_weights = torch.tensor(adapter_weights)
|
58 |
-
|
59 |
-
if xs.shape[1] % self.num_adapter != 0:
|
60 |
-
raise ValueError(
|
61 |
-
f"Expecting multi-adapter's input have number of channel that cab be evenly divisible "
|
62 |
-
f"by num_adapter: {xs.shape[1]} % {self.num_adapter} != 0"
|
63 |
-
)
|
64 |
-
x_list = torch.chunk(xs, self.num_adapter, dim=1)
|
65 |
-
accume_state = None
|
66 |
-
for x, w, adapter in zip(x_list, adapter_weights, self.adapters):
|
67 |
-
features = adapter(x)
|
68 |
-
if accume_state is None:
|
69 |
-
accume_state = features
|
70 |
-
else:
|
71 |
-
for i in range(len(features)):
|
72 |
-
accume_state[i] += w * features[i]
|
73 |
-
return accume_state
|
74 |
-
|
75 |
-
|
76 |
-
class T2IAdapter(ModelMixin, ConfigMixin):
|
77 |
-
r"""
|
78 |
-
A simple ResNet-like model that accepts images containing control signals such as keyposes and depth. The model
|
79 |
-
generates multiple feature maps that are used as additional conditioning in [`UNet2DConditionModel`]. The model's
|
80 |
-
architecture follows the original implementation of
|
81 |
-
[Adapter](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L97)
|
82 |
-
and
|
83 |
-
[AdapterLight](https://github.com/TencentARC/T2I-Adapter/blob/686de4681515662c0ac2ffa07bf5dda83af1038a/ldm/modules/encoders/adapter.py#L235).
|
84 |
-
|
85 |
-
This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
|
86 |
-
implements for all the model (such as downloading or saving, etc.)
|
87 |
-
|
88 |
-
Parameters:
|
89 |
-
in_channels (`int`, *optional*, defaults to 3):
|
90 |
-
Number of channels of Aapter's input(*control image*). Set this parameter to 1 if you're using gray scale
|
91 |
-
image as *control image*.
|
92 |
-
channels (`List[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
|
93 |
-
The number of channel of each downsample block's output hidden state. The `len(block_out_channels)` will
|
94 |
-
also determine the number of downsample blocks in the Adapter.
|
95 |
-
num_res_blocks (`int`, *optional*, defaults to 2):
|
96 |
-
Number of ResNet blocks in each downsample block
|
97 |
-
"""
|
98 |
-
|
99 |
-
@register_to_config
|
100 |
-
def __init__(
|
101 |
-
self,
|
102 |
-
in_channels: int = 3,
|
103 |
-
channels: List[int] = [320, 640, 1280, 1280],
|
104 |
-
num_res_blocks: int = 2,
|
105 |
-
downscale_factor: int = 8,
|
106 |
-
adapter_type: str = "full_adapter",
|
107 |
-
):
|
108 |
-
super().__init__()
|
109 |
-
|
110 |
-
if adapter_type == "full_adapter":
|
111 |
-
self.adapter = FullAdapter(in_channels, channels, num_res_blocks, downscale_factor)
|
112 |
-
elif adapter_type == "light_adapter":
|
113 |
-
self.adapter = LightAdapter(in_channels, channels, num_res_blocks, downscale_factor)
|
114 |
-
else:
|
115 |
-
raise ValueError(f"unknown adapter_type: {type}. Choose either 'full_adapter' or 'simple_adapter'")
|
116 |
-
|
117 |
-
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
|
118 |
-
return self.adapter(x)
|
119 |
-
|
120 |
-
@property
|
121 |
-
def total_downscale_factor(self):
|
122 |
-
return self.adapter.total_downscale_factor
|
123 |
-
|
124 |
-
|
125 |
-
# full adapter
|
126 |
-
|
127 |
-
|
128 |
-
class FullAdapter(nn.Module):
|
129 |
-
def __init__(
|
130 |
-
self,
|
131 |
-
in_channels: int = 3,
|
132 |
-
channels: List[int] = [320, 640, 1280, 1280],
|
133 |
-
num_res_blocks: int = 2,
|
134 |
-
downscale_factor: int = 8,
|
135 |
-
):
|
136 |
-
super().__init__()
|
137 |
-
|
138 |
-
in_channels = in_channels * downscale_factor**2
|
139 |
-
|
140 |
-
self.unshuffle = nn.PixelUnshuffle(downscale_factor)
|
141 |
-
self.conv_in = nn.Conv2d(in_channels, channels[0], kernel_size=3, padding=1)
|
142 |
-
|
143 |
-
self.body = nn.ModuleList(
|
144 |
-
[
|
145 |
-
AdapterBlock(channels[0], channels[0], num_res_blocks),
|
146 |
-
*[
|
147 |
-
AdapterBlock(channels[i - 1], channels[i], num_res_blocks, down=True)
|
148 |
-
for i in range(1, len(channels))
|
149 |
-
],
|
150 |
-
]
|
151 |
-
)
|
152 |
-
|
153 |
-
self.total_downscale_factor = downscale_factor * 2 ** (len(channels) - 1)
|
154 |
-
|
155 |
-
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
|
156 |
-
x = self.unshuffle(x)
|
157 |
-
x = self.conv_in(x)
|
158 |
-
|
159 |
-
features = []
|
160 |
-
|
161 |
-
for block in self.body:
|
162 |
-
x = block(x)
|
163 |
-
features.append(x)
|
164 |
-
|
165 |
-
return features
|
166 |
-
|
167 |
-
|
168 |
-
class AdapterBlock(nn.Module):
|
169 |
-
def __init__(self, in_channels, out_channels, num_res_blocks, down=False):
|
170 |
-
super().__init__()
|
171 |
-
|
172 |
-
self.downsample = None
|
173 |
-
if down:
|
174 |
-
self.downsample = Downsample2D(in_channels)
|
175 |
-
|
176 |
-
self.in_conv = None
|
177 |
-
if in_channels != out_channels:
|
178 |
-
self.in_conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
|
179 |
-
|
180 |
-
self.resnets = nn.Sequential(
|
181 |
-
*[AdapterResnetBlock(out_channels) for _ in range(num_res_blocks)],
|
182 |
-
)
|
183 |
-
|
184 |
-
def forward(self, x):
|
185 |
-
if self.downsample is not None:
|
186 |
-
x = self.downsample(x)
|
187 |
-
|
188 |
-
if self.in_conv is not None:
|
189 |
-
x = self.in_conv(x)
|
190 |
-
|
191 |
-
x = self.resnets(x)
|
192 |
-
|
193 |
-
return x
|
194 |
-
|
195 |
-
|
196 |
-
class AdapterResnetBlock(nn.Module):
|
197 |
-
def __init__(self, channels):
|
198 |
-
super().__init__()
|
199 |
-
self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
|
200 |
-
self.act = nn.ReLU()
|
201 |
-
self.block2 = nn.Conv2d(channels, channels, kernel_size=1)
|
202 |
-
|
203 |
-
def forward(self, x):
|
204 |
-
h = x
|
205 |
-
h = self.block1(h)
|
206 |
-
h = self.act(h)
|
207 |
-
h = self.block2(h)
|
208 |
-
|
209 |
-
return h + x
|
210 |
-
|
211 |
-
|
212 |
-
# light adapter
|
213 |
-
|
214 |
-
|
215 |
-
class LightAdapter(nn.Module):
|
216 |
-
def __init__(
|
217 |
-
self,
|
218 |
-
in_channels: int = 3,
|
219 |
-
channels: List[int] = [320, 640, 1280],
|
220 |
-
num_res_blocks: int = 4,
|
221 |
-
downscale_factor: int = 8,
|
222 |
-
):
|
223 |
-
super().__init__()
|
224 |
-
|
225 |
-
in_channels = in_channels * downscale_factor**2
|
226 |
-
|
227 |
-
self.unshuffle = nn.PixelUnshuffle(downscale_factor)
|
228 |
-
|
229 |
-
self.body = nn.ModuleList(
|
230 |
-
[
|
231 |
-
LightAdapterBlock(in_channels, channels[0], num_res_blocks),
|
232 |
-
*[
|
233 |
-
LightAdapterBlock(channels[i], channels[i + 1], num_res_blocks, down=True)
|
234 |
-
for i in range(len(channels) - 1)
|
235 |
-
],
|
236 |
-
LightAdapterBlock(channels[-1], channels[-1], num_res_blocks, down=True),
|
237 |
-
]
|
238 |
-
)
|
239 |
-
|
240 |
-
self.total_downscale_factor = downscale_factor * (2 ** len(channels))
|
241 |
-
|
242 |
-
def forward(self, x):
|
243 |
-
x = self.unshuffle(x)
|
244 |
-
|
245 |
-
features = []
|
246 |
-
|
247 |
-
for block in self.body:
|
248 |
-
x = block(x)
|
249 |
-
features.append(x)
|
250 |
-
|
251 |
-
return features
|
252 |
-
|
253 |
-
|
254 |
-
class LightAdapterBlock(nn.Module):
|
255 |
-
def __init__(self, in_channels, out_channels, num_res_blocks, down=False):
|
256 |
-
super().__init__()
|
257 |
-
mid_channels = out_channels // 4
|
258 |
-
|
259 |
-
self.downsample = None
|
260 |
-
if down:
|
261 |
-
self.downsample = Downsample2D(in_channels)
|
262 |
-
|
263 |
-
self.in_conv = nn.Conv2d(in_channels, mid_channels, kernel_size=1)
|
264 |
-
self.resnets = nn.Sequential(*[LightAdapterResnetBlock(mid_channels) for _ in range(num_res_blocks)])
|
265 |
-
self.out_conv = nn.Conv2d(mid_channels, out_channels, kernel_size=1)
|
266 |
-
|
267 |
-
def forward(self, x):
|
268 |
-
if self.downsample is not None:
|
269 |
-
x = self.downsample(x)
|
270 |
-
|
271 |
-
x = self.in_conv(x)
|
272 |
-
x = self.resnets(x)
|
273 |
-
x = self.out_conv(x)
|
274 |
-
|
275 |
-
return x
|
276 |
-
|
277 |
-
|
278 |
-
class LightAdapterResnetBlock(nn.Module):
|
279 |
-
def __init__(self, channels):
|
280 |
-
super().__init__()
|
281 |
-
self.block1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
|
282 |
-
self.act = nn.ReLU()
|
283 |
-
self.block2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
|
284 |
-
|
285 |
-
def forward(self, x):
|
286 |
-
h = x
|
287 |
-
h = self.block1(h)
|
288 |
-
h = self.act(h)
|
289 |
-
h = self.block2(h)
|
290 |
-
|
291 |
-
return h + x
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py
DELETED
@@ -1,376 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2023 HuggingFace Inc.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
|
16 |
-
import tempfile
|
17 |
-
import unittest
|
18 |
-
|
19 |
-
import numpy as np
|
20 |
-
|
21 |
-
from diffusers import (
|
22 |
-
DDIMScheduler,
|
23 |
-
DPMSolverMultistepScheduler,
|
24 |
-
EulerAncestralDiscreteScheduler,
|
25 |
-
EulerDiscreteScheduler,
|
26 |
-
LMSDiscreteScheduler,
|
27 |
-
OnnxStableDiffusionPipeline,
|
28 |
-
PNDMScheduler,
|
29 |
-
)
|
30 |
-
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
|
31 |
-
|
32 |
-
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
|
33 |
-
|
34 |
-
|
35 |
-
if is_onnx_available():
|
36 |
-
import onnxruntime as ort
|
37 |
-
|
38 |
-
|
39 |
-
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
|
40 |
-
hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
|
41 |
-
|
42 |
-
def get_dummy_inputs(self, seed=0):
|
43 |
-
generator = np.random.RandomState(seed)
|
44 |
-
inputs = {
|
45 |
-
"prompt": "A painting of a squirrel eating a burger",
|
46 |
-
"generator": generator,
|
47 |
-
"num_inference_steps": 2,
|
48 |
-
"guidance_scale": 7.5,
|
49 |
-
"output_type": "numpy",
|
50 |
-
}
|
51 |
-
return inputs
|
52 |
-
|
53 |
-
def test_pipeline_default_ddim(self):
|
54 |
-
pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
|
55 |
-
pipe.set_progress_bar_config(disable=None)
|
56 |
-
|
57 |
-
inputs = self.get_dummy_inputs()
|
58 |
-
image = pipe(**inputs).images
|
59 |
-
image_slice = image[0, -3:, -3:, -1]
|
60 |
-
|
61 |
-
assert image.shape == (1, 128, 128, 3)
|
62 |
-
expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
|
63 |
-
|
64 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
65 |
-
|
66 |
-
def test_pipeline_pndm(self):
|
67 |
-
pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
|
68 |
-
pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
|
69 |
-
pipe.set_progress_bar_config(disable=None)
|
70 |
-
|
71 |
-
inputs = self.get_dummy_inputs()
|
72 |
-
image = pipe(**inputs).images
|
73 |
-
image_slice = image[0, -3:, -3:, -1]
|
74 |
-
|
75 |
-
assert image.shape == (1, 128, 128, 3)
|
76 |
-
expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
|
77 |
-
|
78 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
79 |
-
|
80 |
-
def test_pipeline_lms(self):
|
81 |
-
pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
|
82 |
-
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
|
83 |
-
pipe.set_progress_bar_config(disable=None)
|
84 |
-
|
85 |
-
inputs = self.get_dummy_inputs()
|
86 |
-
image = pipe(**inputs).images
|
87 |
-
image_slice = image[0, -3:, -3:, -1]
|
88 |
-
|
89 |
-
assert image.shape == (1, 128, 128, 3)
|
90 |
-
expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
|
91 |
-
|
92 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
93 |
-
|
94 |
-
def test_pipeline_euler(self):
|
95 |
-
pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
|
96 |
-
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
|
97 |
-
pipe.set_progress_bar_config(disable=None)
|
98 |
-
|
99 |
-
inputs = self.get_dummy_inputs()
|
100 |
-
image = pipe(**inputs).images
|
101 |
-
image_slice = image[0, -3:, -3:, -1]
|
102 |
-
|
103 |
-
assert image.shape == (1, 128, 128, 3)
|
104 |
-
expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
|
105 |
-
|
106 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
107 |
-
|
108 |
-
def test_pipeline_euler_ancestral(self):
|
109 |
-
pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
|
110 |
-
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
|
111 |
-
pipe.set_progress_bar_config(disable=None)
|
112 |
-
|
113 |
-
inputs = self.get_dummy_inputs()
|
114 |
-
image = pipe(**inputs).images
|
115 |
-
image_slice = image[0, -3:, -3:, -1]
|
116 |
-
|
117 |
-
assert image.shape == (1, 128, 128, 3)
|
118 |
-
expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
|
119 |
-
|
120 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
121 |
-
|
122 |
-
def test_pipeline_dpm_multistep(self):
|
123 |
-
pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
|
124 |
-
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
|
125 |
-
pipe.set_progress_bar_config(disable=None)
|
126 |
-
|
127 |
-
inputs = self.get_dummy_inputs()
|
128 |
-
image = pipe(**inputs).images
|
129 |
-
image_slice = image[0, -3:, -3:, -1]
|
130 |
-
|
131 |
-
assert image.shape == (1, 128, 128, 3)
|
132 |
-
expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
|
133 |
-
|
134 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
|
135 |
-
|
136 |
-
def test_stable_diffusion_prompt_embeds(self):
|
137 |
-
pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
|
138 |
-
pipe.set_progress_bar_config(disable=None)
|
139 |
-
|
140 |
-
inputs = self.get_dummy_inputs()
|
141 |
-
inputs["prompt"] = 3 * [inputs["prompt"]]
|
142 |
-
|
143 |
-
# forward
|
144 |
-
output = pipe(**inputs)
|
145 |
-
image_slice_1 = output.images[0, -3:, -3:, -1]
|
146 |
-
|
147 |
-
inputs = self.get_dummy_inputs()
|
148 |
-
prompt = 3 * [inputs.pop("prompt")]
|
149 |
-
|
150 |
-
text_inputs = pipe.tokenizer(
|
151 |
-
prompt,
|
152 |
-
padding="max_length",
|
153 |
-
max_length=pipe.tokenizer.model_max_length,
|
154 |
-
truncation=True,
|
155 |
-
return_tensors="np",
|
156 |
-
)
|
157 |
-
text_inputs = text_inputs["input_ids"]
|
158 |
-
|
159 |
-
prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]
|
160 |
-
|
161 |
-
inputs["prompt_embeds"] = prompt_embeds
|
162 |
-
|
163 |
-
# forward
|
164 |
-
output = pipe(**inputs)
|
165 |
-
image_slice_2 = output.images[0, -3:, -3:, -1]
|
166 |
-
|
167 |
-
assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
|
168 |
-
|
169 |
-
def test_stable_diffusion_negative_prompt_embeds(self):
|
170 |
-
pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
|
171 |
-
pipe.set_progress_bar_config(disable=None)
|
172 |
-
|
173 |
-
inputs = self.get_dummy_inputs()
|
174 |
-
negative_prompt = 3 * ["this is a negative prompt"]
|
175 |
-
inputs["negative_prompt"] = negative_prompt
|
176 |
-
inputs["prompt"] = 3 * [inputs["prompt"]]
|
177 |
-
|
178 |
-
# forward
|
179 |
-
output = pipe(**inputs)
|
180 |
-
image_slice_1 = output.images[0, -3:, -3:, -1]
|
181 |
-
|
182 |
-
inputs = self.get_dummy_inputs()
|
183 |
-
prompt = 3 * [inputs.pop("prompt")]
|
184 |
-
|
185 |
-
embeds = []
|
186 |
-
for p in [prompt, negative_prompt]:
|
187 |
-
text_inputs = pipe.tokenizer(
|
188 |
-
p,
|
189 |
-
padding="max_length",
|
190 |
-
max_length=pipe.tokenizer.model_max_length,
|
191 |
-
truncation=True,
|
192 |
-
return_tensors="np",
|
193 |
-
)
|
194 |
-
text_inputs = text_inputs["input_ids"]
|
195 |
-
|
196 |
-
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])
|
197 |
-
|
198 |
-
inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
|
199 |
-
|
200 |
-
# forward
|
201 |
-
output = pipe(**inputs)
|
202 |
-
image_slice_2 = output.images[0, -3:, -3:, -1]
|
203 |
-
|
204 |
-
assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
|
205 |
-
|
206 |
-
|
207 |
-
@nightly
|
208 |
-
@require_onnxruntime
|
209 |
-
@require_torch_gpu
|
210 |
-
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
|
211 |
-
@property
|
212 |
-
def gpu_provider(self):
|
213 |
-
return (
|
214 |
-
"CUDAExecutionProvider",
|
215 |
-
{
|
216 |
-
"gpu_mem_limit": "15000000000", # 15GB
|
217 |
-
"arena_extend_strategy": "kSameAsRequested",
|
218 |
-
},
|
219 |
-
)
|
220 |
-
|
221 |
-
@property
|
222 |
-
def gpu_options(self):
|
223 |
-
options = ort.SessionOptions()
|
224 |
-
options.enable_mem_pattern = False
|
225 |
-
return options
|
226 |
-
|
227 |
-
def test_inference_default_pndm(self):
|
228 |
-
# using the PNDM scheduler by default
|
229 |
-
sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
|
230 |
-
"CompVis/stable-diffusion-v1-4",
|
231 |
-
revision="onnx",
|
232 |
-
safety_checker=None,
|
233 |
-
feature_extractor=None,
|
234 |
-
provider=self.gpu_provider,
|
235 |
-
sess_options=self.gpu_options,
|
236 |
-
)
|
237 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
238 |
-
|
239 |
-
prompt = "A painting of a squirrel eating a burger"
|
240 |
-
np.random.seed(0)
|
241 |
-
output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
|
242 |
-
image = output.images
|
243 |
-
|
244 |
-
image_slice = image[0, -3:, -3:, -1]
|
245 |
-
|
246 |
-
assert image.shape == (1, 512, 512, 3)
|
247 |
-
expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
|
248 |
-
|
249 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
|
250 |
-
|
251 |
-
def test_inference_ddim(self):
|
252 |
-
ddim_scheduler = DDIMScheduler.from_pretrained(
|
253 |
-
"runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
|
254 |
-
)
|
255 |
-
sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
|
256 |
-
"runwayml/stable-diffusion-v1-5",
|
257 |
-
revision="onnx",
|
258 |
-
scheduler=ddim_scheduler,
|
259 |
-
safety_checker=None,
|
260 |
-
feature_extractor=None,
|
261 |
-
provider=self.gpu_provider,
|
262 |
-
sess_options=self.gpu_options,
|
263 |
-
)
|
264 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
265 |
-
|
266 |
-
prompt = "open neural network exchange"
|
267 |
-
generator = np.random.RandomState(0)
|
268 |
-
output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
|
269 |
-
image = output.images
|
270 |
-
image_slice = image[0, -3:, -3:, -1]
|
271 |
-
|
272 |
-
assert image.shape == (1, 512, 512, 3)
|
273 |
-
expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
|
274 |
-
|
275 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
|
276 |
-
|
277 |
-
def test_inference_k_lms(self):
|
278 |
-
lms_scheduler = LMSDiscreteScheduler.from_pretrained(
|
279 |
-
"runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
|
280 |
-
)
|
281 |
-
sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
|
282 |
-
"runwayml/stable-diffusion-v1-5",
|
283 |
-
revision="onnx",
|
284 |
-
scheduler=lms_scheduler,
|
285 |
-
safety_checker=None,
|
286 |
-
feature_extractor=None,
|
287 |
-
provider=self.gpu_provider,
|
288 |
-
sess_options=self.gpu_options,
|
289 |
-
)
|
290 |
-
sd_pipe.set_progress_bar_config(disable=None)
|
291 |
-
|
292 |
-
prompt = "open neural network exchange"
|
293 |
-
generator = np.random.RandomState(0)
|
294 |
-
output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
|
295 |
-
image = output.images
|
296 |
-
image_slice = image[0, -3:, -3:, -1]
|
297 |
-
|
298 |
-
assert image.shape == (1, 512, 512, 3)
|
299 |
-
expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
|
300 |
-
|
301 |
-
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
|
302 |
-
|
303 |
-
def test_intermediate_state(self):
|
304 |
-
number_of_steps = 0
|
305 |
-
|
306 |
-
def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
|
307 |
-
test_callback_fn.has_been_called = True
|
308 |
-
nonlocal number_of_steps
|
309 |
-
number_of_steps += 1
|
310 |
-
if step == 0:
|
311 |
-
assert latents.shape == (1, 4, 64, 64)
|
312 |
-
latents_slice = latents[0, -3:, -3:, -1]
|
313 |
-
expected_slice = np.array(
|
314 |
-
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
|
315 |
-
)
|
316 |
-
|
317 |
-
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
|
318 |
-
elif step == 5:
|
319 |
-
assert latents.shape == (1, 4, 64, 64)
|
320 |
-
latents_slice = latents[0, -3:, -3:, -1]
|
321 |
-
expected_slice = np.array(
|
322 |
-
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
|
323 |
-
)
|
324 |
-
|
325 |
-
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
|
326 |
-
|
327 |
-
test_callback_fn.has_been_called = False
|
328 |
-
|
329 |
-
pipe = OnnxStableDiffusionPipeline.from_pretrained(
|
330 |
-
"runwayml/stable-diffusion-v1-5",
|
331 |
-
revision="onnx",
|
332 |
-
safety_checker=None,
|
333 |
-
feature_extractor=None,
|
334 |
-
provider=self.gpu_provider,
|
335 |
-
sess_options=self.gpu_options,
|
336 |
-
)
|
337 |
-
pipe.set_progress_bar_config(disable=None)
|
338 |
-
|
339 |
-
prompt = "Andromeda galaxy in a bottle"
|
340 |
-
|
341 |
-
generator = np.random.RandomState(0)
|
342 |
-
pipe(
|
343 |
-
prompt=prompt,
|
344 |
-
num_inference_steps=5,
|
345 |
-
guidance_scale=7.5,
|
346 |
-
generator=generator,
|
347 |
-
callback=test_callback_fn,
|
348 |
-
callback_steps=1,
|
349 |
-
)
|
350 |
-
assert test_callback_fn.has_been_called
|
351 |
-
assert number_of_steps == 6
|
352 |
-
|
353 |
-
def test_stable_diffusion_no_safety_checker(self):
|
354 |
-
pipe = OnnxStableDiffusionPipeline.from_pretrained(
|
355 |
-
"runwayml/stable-diffusion-v1-5",
|
356 |
-
revision="onnx",
|
357 |
-
safety_checker=None,
|
358 |
-
feature_extractor=None,
|
359 |
-
provider=self.gpu_provider,
|
360 |
-
sess_options=self.gpu_options,
|
361 |
-
)
|
362 |
-
assert isinstance(pipe, OnnxStableDiffusionPipeline)
|
363 |
-
assert pipe.safety_checker is None
|
364 |
-
|
365 |
-
image = pipe("example prompt", num_inference_steps=2).images[0]
|
366 |
-
assert image is not None
|
367 |
-
|
368 |
-
# check that there's no error when saving a pipeline with one of the models being None
|
369 |
-
with tempfile.TemporaryDirectory() as tmpdirname:
|
370 |
-
pipe.save_pretrained(tmpdirname)
|
371 |
-
pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)
|
372 |
-
|
373 |
-
# sanity check that the pipeline still works
|
374 |
-
assert pipe.safety_checker is None
|
375 |
-
image = pipe("example prompt", num_inference_steps=2).images[0]
|
376 |
-
assert image is not None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py
DELETED
@@ -1,16 +0,0 @@
|
|
1 |
-
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
|
2 |
-
model = dict(
|
3 |
-
pretrained='open-mmlab://regnetx_6.4gf',
|
4 |
-
backbone=dict(
|
5 |
-
type='RegNet',
|
6 |
-
arch='regnetx_6.4gf',
|
7 |
-
out_indices=(0, 1, 2, 3),
|
8 |
-
frozen_stages=1,
|
9 |
-
norm_cfg=dict(type='BN', requires_grad=True),
|
10 |
-
norm_eval=True,
|
11 |
-
style='pytorch'),
|
12 |
-
neck=dict(
|
13 |
-
type='FPN',
|
14 |
-
in_channels=[168, 392, 784, 1624],
|
15 |
-
out_channels=256,
|
16 |
-
num_outs=5))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_image_detection/mmdet/utils/logger.py
DELETED
@@ -1,19 +0,0 @@
|
|
1 |
-
import logging
|
2 |
-
|
3 |
-
from mmcv.utils import get_logger
|
4 |
-
|
5 |
-
|
6 |
-
def get_root_logger(log_file=None, log_level=logging.INFO):
|
7 |
-
"""Get root logger.
|
8 |
-
|
9 |
-
Args:
|
10 |
-
log_file (str, optional): File path of log. Defaults to None.
|
11 |
-
log_level (int, optional): The level of logger.
|
12 |
-
Defaults to logging.INFO.
|
13 |
-
|
14 |
-
Returns:
|
15 |
-
:obj:`logging.Logger`: The obtained logger
|
16 |
-
"""
|
17 |
-
logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level)
|
18 |
-
|
19 |
-
return logger
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Andy1621/uniformer_light/transforms.py
DELETED
@@ -1,443 +0,0 @@
|
|
1 |
-
import torchvision
|
2 |
-
import random
|
3 |
-
from PIL import Image, ImageOps
|
4 |
-
import numpy as np
|
5 |
-
import numbers
|
6 |
-
import math
|
7 |
-
import torch
|
8 |
-
|
9 |
-
|
10 |
-
class GroupRandomCrop(object):
|
11 |
-
def __init__(self, size):
|
12 |
-
if isinstance(size, numbers.Number):
|
13 |
-
self.size = (int(size), int(size))
|
14 |
-
else:
|
15 |
-
self.size = size
|
16 |
-
|
17 |
-
def __call__(self, img_group):
|
18 |
-
|
19 |
-
w, h = img_group[0].size
|
20 |
-
th, tw = self.size
|
21 |
-
|
22 |
-
out_images = list()
|
23 |
-
|
24 |
-
x1 = random.randint(0, w - tw)
|
25 |
-
y1 = random.randint(0, h - th)
|
26 |
-
|
27 |
-
for img in img_group:
|
28 |
-
assert(img.size[0] == w and img.size[1] == h)
|
29 |
-
if w == tw and h == th:
|
30 |
-
out_images.append(img)
|
31 |
-
else:
|
32 |
-
out_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
|
33 |
-
|
34 |
-
return out_images
|
35 |
-
|
36 |
-
|
37 |
-
class MultiGroupRandomCrop(object):
|
38 |
-
def __init__(self, size, groups=1):
|
39 |
-
if isinstance(size, numbers.Number):
|
40 |
-
self.size = (int(size), int(size))
|
41 |
-
else:
|
42 |
-
self.size = size
|
43 |
-
self.groups = groups
|
44 |
-
|
45 |
-
def __call__(self, img_group):
|
46 |
-
|
47 |
-
w, h = img_group[0].size
|
48 |
-
th, tw = self.size
|
49 |
-
|
50 |
-
out_images = list()
|
51 |
-
|
52 |
-
for i in range(self.groups):
|
53 |
-
x1 = random.randint(0, w - tw)
|
54 |
-
y1 = random.randint(0, h - th)
|
55 |
-
|
56 |
-
for img in img_group:
|
57 |
-
assert(img.size[0] == w and img.size[1] == h)
|
58 |
-
if w == tw and h == th:
|
59 |
-
out_images.append(img)
|
60 |
-
else:
|
61 |
-
out_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
|
62 |
-
|
63 |
-
return out_images
|
64 |
-
|
65 |
-
|
66 |
-
class GroupCenterCrop(object):
|
67 |
-
def __init__(self, size):
|
68 |
-
self.worker = torchvision.transforms.CenterCrop(size)
|
69 |
-
|
70 |
-
def __call__(self, img_group):
|
71 |
-
return [self.worker(img) for img in img_group]
|
72 |
-
|
73 |
-
|
74 |
-
class GroupRandomHorizontalFlip(object):
|
75 |
-
"""Randomly horizontally flips the given PIL.Image with a probability of 0.5
|
76 |
-
"""
|
77 |
-
|
78 |
-
def __init__(self, is_flow=False):
|
79 |
-
self.is_flow = is_flow
|
80 |
-
|
81 |
-
def __call__(self, img_group, is_flow=False):
|
82 |
-
v = random.random()
|
83 |
-
if v < 0.5:
|
84 |
-
ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group]
|
85 |
-
if self.is_flow:
|
86 |
-
for i in range(0, len(ret), 2):
|
87 |
-
# invert flow pixel values when flipping
|
88 |
-
ret[i] = ImageOps.invert(ret[i])
|
89 |
-
return ret
|
90 |
-
else:
|
91 |
-
return img_group
|
92 |
-
|
93 |
-
|
94 |
-
class GroupNormalize(object):
|
95 |
-
def __init__(self, mean, std):
|
96 |
-
self.mean = mean
|
97 |
-
self.std = std
|
98 |
-
|
99 |
-
def __call__(self, tensor):
|
100 |
-
rep_mean = self.mean * (tensor.size()[0] // len(self.mean))
|
101 |
-
rep_std = self.std * (tensor.size()[0] // len(self.std))
|
102 |
-
|
103 |
-
# TODO: make efficient
|
104 |
-
for t, m, s in zip(tensor, rep_mean, rep_std):
|
105 |
-
t.sub_(m).div_(s)
|
106 |
-
|
107 |
-
return tensor
|
108 |
-
|
109 |
-
|
110 |
-
class GroupScale(object):
|
111 |
-
""" Rescales the input PIL.Image to the given 'size'.
|
112 |
-
'size' will be the size of the smaller edge.
|
113 |
-
For example, if height > width, then image will be
|
114 |
-
rescaled to (size * height / width, size)
|
115 |
-
size: size of the smaller edge
|
116 |
-
interpolation: Default: PIL.Image.BILINEAR
|
117 |
-
"""
|
118 |
-
|
119 |
-
def __init__(self, size, interpolation=Image.BILINEAR):
|
120 |
-
self.worker = torchvision.transforms.Resize(size, interpolation)
|
121 |
-
|
122 |
-
def __call__(self, img_group):
|
123 |
-
return [self.worker(img) for img in img_group]
|
124 |
-
|
125 |
-
|
126 |
-
class GroupOverSample(object):
|
127 |
-
def __init__(self, crop_size, scale_size=None, flip=True):
|
128 |
-
self.crop_size = crop_size if not isinstance(
|
129 |
-
crop_size, int) else (crop_size, crop_size)
|
130 |
-
|
131 |
-
if scale_size is not None:
|
132 |
-
self.scale_worker = GroupScale(scale_size)
|
133 |
-
else:
|
134 |
-
self.scale_worker = None
|
135 |
-
self.flip = flip
|
136 |
-
|
137 |
-
def __call__(self, img_group):
|
138 |
-
|
139 |
-
if self.scale_worker is not None:
|
140 |
-
img_group = self.scale_worker(img_group)
|
141 |
-
|
142 |
-
image_w, image_h = img_group[0].size
|
143 |
-
crop_w, crop_h = self.crop_size
|
144 |
-
|
145 |
-
offsets = GroupMultiScaleCrop.fill_fix_offset(
|
146 |
-
False, image_w, image_h, crop_w, crop_h)
|
147 |
-
oversample_group = list()
|
148 |
-
for o_w, o_h in offsets:
|
149 |
-
normal_group = list()
|
150 |
-
flip_group = list()
|
151 |
-
for i, img in enumerate(img_group):
|
152 |
-
crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
|
153 |
-
normal_group.append(crop)
|
154 |
-
flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)
|
155 |
-
|
156 |
-
if img.mode == 'L' and i % 2 == 0:
|
157 |
-
flip_group.append(ImageOps.invert(flip_crop))
|
158 |
-
else:
|
159 |
-
flip_group.append(flip_crop)
|
160 |
-
|
161 |
-
oversample_group.extend(normal_group)
|
162 |
-
if self.flip:
|
163 |
-
oversample_group.extend(flip_group)
|
164 |
-
return oversample_group
|
165 |
-
|
166 |
-
|
167 |
-
class GroupFullResSample(object):
|
168 |
-
def __init__(self, crop_size, scale_size=None, flip=True):
|
169 |
-
self.crop_size = crop_size if not isinstance(
|
170 |
-
crop_size, int) else (crop_size, crop_size)
|
171 |
-
|
172 |
-
if scale_size is not None:
|
173 |
-
self.scale_worker = GroupScale(scale_size)
|
174 |
-
else:
|
175 |
-
self.scale_worker = None
|
176 |
-
self.flip = flip
|
177 |
-
|
178 |
-
def __call__(self, img_group):
|
179 |
-
|
180 |
-
if self.scale_worker is not None:
|
181 |
-
img_group = self.scale_worker(img_group)
|
182 |
-
|
183 |
-
image_w, image_h = img_group[0].size
|
184 |
-
crop_w, crop_h = self.crop_size
|
185 |
-
|
186 |
-
w_step = (image_w - crop_w) // 4
|
187 |
-
h_step = (image_h - crop_h) // 4
|
188 |
-
|
189 |
-
offsets = list()
|
190 |
-
offsets.append((0 * w_step, 2 * h_step)) # left
|
191 |
-
offsets.append((4 * w_step, 2 * h_step)) # right
|
192 |
-
offsets.append((2 * w_step, 2 * h_step)) # center
|
193 |
-
|
194 |
-
oversample_group = list()
|
195 |
-
for o_w, o_h in offsets:
|
196 |
-
normal_group = list()
|
197 |
-
flip_group = list()
|
198 |
-
for i, img in enumerate(img_group):
|
199 |
-
crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
|
200 |
-
normal_group.append(crop)
|
201 |
-
if self.flip:
|
202 |
-
flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)
|
203 |
-
|
204 |
-
if img.mode == 'L' and i % 2 == 0:
|
205 |
-
flip_group.append(ImageOps.invert(flip_crop))
|
206 |
-
else:
|
207 |
-
flip_group.append(flip_crop)
|
208 |
-
|
209 |
-
oversample_group.extend(normal_group)
|
210 |
-
oversample_group.extend(flip_group)
|
211 |
-
return oversample_group
|
212 |
-
|
213 |
-
|
214 |
-
class GroupMultiScaleCrop(object):
|
215 |
-
|
216 |
-
def __init__(self, input_size, scales=None, max_distort=1,
|
217 |
-
fix_crop=True, more_fix_crop=True):
|
218 |
-
self.scales = scales if scales is not None else [1, .875, .75, .66]
|
219 |
-
self.max_distort = max_distort
|
220 |
-
self.fix_crop = fix_crop
|
221 |
-
self.more_fix_crop = more_fix_crop
|
222 |
-
self.input_size = input_size if not isinstance(input_size, int) else [
|
223 |
-
input_size, input_size]
|
224 |
-
self.interpolation = Image.BILINEAR
|
225 |
-
|
226 |
-
def __call__(self, img_group):
|
227 |
-
|
228 |
-
im_size = img_group[0].size
|
229 |
-
|
230 |
-
crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size)
|
231 |
-
crop_img_group = [
|
232 |
-
img.crop(
|
233 |
-
(offset_w,
|
234 |
-
offset_h,
|
235 |
-
offset_w +
|
236 |
-
crop_w,
|
237 |
-
offset_h +
|
238 |
-
crop_h)) for img in img_group]
|
239 |
-
ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation)
|
240 |
-
for img in crop_img_group]
|
241 |
-
return ret_img_group
|
242 |
-
|
243 |
-
def _sample_crop_size(self, im_size):
|
244 |
-
image_w, image_h = im_size[0], im_size[1]
|
245 |
-
|
246 |
-
# find a crop size
|
247 |
-
base_size = min(image_w, image_h)
|
248 |
-
crop_sizes = [int(base_size * x) for x in self.scales]
|
249 |
-
crop_h = [
|
250 |
-
self.input_size[1] if abs(
|
251 |
-
x - self.input_size[1]) < 3 else x for x in crop_sizes]
|
252 |
-
crop_w = [
|
253 |
-
self.input_size[0] if abs(
|
254 |
-
x - self.input_size[0]) < 3 else x for x in crop_sizes]
|
255 |
-
|
256 |
-
pairs = []
|
257 |
-
for i, h in enumerate(crop_h):
|
258 |
-
for j, w in enumerate(crop_w):
|
259 |
-
if abs(i - j) <= self.max_distort:
|
260 |
-
pairs.append((w, h))
|
261 |
-
|
262 |
-
crop_pair = random.choice(pairs)
|
263 |
-
if not self.fix_crop:
|
264 |
-
w_offset = random.randint(0, image_w - crop_pair[0])
|
265 |
-
h_offset = random.randint(0, image_h - crop_pair[1])
|
266 |
-
else:
|
267 |
-
w_offset, h_offset = self._sample_fix_offset(
|
268 |
-
image_w, image_h, crop_pair[0], crop_pair[1])
|
269 |
-
|
270 |
-
return crop_pair[0], crop_pair[1], w_offset, h_offset
|
271 |
-
|
272 |
-
def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h):
|
273 |
-
offsets = self.fill_fix_offset(
|
274 |
-
self.more_fix_crop, image_w, image_h, crop_w, crop_h)
|
275 |
-
return random.choice(offsets)
|
276 |
-
|
277 |
-
@staticmethod
|
278 |
-
def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h):
|
279 |
-
w_step = (image_w - crop_w) // 4
|
280 |
-
h_step = (image_h - crop_h) // 4
|
281 |
-
|
282 |
-
ret = list()
|
283 |
-
ret.append((0, 0)) # upper left
|
284 |
-
ret.append((4 * w_step, 0)) # upper right
|
285 |
-
ret.append((0, 4 * h_step)) # lower left
|
286 |
-
ret.append((4 * w_step, 4 * h_step)) # lower right
|
287 |
-
ret.append((2 * w_step, 2 * h_step)) # center
|
288 |
-
|
289 |
-
if more_fix_crop:
|
290 |
-
ret.append((0, 2 * h_step)) # center left
|
291 |
-
ret.append((4 * w_step, 2 * h_step)) # center right
|
292 |
-
ret.append((2 * w_step, 4 * h_step)) # lower center
|
293 |
-
ret.append((2 * w_step, 0 * h_step)) # upper center
|
294 |
-
|
295 |
-
ret.append((1 * w_step, 1 * h_step)) # upper left quarter
|
296 |
-
ret.append((3 * w_step, 1 * h_step)) # upper right quarter
|
297 |
-
ret.append((1 * w_step, 3 * h_step)) # lower left quarter
|
298 |
-
ret.append((3 * w_step, 3 * h_step)) # lower righ quarter
|
299 |
-
|
300 |
-
return ret
|
301 |
-
|
302 |
-
|
303 |
-
class GroupRandomSizedCrop(object):
|
304 |
-
"""Random crop the given PIL.Image to a random size of (0.08 to 1.0) of the original size
|
305 |
-
and and a random aspect ratio of 3/4 to 4/3 of the original aspect ratio
|
306 |
-
This is popularly used to train the Inception networks
|
307 |
-
size: size of the smaller edge
|
308 |
-
interpolation: Default: PIL.Image.BILINEAR
|
309 |
-
"""
|
310 |
-
|
311 |
-
def __init__(self, size, interpolation=Image.BILINEAR):
|
312 |
-
self.size = size
|
313 |
-
self.interpolation = interpolation
|
314 |
-
|
315 |
-
def __call__(self, img_group):
|
316 |
-
for attempt in range(10):
|
317 |
-
area = img_group[0].size[0] * img_group[0].size[1]
|
318 |
-
target_area = random.uniform(0.08, 1.0) * area
|
319 |
-
aspect_ratio = random.uniform(3. / 4, 4. / 3)
|
320 |
-
|
321 |
-
w = int(round(math.sqrt(target_area * aspect_ratio)))
|
322 |
-
h = int(round(math.sqrt(target_area / aspect_ratio)))
|
323 |
-
|
324 |
-
if random.random() < 0.5:
|
325 |
-
w, h = h, w
|
326 |
-
|
327 |
-
if w <= img_group[0].size[0] and h <= img_group[0].size[1]:
|
328 |
-
x1 = random.randint(0, img_group[0].size[0] - w)
|
329 |
-
y1 = random.randint(0, img_group[0].size[1] - h)
|
330 |
-
found = True
|
331 |
-
break
|
332 |
-
else:
|
333 |
-
found = False
|
334 |
-
x1 = 0
|
335 |
-
y1 = 0
|
336 |
-
|
337 |
-
if found:
|
338 |
-
out_group = list()
|
339 |
-
for img in img_group:
|
340 |
-
img = img.crop((x1, y1, x1 + w, y1 + h))
|
341 |
-
assert(img.size == (w, h))
|
342 |
-
out_group.append(
|
343 |
-
img.resize(
|
344 |
-
(self.size, self.size), self.interpolation))
|
345 |
-
return out_group
|
346 |
-
else:
|
347 |
-
# Fallback
|
348 |
-
scale = GroupScale(self.size, interpolation=self.interpolation)
|
349 |
-
crop = GroupRandomCrop(self.size)
|
350 |
-
return crop(scale(img_group))
|
351 |
-
|
352 |
-
|
353 |
-
class ConvertDataFormat(object):
|
354 |
-
def __init__(self, model_type):
|
355 |
-
self.model_type = model_type
|
356 |
-
|
357 |
-
def __call__(self, images):
|
358 |
-
if self.model_type == '2D':
|
359 |
-
return images
|
360 |
-
tc, h, w = images.size()
|
361 |
-
t = tc // 3
|
362 |
-
images = images.view(t, 3, h, w)
|
363 |
-
images = images.permute(1, 0, 2, 3)
|
364 |
-
return images
|
365 |
-
|
366 |
-
|
367 |
-
class Stack(object):
|
368 |
-
|
369 |
-
def __init__(self, roll=False):
|
370 |
-
self.roll = roll
|
371 |
-
|
372 |
-
def __call__(self, img_group):
|
373 |
-
if img_group[0].mode == 'L':
|
374 |
-
return np.concatenate([np.expand_dims(x, 2)
|
375 |
-
for x in img_group], axis=2)
|
376 |
-
elif img_group[0].mode == 'RGB':
|
377 |
-
if self.roll:
|
378 |
-
return np.concatenate([np.array(x)[:, :, ::-1]
|
379 |
-
for x in img_group], axis=2)
|
380 |
-
else:
|
381 |
-
#print(np.concatenate(img_group, axis=2).shape)
|
382 |
-
# print(img_group[0].shape)
|
383 |
-
return np.concatenate(img_group, axis=2)
|
384 |
-
|
385 |
-
|
386 |
-
class ToTorchFormatTensor(object):
|
387 |
-
""" Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255]
|
388 |
-
to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """
|
389 |
-
|
390 |
-
def __init__(self, div=True):
|
391 |
-
self.div = div
|
392 |
-
|
393 |
-
def __call__(self, pic):
|
394 |
-
if isinstance(pic, np.ndarray):
|
395 |
-
# handle numpy array
|
396 |
-
img = torch.from_numpy(pic).permute(2, 0, 1).contiguous()
|
397 |
-
else:
|
398 |
-
# handle PIL Image
|
399 |
-
img = torch.ByteTensor(
|
400 |
-
torch.ByteStorage.from_buffer(
|
401 |
-
pic.tobytes()))
|
402 |
-
img = img.view(pic.size[1], pic.size[0], len(pic.mode))
|
403 |
-
# put it from HWC to CHW format
|
404 |
-
# yikes, this transpose takes 80% of the loading time/CPU
|
405 |
-
img = img.transpose(0, 1).transpose(0, 2).contiguous()
|
406 |
-
return img.float().div(255) if self.div else img.float()
|
407 |
-
|
408 |
-
|
409 |
-
class IdentityTransform(object):
|
410 |
-
|
411 |
-
def __call__(self, data):
|
412 |
-
return data
|
413 |
-
|
414 |
-
|
415 |
-
if __name__ == "__main__":
|
416 |
-
trans = torchvision.transforms.Compose([
|
417 |
-
GroupScale(256),
|
418 |
-
GroupRandomCrop(224),
|
419 |
-
Stack(),
|
420 |
-
ToTorchFormatTensor(),
|
421 |
-
GroupNormalize(
|
422 |
-
mean=[.485, .456, .406],
|
423 |
-
std=[.229, .224, .225]
|
424 |
-
)]
|
425 |
-
)
|
426 |
-
|
427 |
-
im = Image.open('../tensorflow-model-zoo.torch/lena_299.png')
|
428 |
-
|
429 |
-
color_group = [im] * 3
|
430 |
-
rst = trans(color_group)
|
431 |
-
|
432 |
-
gray_group = [im.convert('L')] * 9
|
433 |
-
gray_rst = trans(gray_group)
|
434 |
-
|
435 |
-
trans2 = torchvision.transforms.Compose([
|
436 |
-
GroupRandomSizedCrop(256),
|
437 |
-
Stack(),
|
438 |
-
ToTorchFormatTensor(),
|
439 |
-
GroupNormalize(
|
440 |
-
mean=[.485, .456, .406],
|
441 |
-
std=[.229, .224, .225])
|
442 |
-
])
|
443 |
-
print(trans2(color_group))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Artrajz/vits-simple-api/vits/bert/ProsodyModel.py
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
import os
|
2 |
-
import torch
|
3 |
-
import torch.nn as nn
|
4 |
-
import torch.nn.functional as F
|
5 |
-
|
6 |
-
from transformers import BertModel, BertConfig, BertTokenizer
|
7 |
-
|
8 |
-
|
9 |
-
class CharEmbedding(nn.Module):
|
10 |
-
def __init__(self, model_dir):
|
11 |
-
super().__init__()
|
12 |
-
self.tokenizer = BertTokenizer.from_pretrained(model_dir)
|
13 |
-
self.bert_config = BertConfig.from_pretrained(model_dir)
|
14 |
-
self.hidden_size = self.bert_config.hidden_size
|
15 |
-
self.bert = BertModel(self.bert_config)
|
16 |
-
self.proj = nn.Linear(self.hidden_size, 256)
|
17 |
-
self.linear = nn.Linear(256, 3)
|
18 |
-
|
19 |
-
def text2Token(self, text):
|
20 |
-
token = self.tokenizer.tokenize(text)
|
21 |
-
txtid = self.tokenizer.convert_tokens_to_ids(token)
|
22 |
-
return txtid
|
23 |
-
|
24 |
-
def forward(self, inputs_ids, inputs_masks, tokens_type_ids):
|
25 |
-
out_seq = self.bert(input_ids=inputs_ids,
|
26 |
-
attention_mask=inputs_masks,
|
27 |
-
token_type_ids=tokens_type_ids)[0]
|
28 |
-
out_seq = self.proj(out_seq)
|
29 |
-
return out_seq
|
30 |
-
|
31 |
-
|
32 |
-
class TTSProsody(object):
|
33 |
-
def __init__(self, path, device):
|
34 |
-
self.device = device
|
35 |
-
self.char_model = CharEmbedding(path)
|
36 |
-
self.char_model.load_state_dict(
|
37 |
-
torch.load(
|
38 |
-
os.path.join(path, 'prosody_model.pt'),
|
39 |
-
map_location="cpu"
|
40 |
-
),
|
41 |
-
strict=False
|
42 |
-
)
|
43 |
-
self.char_model.eval()
|
44 |
-
self.char_model.to(self.device)
|
45 |
-
|
46 |
-
def get_char_embeds(self, text):
|
47 |
-
input_ids = self.char_model.text2Token(text)
|
48 |
-
input_masks = [1] * len(input_ids)
|
49 |
-
type_ids = [0] * len(input_ids)
|
50 |
-
input_ids = torch.LongTensor([input_ids]).to(self.device)
|
51 |
-
input_masks = torch.LongTensor([input_masks]).to(self.device)
|
52 |
-
type_ids = torch.LongTensor([type_ids]).to(self.device)
|
53 |
-
|
54 |
-
with torch.no_grad():
|
55 |
-
char_embeds = self.char_model(
|
56 |
-
input_ids, input_masks, type_ids).squeeze(0).cpu()
|
57 |
-
return char_embeds
|
58 |
-
|
59 |
-
def expand_for_phone(self, char_embeds, length): # length of phones for char
|
60 |
-
assert char_embeds.size(0) == len(length)
|
61 |
-
expand_vecs = list()
|
62 |
-
for vec, leng in zip(char_embeds, length):
|
63 |
-
vec = vec.expand(leng, -1)
|
64 |
-
expand_vecs.append(vec)
|
65 |
-
expand_embeds = torch.cat(expand_vecs, 0)
|
66 |
-
assert expand_embeds.size(0) == sum(length)
|
67 |
-
return expand_embeds.numpy()
|
68 |
-
|
69 |
-
|
70 |
-
if __name__ == "__main__":
|
71 |
-
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
72 |
-
prosody = TTSProsody('./bert/', device)
|
73 |
-
while True:
|
74 |
-
text = input("请输入文本:")
|
75 |
-
prosody.get_char_embeds(text)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/resolvelib/compat/collections_abc.py
DELETED
@@ -1,6 +0,0 @@
|
|
1 |
-
__all__ = ["Mapping", "Sequence"]
|
2 |
-
|
3 |
-
try:
|
4 |
-
from collections.abc import Mapping, Sequence
|
5 |
-
except ImportError:
|
6 |
-
from collections import Mapping, Sequence
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/file_util.py
DELETED
@@ -1,249 +0,0 @@
|
|
1 |
-
"""distutils.file_util
|
2 |
-
|
3 |
-
Utility functions for operating on single files.
|
4 |
-
"""
|
5 |
-
|
6 |
-
import os
|
7 |
-
from distutils.errors import DistutilsFileError
|
8 |
-
from distutils import log
|
9 |
-
|
10 |
-
# for generating verbose output in 'copy_file()'
|
11 |
-
_copy_action = {None: 'copying', 'hard': 'hard linking', 'sym': 'symbolically linking'}
|
12 |
-
|
13 |
-
|
14 |
-
def _copy_file_contents(src, dst, buffer_size=16 * 1024): # noqa: C901
|
15 |
-
"""Copy the file 'src' to 'dst'; both must be filenames. Any error
|
16 |
-
opening either file, reading from 'src', or writing to 'dst', raises
|
17 |
-
DistutilsFileError. Data is read/written in chunks of 'buffer_size'
|
18 |
-
bytes (default 16k). No attempt is made to handle anything apart from
|
19 |
-
regular files.
|
20 |
-
"""
|
21 |
-
# Stolen from shutil module in the standard library, but with
|
22 |
-
# custom error-handling added.
|
23 |
-
fsrc = None
|
24 |
-
fdst = None
|
25 |
-
try:
|
26 |
-
try:
|
27 |
-
fsrc = open(src, 'rb')
|
28 |
-
except OSError as e:
|
29 |
-
raise DistutilsFileError("could not open '{}': {}".format(src, e.strerror))
|
30 |
-
|
31 |
-
if os.path.exists(dst):
|
32 |
-
try:
|
33 |
-
os.unlink(dst)
|
34 |
-
except OSError as e:
|
35 |
-
raise DistutilsFileError(
|
36 |
-
"could not delete '{}': {}".format(dst, e.strerror)
|
37 |
-
)
|
38 |
-
|
39 |
-
try:
|
40 |
-
fdst = open(dst, 'wb')
|
41 |
-
except OSError as e:
|
42 |
-
raise DistutilsFileError(
|
43 |
-
"could not create '{}': {}".format(dst, e.strerror)
|
44 |
-
)
|
45 |
-
|
46 |
-
while True:
|
47 |
-
try:
|
48 |
-
buf = fsrc.read(buffer_size)
|
49 |
-
except OSError as e:
|
50 |
-
raise DistutilsFileError(
|
51 |
-
"could not read from '{}': {}".format(src, e.strerror)
|
52 |
-
)
|
53 |
-
|
54 |
-
if not buf:
|
55 |
-
break
|
56 |
-
|
57 |
-
try:
|
58 |
-
fdst.write(buf)
|
59 |
-
except OSError as e:
|
60 |
-
raise DistutilsFileError(
|
61 |
-
"could not write to '{}': {}".format(dst, e.strerror)
|
62 |
-
)
|
63 |
-
finally:
|
64 |
-
if fdst:
|
65 |
-
fdst.close()
|
66 |
-
if fsrc:
|
67 |
-
fsrc.close()
|
68 |
-
|
69 |
-
|
70 |
-
def copy_file( # noqa: C901
|
71 |
-
src,
|
72 |
-
dst,
|
73 |
-
preserve_mode=1,
|
74 |
-
preserve_times=1,
|
75 |
-
update=0,
|
76 |
-
link=None,
|
77 |
-
verbose=1,
|
78 |
-
dry_run=0,
|
79 |
-
):
|
80 |
-
"""Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
|
81 |
-
copied there with the same name; otherwise, it must be a filename. (If
|
82 |
-
the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
|
83 |
-
is true (the default), the file's mode (type and permission bits, or
|
84 |
-
whatever is analogous on the current platform) is copied. If
|
85 |
-
'preserve_times' is true (the default), the last-modified and
|
86 |
-
last-access times are copied as well. If 'update' is true, 'src' will
|
87 |
-
only be copied if 'dst' does not exist, or if 'dst' does exist but is
|
88 |
-
older than 'src'.
|
89 |
-
|
90 |
-
'link' allows you to make hard links (os.link) or symbolic links
|
91 |
-
(os.symlink) instead of copying: set it to "hard" or "sym"; if it is
|
92 |
-
None (the default), files are copied. Don't set 'link' on systems that
|
93 |
-
don't support it: 'copy_file()' doesn't check if hard or symbolic
|
94 |
-
linking is available. If hardlink fails, falls back to
|
95 |
-
_copy_file_contents().
|
96 |
-
|
97 |
-
Under Mac OS, uses the native file copy function in macostools; on
|
98 |
-
other systems, uses '_copy_file_contents()' to copy file contents.
|
99 |
-
|
100 |
-
Return a tuple (dest_name, copied): 'dest_name' is the actual name of
|
101 |
-
the output file, and 'copied' is true if the file was copied (or would
|
102 |
-
have been copied, if 'dry_run' true).
|
103 |
-
"""
|
104 |
-
# XXX if the destination file already exists, we clobber it if
|
105 |
-
# copying, but blow up if linking. Hmmm. And I don't know what
|
106 |
-
# macostools.copyfile() does. Should definitely be consistent, and
|
107 |
-
# should probably blow up if destination exists and we would be
|
108 |
-
# changing it (ie. it's not already a hard/soft link to src OR
|
109 |
-
# (not update) and (src newer than dst).
|
110 |
-
|
111 |
-
from distutils.dep_util import newer
|
112 |
-
from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
|
113 |
-
|
114 |
-
if not os.path.isfile(src):
|
115 |
-
raise DistutilsFileError(
|
116 |
-
"can't copy '%s': doesn't exist or not a regular file" % src
|
117 |
-
)
|
118 |
-
|
119 |
-
if os.path.isdir(dst):
|
120 |
-
dir = dst
|
121 |
-
dst = os.path.join(dst, os.path.basename(src))
|
122 |
-
else:
|
123 |
-
dir = os.path.dirname(dst)
|
124 |
-
|
125 |
-
if update and not newer(src, dst):
|
126 |
-
if verbose >= 1:
|
127 |
-
log.debug("not copying %s (output up-to-date)", src)
|
128 |
-
return (dst, 0)
|
129 |
-
|
130 |
-
try:
|
131 |
-
action = _copy_action[link]
|
132 |
-
except KeyError:
|
133 |
-
raise ValueError("invalid value '%s' for 'link' argument" % link)
|
134 |
-
|
135 |
-
if verbose >= 1:
|
136 |
-
if os.path.basename(dst) == os.path.basename(src):
|
137 |
-
log.info("%s %s -> %s", action, src, dir)
|
138 |
-
else:
|
139 |
-
log.info("%s %s -> %s", action, src, dst)
|
140 |
-
|
141 |
-
if dry_run:
|
142 |
-
return (dst, 1)
|
143 |
-
|
144 |
-
# If linking (hard or symbolic), use the appropriate system call
|
145 |
-
# (Unix only, of course, but that's the caller's responsibility)
|
146 |
-
elif link == 'hard':
|
147 |
-
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
|
148 |
-
try:
|
149 |
-
os.link(src, dst)
|
150 |
-
return (dst, 1)
|
151 |
-
except OSError:
|
152 |
-
# If hard linking fails, fall back on copying file
|
153 |
-
# (some special filesystems don't support hard linking
|
154 |
-
# even under Unix, see issue #8876).
|
155 |
-
pass
|
156 |
-
elif link == 'sym':
|
157 |
-
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
|
158 |
-
os.symlink(src, dst)
|
159 |
-
return (dst, 1)
|
160 |
-
|
161 |
-
# Otherwise (non-Mac, not linking), copy the file contents and
|
162 |
-
# (optionally) copy the times and mode.
|
163 |
-
_copy_file_contents(src, dst)
|
164 |
-
if preserve_mode or preserve_times:
|
165 |
-
st = os.stat(src)
|
166 |
-
|
167 |
-
# According to David Ascher <[email protected]>, utime() should be done
|
168 |
-
# before chmod() (at least under NT).
|
169 |
-
if preserve_times:
|
170 |
-
os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
|
171 |
-
if preserve_mode:
|
172 |
-
os.chmod(dst, S_IMODE(st[ST_MODE]))
|
173 |
-
|
174 |
-
return (dst, 1)
|
175 |
-
|
176 |
-
|
177 |
-
# XXX I suspect this is Unix-specific -- need porting help!
|
178 |
-
def move_file(src, dst, verbose=1, dry_run=0): # noqa: C901
|
179 |
-
|
180 |
-
"""Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
|
181 |
-
be moved into it with the same name; otherwise, 'src' is just renamed
|
182 |
-
to 'dst'. Return the new full name of the file.
|
183 |
-
|
184 |
-
Handles cross-device moves on Unix using 'copy_file()'. What about
|
185 |
-
other systems???
|
186 |
-
"""
|
187 |
-
from os.path import exists, isfile, isdir, basename, dirname
|
188 |
-
import errno
|
189 |
-
|
190 |
-
if verbose >= 1:
|
191 |
-
log.info("moving %s -> %s", src, dst)
|
192 |
-
|
193 |
-
if dry_run:
|
194 |
-
return dst
|
195 |
-
|
196 |
-
if not isfile(src):
|
197 |
-
raise DistutilsFileError("can't move '%s': not a regular file" % src)
|
198 |
-
|
199 |
-
if isdir(dst):
|
200 |
-
dst = os.path.join(dst, basename(src))
|
201 |
-
elif exists(dst):
|
202 |
-
raise DistutilsFileError(
|
203 |
-
"can't move '{}': destination '{}' already exists".format(src, dst)
|
204 |
-
)
|
205 |
-
|
206 |
-
if not isdir(dirname(dst)):
|
207 |
-
raise DistutilsFileError(
|
208 |
-
"can't move '{}': destination '{}' not a valid path".format(src, dst)
|
209 |
-
)
|
210 |
-
|
211 |
-
copy_it = False
|
212 |
-
try:
|
213 |
-
os.rename(src, dst)
|
214 |
-
except OSError as e:
|
215 |
-
(num, msg) = e.args
|
216 |
-
if num == errno.EXDEV:
|
217 |
-
copy_it = True
|
218 |
-
else:
|
219 |
-
raise DistutilsFileError(
|
220 |
-
"couldn't move '{}' to '{}': {}".format(src, dst, msg)
|
221 |
-
)
|
222 |
-
|
223 |
-
if copy_it:
|
224 |
-
copy_file(src, dst, verbose=verbose)
|
225 |
-
try:
|
226 |
-
os.unlink(src)
|
227 |
-
except OSError as e:
|
228 |
-
(num, msg) = e.args
|
229 |
-
try:
|
230 |
-
os.unlink(dst)
|
231 |
-
except OSError:
|
232 |
-
pass
|
233 |
-
raise DistutilsFileError(
|
234 |
-
"couldn't move '%s' to '%s' by copy/delete: "
|
235 |
-
"delete '%s' failed: %s" % (src, dst, src, msg)
|
236 |
-
)
|
237 |
-
return dst
|
238 |
-
|
239 |
-
|
240 |
-
def write_file(filename, contents):
|
241 |
-
"""Create a file with the specified name and write 'contents' (a
|
242 |
-
sequence of strings without line terminators) to it.
|
243 |
-
"""
|
244 |
-
f = open(filename, "w")
|
245 |
-
try:
|
246 |
-
for line in contents:
|
247 |
-
f.write(line + "\n")
|
248 |
-
finally:
|
249 |
-
f.close()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/filelist.py
DELETED
@@ -1,371 +0,0 @@
|
|
1 |
-
"""distutils.filelist
|
2 |
-
|
3 |
-
Provides the FileList class, used for poking about the filesystem
|
4 |
-
and building lists of files.
|
5 |
-
"""
|
6 |
-
|
7 |
-
import os
|
8 |
-
import re
|
9 |
-
import fnmatch
|
10 |
-
import functools
|
11 |
-
|
12 |
-
from distutils.util import convert_path
|
13 |
-
from distutils.errors import DistutilsTemplateError, DistutilsInternalError
|
14 |
-
from distutils import log
|
15 |
-
|
16 |
-
|
17 |
-
class FileList:
|
18 |
-
"""A list of files built by on exploring the filesystem and filtered by
|
19 |
-
applying various patterns to what we find there.
|
20 |
-
|
21 |
-
Instance attributes:
|
22 |
-
dir
|
23 |
-
directory from which files will be taken -- only used if
|
24 |
-
'allfiles' not supplied to constructor
|
25 |
-
files
|
26 |
-
list of filenames currently being built/filtered/manipulated
|
27 |
-
allfiles
|
28 |
-
complete list of files under consideration (ie. without any
|
29 |
-
filtering applied)
|
30 |
-
"""
|
31 |
-
|
32 |
-
def __init__(self, warn=None, debug_print=None):
|
33 |
-
# ignore argument to FileList, but keep them for backwards
|
34 |
-
# compatibility
|
35 |
-
self.allfiles = None
|
36 |
-
self.files = []
|
37 |
-
|
38 |
-
def set_allfiles(self, allfiles):
|
39 |
-
self.allfiles = allfiles
|
40 |
-
|
41 |
-
def findall(self, dir=os.curdir):
|
42 |
-
self.allfiles = findall(dir)
|
43 |
-
|
44 |
-
def debug_print(self, msg):
|
45 |
-
"""Print 'msg' to stdout if the global DEBUG (taken from the
|
46 |
-
DISTUTILS_DEBUG environment variable) flag is true.
|
47 |
-
"""
|
48 |
-
from distutils.debug import DEBUG
|
49 |
-
|
50 |
-
if DEBUG:
|
51 |
-
print(msg)
|
52 |
-
|
53 |
-
# Collection methods
|
54 |
-
|
55 |
-
def append(self, item):
|
56 |
-
self.files.append(item)
|
57 |
-
|
58 |
-
def extend(self, items):
|
59 |
-
self.files.extend(items)
|
60 |
-
|
61 |
-
def sort(self):
|
62 |
-
# Not a strict lexical sort!
|
63 |
-
sortable_files = sorted(map(os.path.split, self.files))
|
64 |
-
self.files = []
|
65 |
-
for sort_tuple in sortable_files:
|
66 |
-
self.files.append(os.path.join(*sort_tuple))
|
67 |
-
|
68 |
-
# Other miscellaneous utility methods
|
69 |
-
|
70 |
-
def remove_duplicates(self):
|
71 |
-
# Assumes list has been sorted!
|
72 |
-
for i in range(len(self.files) - 1, 0, -1):
|
73 |
-
if self.files[i] == self.files[i - 1]:
|
74 |
-
del self.files[i]
|
75 |
-
|
76 |
-
# "File template" methods
|
77 |
-
|
78 |
-
def _parse_template_line(self, line):
|
79 |
-
words = line.split()
|
80 |
-
action = words[0]
|
81 |
-
|
82 |
-
patterns = dir = dir_pattern = None
|
83 |
-
|
84 |
-
if action in ('include', 'exclude', 'global-include', 'global-exclude'):
|
85 |
-
if len(words) < 2:
|
86 |
-
raise DistutilsTemplateError(
|
87 |
-
"'%s' expects <pattern1> <pattern2> ..." % action
|
88 |
-
)
|
89 |
-
patterns = [convert_path(w) for w in words[1:]]
|
90 |
-
elif action in ('recursive-include', 'recursive-exclude'):
|
91 |
-
if len(words) < 3:
|
92 |
-
raise DistutilsTemplateError(
|
93 |
-
"'%s' expects <dir> <pattern1> <pattern2> ..." % action
|
94 |
-
)
|
95 |
-
dir = convert_path(words[1])
|
96 |
-
patterns = [convert_path(w) for w in words[2:]]
|
97 |
-
elif action in ('graft', 'prune'):
|
98 |
-
if len(words) != 2:
|
99 |
-
raise DistutilsTemplateError(
|
100 |
-
"'%s' expects a single <dir_pattern>" % action
|
101 |
-
)
|
102 |
-
dir_pattern = convert_path(words[1])
|
103 |
-
else:
|
104 |
-
raise DistutilsTemplateError("unknown action '%s'" % action)
|
105 |
-
|
106 |
-
return (action, patterns, dir, dir_pattern)
|
107 |
-
|
108 |
-
def process_template_line(self, line): # noqa: C901
|
109 |
-
# Parse the line: split it up, make sure the right number of words
|
110 |
-
# is there, and return the relevant words. 'action' is always
|
111 |
-
# defined: it's the first word of the line. Which of the other
|
112 |
-
# three are defined depends on the action; it'll be either
|
113 |
-
# patterns, (dir and patterns), or (dir_pattern).
|
114 |
-
(action, patterns, dir, dir_pattern) = self._parse_template_line(line)
|
115 |
-
|
116 |
-
# OK, now we know that the action is valid and we have the
|
117 |
-
# right number of words on the line for that action -- so we
|
118 |
-
# can proceed with minimal error-checking.
|
119 |
-
if action == 'include':
|
120 |
-
self.debug_print("include " + ' '.join(patterns))
|
121 |
-
for pattern in patterns:
|
122 |
-
if not self.include_pattern(pattern, anchor=1):
|
123 |
-
log.warn("warning: no files found matching '%s'", pattern)
|
124 |
-
|
125 |
-
elif action == 'exclude':
|
126 |
-
self.debug_print("exclude " + ' '.join(patterns))
|
127 |
-
for pattern in patterns:
|
128 |
-
if not self.exclude_pattern(pattern, anchor=1):
|
129 |
-
log.warn(
|
130 |
-
(
|
131 |
-
"warning: no previously-included files "
|
132 |
-
"found matching '%s'"
|
133 |
-
),
|
134 |
-
pattern,
|
135 |
-
)
|
136 |
-
|
137 |
-
elif action == 'global-include':
|
138 |
-
self.debug_print("global-include " + ' '.join(patterns))
|
139 |
-
for pattern in patterns:
|
140 |
-
if not self.include_pattern(pattern, anchor=0):
|
141 |
-
log.warn(
|
142 |
-
(
|
143 |
-
"warning: no files found matching '%s' "
|
144 |
-
"anywhere in distribution"
|
145 |
-
),
|
146 |
-
pattern,
|
147 |
-
)
|
148 |
-
|
149 |
-
elif action == 'global-exclude':
|
150 |
-
self.debug_print("global-exclude " + ' '.join(patterns))
|
151 |
-
for pattern in patterns:
|
152 |
-
if not self.exclude_pattern(pattern, anchor=0):
|
153 |
-
log.warn(
|
154 |
-
(
|
155 |
-
"warning: no previously-included files matching "
|
156 |
-
"'%s' found anywhere in distribution"
|
157 |
-
),
|
158 |
-
pattern,
|
159 |
-
)
|
160 |
-
|
161 |
-
elif action == 'recursive-include':
|
162 |
-
self.debug_print("recursive-include {} {}".format(dir, ' '.join(patterns)))
|
163 |
-
for pattern in patterns:
|
164 |
-
if not self.include_pattern(pattern, prefix=dir):
|
165 |
-
msg = (
|
166 |
-
"warning: no files found matching '%s' " "under directory '%s'"
|
167 |
-
)
|
168 |
-
log.warn(msg, pattern, dir)
|
169 |
-
|
170 |
-
elif action == 'recursive-exclude':
|
171 |
-
self.debug_print("recursive-exclude {} {}".format(dir, ' '.join(patterns)))
|
172 |
-
for pattern in patterns:
|
173 |
-
if not self.exclude_pattern(pattern, prefix=dir):
|
174 |
-
log.warn(
|
175 |
-
(
|
176 |
-
"warning: no previously-included files matching "
|
177 |
-
"'%s' found under directory '%s'"
|
178 |
-
),
|
179 |
-
pattern,
|
180 |
-
dir,
|
181 |
-
)
|
182 |
-
|
183 |
-
elif action == 'graft':
|
184 |
-
self.debug_print("graft " + dir_pattern)
|
185 |
-
if not self.include_pattern(None, prefix=dir_pattern):
|
186 |
-
log.warn("warning: no directories found matching '%s'", dir_pattern)
|
187 |
-
|
188 |
-
elif action == 'prune':
|
189 |
-
self.debug_print("prune " + dir_pattern)
|
190 |
-
if not self.exclude_pattern(None, prefix=dir_pattern):
|
191 |
-
log.warn(
|
192 |
-
("no previously-included directories found " "matching '%s'"),
|
193 |
-
dir_pattern,
|
194 |
-
)
|
195 |
-
else:
|
196 |
-
raise DistutilsInternalError(
|
197 |
-
"this cannot happen: invalid action '%s'" % action
|
198 |
-
)
|
199 |
-
|
200 |
-
# Filtering/selection methods
|
201 |
-
|
202 |
-
def include_pattern(self, pattern, anchor=1, prefix=None, is_regex=0):
|
203 |
-
"""Select strings (presumably filenames) from 'self.files' that
|
204 |
-
match 'pattern', a Unix-style wildcard (glob) pattern. Patterns
|
205 |
-
are not quite the same as implemented by the 'fnmatch' module: '*'
|
206 |
-
and '?' match non-special characters, where "special" is platform-
|
207 |
-
dependent: slash on Unix; colon, slash, and backslash on
|
208 |
-
DOS/Windows; and colon on Mac OS.
|
209 |
-
|
210 |
-
If 'anchor' is true (the default), then the pattern match is more
|
211 |
-
stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
|
212 |
-
'anchor' is false, both of these will match.
|
213 |
-
|
214 |
-
If 'prefix' is supplied, then only filenames starting with 'prefix'
|
215 |
-
(itself a pattern) and ending with 'pattern', with anything in between
|
216 |
-
them, will match. 'anchor' is ignored in this case.
|
217 |
-
|
218 |
-
If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
|
219 |
-
'pattern' is assumed to be either a string containing a regex or a
|
220 |
-
regex object -- no translation is done, the regex is just compiled
|
221 |
-
and used as-is.
|
222 |
-
|
223 |
-
Selected strings will be added to self.files.
|
224 |
-
|
225 |
-
Return True if files are found, False otherwise.
|
226 |
-
"""
|
227 |
-
# XXX docstring lying about what the special chars are?
|
228 |
-
files_found = False
|
229 |
-
pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
|
230 |
-
self.debug_print("include_pattern: applying regex r'%s'" % pattern_re.pattern)
|
231 |
-
|
232 |
-
# delayed loading of allfiles list
|
233 |
-
if self.allfiles is None:
|
234 |
-
self.findall()
|
235 |
-
|
236 |
-
for name in self.allfiles:
|
237 |
-
if pattern_re.search(name):
|
238 |
-
self.debug_print(" adding " + name)
|
239 |
-
self.files.append(name)
|
240 |
-
files_found = True
|
241 |
-
return files_found
|
242 |
-
|
243 |
-
def exclude_pattern(self, pattern, anchor=1, prefix=None, is_regex=0):
|
244 |
-
"""Remove strings (presumably filenames) from 'files' that match
|
245 |
-
'pattern'. Other parameters are the same as for
|
246 |
-
'include_pattern()', above.
|
247 |
-
The list 'self.files' is modified in place.
|
248 |
-
Return True if files are found, False otherwise.
|
249 |
-
"""
|
250 |
-
files_found = False
|
251 |
-
pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
|
252 |
-
self.debug_print("exclude_pattern: applying regex r'%s'" % pattern_re.pattern)
|
253 |
-
for i in range(len(self.files) - 1, -1, -1):
|
254 |
-
if pattern_re.search(self.files[i]):
|
255 |
-
self.debug_print(" removing " + self.files[i])
|
256 |
-
del self.files[i]
|
257 |
-
files_found = True
|
258 |
-
return files_found
|
259 |
-
|
260 |
-
|
261 |
-
# Utility functions
|
262 |
-
|
263 |
-
|
264 |
-
def _find_all_simple(path):
|
265 |
-
"""
|
266 |
-
Find all files under 'path'
|
267 |
-
"""
|
268 |
-
all_unique = _UniqueDirs.filter(os.walk(path, followlinks=True))
|
269 |
-
results = (
|
270 |
-
os.path.join(base, file) for base, dirs, files in all_unique for file in files
|
271 |
-
)
|
272 |
-
return filter(os.path.isfile, results)
|
273 |
-
|
274 |
-
|
275 |
-
class _UniqueDirs(set):
|
276 |
-
"""
|
277 |
-
Exclude previously-seen dirs from walk results,
|
278 |
-
avoiding infinite recursion.
|
279 |
-
Ref https://bugs.python.org/issue44497.
|
280 |
-
"""
|
281 |
-
|
282 |
-
def __call__(self, walk_item):
|
283 |
-
"""
|
284 |
-
Given an item from an os.walk result, determine
|
285 |
-
if the item represents a unique dir for this instance
|
286 |
-
and if not, prevent further traversal.
|
287 |
-
"""
|
288 |
-
base, dirs, files = walk_item
|
289 |
-
stat = os.stat(base)
|
290 |
-
candidate = stat.st_dev, stat.st_ino
|
291 |
-
found = candidate in self
|
292 |
-
if found:
|
293 |
-
del dirs[:]
|
294 |
-
self.add(candidate)
|
295 |
-
return not found
|
296 |
-
|
297 |
-
@classmethod
|
298 |
-
def filter(cls, items):
|
299 |
-
return filter(cls(), items)
|
300 |
-
|
301 |
-
|
302 |
-
def findall(dir=os.curdir):
|
303 |
-
"""
|
304 |
-
Find all files under 'dir' and return the list of full filenames.
|
305 |
-
Unless dir is '.', return full filenames with dir prepended.
|
306 |
-
"""
|
307 |
-
files = _find_all_simple(dir)
|
308 |
-
if dir == os.curdir:
|
309 |
-
make_rel = functools.partial(os.path.relpath, start=dir)
|
310 |
-
files = map(make_rel, files)
|
311 |
-
return list(files)
|
312 |
-
|
313 |
-
|
314 |
-
def glob_to_re(pattern):
|
315 |
-
"""Translate a shell-like glob pattern to a regular expression; return
|
316 |
-
a string containing the regex. Differs from 'fnmatch.translate()' in
|
317 |
-
that '*' does not match "special characters" (which are
|
318 |
-
platform-specific).
|
319 |
-
"""
|
320 |
-
pattern_re = fnmatch.translate(pattern)
|
321 |
-
|
322 |
-
# '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
|
323 |
-
# IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
|
324 |
-
# and by extension they shouldn't match such "special characters" under
|
325 |
-
# any OS. So change all non-escaped dots in the RE to match any
|
326 |
-
# character except the special characters (currently: just os.sep).
|
327 |
-
sep = os.sep
|
328 |
-
if os.sep == '\\':
|
329 |
-
# we're using a regex to manipulate a regex, so we need
|
330 |
-
# to escape the backslash twice
|
331 |
-
sep = r'\\\\'
|
332 |
-
escaped = r'\1[^%s]' % sep
|
333 |
-
pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
|
334 |
-
return pattern_re
|
335 |
-
|
336 |
-
|
337 |
-
def translate_pattern(pattern, anchor=1, prefix=None, is_regex=0):
|
338 |
-
"""Translate a shell-like wildcard pattern to a compiled regular
|
339 |
-
expression. Return the compiled regex. If 'is_regex' true,
|
340 |
-
then 'pattern' is directly compiled to a regex (if it's a string)
|
341 |
-
or just returned as-is (assumes it's a regex object).
|
342 |
-
"""
|
343 |
-
if is_regex:
|
344 |
-
if isinstance(pattern, str):
|
345 |
-
return re.compile(pattern)
|
346 |
-
else:
|
347 |
-
return pattern
|
348 |
-
|
349 |
-
# ditch start and end characters
|
350 |
-
start, _, end = glob_to_re('_').partition('_')
|
351 |
-
|
352 |
-
if pattern:
|
353 |
-
pattern_re = glob_to_re(pattern)
|
354 |
-
assert pattern_re.startswith(start) and pattern_re.endswith(end)
|
355 |
-
else:
|
356 |
-
pattern_re = ''
|
357 |
-
|
358 |
-
if prefix is not None:
|
359 |
-
prefix_re = glob_to_re(prefix)
|
360 |
-
assert prefix_re.startswith(start) and prefix_re.endswith(end)
|
361 |
-
prefix_re = prefix_re[len(start) : len(prefix_re) - len(end)]
|
362 |
-
sep = os.sep
|
363 |
-
if os.sep == '\\':
|
364 |
-
sep = r'\\'
|
365 |
-
pattern_re = pattern_re[len(start) : len(pattern_re) - len(end)]
|
366 |
-
pattern_re = r'{}\A{}{}.*{}{}'.format(start, prefix_re, sep, pattern_re, end)
|
367 |
-
else: # no prefix -- respect anchor flag
|
368 |
-
if anchor:
|
369 |
-
pattern_re = r'{}\A{}'.format(start, pattern_re[len(start) :])
|
370 |
-
|
371 |
-
return re.compile(pattern_re)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Awesimo/jojogan/e4e/datasets/inference_dataset.py
DELETED
@@ -1,25 +0,0 @@
|
|
1 |
-
from torch.utils.data import Dataset
|
2 |
-
from PIL import Image
|
3 |
-
from utils import data_utils
|
4 |
-
|
5 |
-
|
6 |
-
class InferenceDataset(Dataset):
|
7 |
-
|
8 |
-
def __init__(self, root, opts, transform=None, preprocess=None):
|
9 |
-
self.paths = sorted(data_utils.make_dataset(root))
|
10 |
-
self.transform = transform
|
11 |
-
self.preprocess = preprocess
|
12 |
-
self.opts = opts
|
13 |
-
|
14 |
-
def __len__(self):
|
15 |
-
return len(self.paths)
|
16 |
-
|
17 |
-
def __getitem__(self, index):
|
18 |
-
from_path = self.paths[index]
|
19 |
-
if self.preprocess is not None:
|
20 |
-
from_im = self.preprocess(from_path)
|
21 |
-
else:
|
22 |
-
from_im = Image.open(from_path).convert('RGB')
|
23 |
-
if self.transform:
|
24 |
-
from_im = self.transform(from_im)
|
25 |
-
return from_im
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Benson/text-generation/Examples/Asesinato En Los Alpes Mod Apk An1.md
DELETED
@@ -1,68 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Asesinato en los Alpes Mod APK: Un juego de misterio oculto para Android</h1>
|
3 |
-
<p>Si eres un fan de los juegos de misterio, aventura y romance, es posible que quieras echar un vistazo a <strong>Murder in the Alps</strong>, un juego de objetos ocultos que te llevará de vuelta a la década de 1930 en un hotel remoto en los Alpes. Actuarás como Anna Myers, una periodista que está investigando una serie de misteriosos asesinatos que ocurren en el hotel. Tendrás que explorar las escenas, encontrar pistas, resolver puzzles, interactuar con otros personajes y tomar decisiones que afecten el resultado de la historia. </p>
|
4 |
-
<h2>asesinato en los Alpes mod apk an1</h2><br /><p><b><b>Download</b> ✺ <a href="https://bltlly.com/2v6K3n">https://bltlly.com/2v6K3n</a></b></p><br /><br />
|
5 |
-
<p>Pero lo que si quieres disfrutar del juego sin limitaciones o interrupciones? Ahí es donde <strong>Asesinato en los Alpes Mod APK</strong> entra en. Un apk mod es una versión modificada de una aplicación original que tiene algunas características añadidas o eliminadas para mejorar la experiencia del usuario. En este caso, Asesinato en los Alpes Mod APK tiene energía ilimitada, todos los capítulos desbloqueados, y no hay anuncios, por lo que puede jugar el juego tanto como quieras sin gastar dinero o esperar a que la energía para rellenar. </p>
|
6 |
-
<p>Una de las fuentes donde se puede descargar Asesinato en los Alpes Mod APK es <strong>an1.co.in</strong>, un sitio web que proporciona apks mod gratis para varios juegos de Android. Usted puede encontrar asesinato en los Alpes Mod APK en este sitio web junto con otra información, tales como características, requisitos, pasos de instalación, capturas de pantalla, y comentarios. </p>
|
7 |
-
<h2>Características de Asesinato en los Alpes Mod APK</h2>
|
8 |
-
<p>Asesinato en los Alpes Mod APK tiene algunas características que lo hacen diferente del juego original. Estos son algunos de ellos:</p>
|
9 |
-
<p></p>
|
10 |
-
<h3>Energía ilimitada</h3>
|
11 |
-
<p>En el juego original, necesitas energía para jugar cada escena. La energía se llena con el tiempo o viendo anuncios o comprándolo con dinero real. Sin embargo, con Asesinato en los Alpes Mod APK, usted no tiene que preocuparse por la energía en absoluto. Puedes reproducir tantas escenas como quieras sin ninguna restricción. </p>
|
12 |
-
<h3>Todos los capítulos desbloqueados</h3>
|
13 |
-
|
14 |
-
<h3>No hay anuncios</h3>
|
15 |
-
<p>El juego original tiene anuncios que aparecen de vez en cuando o cuando quieres obtener energía adicional o sugerencias. Los anuncios pueden ser molestos y distraer cuando estás inmerso en el juego. Sin embargo, con Asesinato en los Alpes Mod APK, no tienes que ver ningún anuncio en absoluto. Puedes disfrutar del juego sin interrupciones. </p>
|
16 |
-
<h2>Cómo descargar e instalar Asesinato en los Alpes Mod APK</h2>
|
17 |
-
<p>Si desea probar Asesinato en los Alpes Mod APK, es necesario seguir estos pasos:</p>
|
18 |
-
<h3>Requisitos</h3>
|
19 |
-
<ul>
|
20 |
-
<li>Un dispositivo Android con la versión 4.4 o superior</li>
|
21 |
-
<li>Al menos 111 MB de espacio de almacenamiento libre</li>
|
22 |
-
<li>Una conexión a Internet estable</li>
|
23 |
-
<li>Permitir la instalación de fuentes desconocidas en la configuración del dispositivo</li>
|
24 |
-
</ul>
|
25 |
-
<h3>Pasos</h3>
|
26 |
-
<ol>
|
27 |
-
<li>Ir a <a href=">an1.co.in</a> y buscar Asesinato en los Alpes Mod APK</li>
|
28 |
-
<li>Haga clic en el botón de descarga y espere a que el archivo se descargue</li>
|
29 |
-
<li>Busque el archivo descargado en el administrador de archivos de su dispositivo y toque en él para instalarlo</li>
|
30 |
-
<li>Iniciar el juego y disfrutar! </li>
|
31 |
-
</ol>
|
32 |
-
<h2>Cómo jugar Asesinato en los Alpes</h2>
|
33 |
-
<p>Murder in the Alps es un juego de objetos ocultos que requiere que uses tus habilidades de observación, lógica y deducción para resolver el misterio. Aquí hay algunos consejos sobre cómo jugar el juego:</p>
|
34 |
-
<h3>Elige tu personaje y la historia</h3>
|
35 |
-
<p>Puedes elegir entre dos personajes: Anna Myers o Frank Miller. Cada personaje tiene una perspectiva y personalidad diferentes. También puede elegir entre seis historias: The Providence Express, The Heir, The Murder of Mrs. Miller, The Alchemist, The Curse of the Ancient One y The Final Showdown. Cada historia tiene una trama, un escenario y personajes diferentes. </p>
|
36 |
-
<h3>Explora las escenas y encuentra pistas</h3>
|
37 |
-
|
38 |
-
<h3>Resuelve puzzles y minijuegos</h3>
|
39 |
-
<p>Además de encontrar pistas, también tendrás que resolver puzzles y minijuegos que desafiarán tu cerebro. Algunos rompecabezas requieren que uses habilidades de lógica, matemáticas o memoria, mientras que otros requieren que uses tus reflejos o coordinación. Puedes saltarte los puzzles si los encuentras demasiado difíciles o usar pistas si necesitas ayuda. </p>
|
40 |
-
<h3>Interactuar con otros personajes y tomar decisiones</h3>
|
41 |
-
<p>Encontrarás varios personajes en el juego, algunos de ellos amistosos, algunos de ellos sospechosos, y algunos de ellos peligrosos. Puedes hablar con ellos y aprender más sobre sus antecedentes, motivos y secretos. También puede tomar decisiones que afectarán el curso de la historia y las relaciones con otros personajes. Sin embargo, tenga cuidado, ya que algunas opciones pueden tener consecuencias más tarde. </p>
|
42 |
-
<h2>Pros y contras de asesinato en los Alpes Mod APK</h2>
|
43 |
-
<p>Asesinato en los Alpes Mod APK tiene algunas ventajas y desventajas que usted debe considerar antes de descargarlo. Aquí están algunos de ellos:</p>
|
44 |
-
<h3>Pros</h3>
|
45 |
-
<ul>
|
46 |
-
<li><h4>Historia inmersiva y cautivadora</h4><p>El asesinato en los Alpes tiene una historia bien escrita y atractiva que te mantendrá enganchado hasta el final. Sentirás que eres parte de una novela clásica de detectives con giros y vueltas, suspense y romance, humor y drama. </p></li>
|
47 |
-
<li><h4>Hermosos gráficos y efectos de sonido</h4><p>Murder in the Alps tiene gráficos impresionantes que te transportarán a la década de 1930 en los Alpes. Admirará las escenas realistas y detalladas, los personajes coloridos y expresivos, y la iluminación atmosférica y las sombras. También disfrutarás de los efectos de sonido auténticos que mejorarán tu inmersión en el juego. </p></li>
|
48 |
-
|
49 |
-
</ul>
|
50 |
-
<h3>Contras</h3>
|
51 |
-
<ul>
|
52 |
-
<li><h4>No es compatible con algunos dispositivos</h4><p>Asesinato en los Alpes Mod APK puede no funcionar correctamente en algunos dispositivos debido a diferentes especificaciones o problemas de compatibilidad. Puede experimentar bloqueos, congelaciones, retrasos o errores al jugar el juego. También es posible que no pueda guardar su progreso o acceder a algunas funciones. </p></li>
|
53 |
-
<li><h4>Puede tener errores y problemas técnicos</h4><p>Asesinato en los Alpes Mod APK puede tener algunos errores y problemas técnicos que pueden afectar a su juego o el disfrute del juego. Por ejemplo, puede encontrar objetos que faltan, textos incorrectos, enlaces rotos o archivos dañados. También puede perder sus datos o ser expulsado del juego si es detectado por los desarrolladores. </p></li>
|
54 |
-
<li><h4>Puede violar los términos de servicio del juego original</h4><p>Asesinato en los Alpes Mod APK no es una versión oficial del juego, pero uno modificado que ha sido creado por desarrolladores de terceros sin permiso de los desarrolladores del juego original. Esto puede violar los términos de servicio del juego original e infringir sus derechos de propiedad intelectual. Usted puede enfrentar acciones legales o sanciones si utiliza Asesinato en los Alpes Mod APK.</p></li>
|
55 |
-
</ul>
|
56 |
-
<h2>Conclusión</h2>
|
57 |
-
<p>Asesinato en los Alpes Mod APK es un juego de misterio oculto para Android que le permite disfrutar del juego sin limitaciones o interrupciones. Puedes jugar con energía ilimitada, todos los capítulos desbloqueados y sin anuncios. También puede elegir su personaje y la historia, explorar las escenas y encontrar pistas, resolver puzzles y minijuegos, interactuar con otros personajes y tomar decisiones, y experimentar una historia inmersiva y cautivadora con hermosos gráficos y efectos de sonido. Sin embargo, Murder in the Alps Mod APK puede no ser compatible con algunos dispositivos, puede tener errores y problemas técnicos, y puede violar los términos de servicio del juego original. Usted debe descargar y utilizar asesinato en los Alpes Mod APK a su propio riesgo y discreción. </p>
|
58 |
-
<h2>Preguntas frecuentes</h2>
|
59 |
-
<p>Aquí hay algunas preguntas frecuentes sobre el asesinato en los Alpes Mod APK:</p>
|
60 |
-
|
61 |
-
<tr><td><strong>Q: ¿Es el asesinato en los Alpes Mod APK seguro para descargar e instalar? </strong></td><td><strong>A: Asesinato en los Alpes Mod APK no es una versión oficial del juego, sino una versión modificada que ha sido creada por desarrolladores de terceros sin permiso de los desarrolladores del juego original. Por lo tanto, puede no ser seguro descargar e instalar, ya que puede contener virus, malware o spyware que pueden dañar su dispositivo o robar su información personal. Usted debe descargar e instalar Asesinato en los Alpes Mod APK de una fuente de confianza y escanear con un antivirus antes de instalarlo. </strong></td></tr>
|
62 |
-
<tr><td><strong>Q: ¿Cómo puedo actualizar Asesinato en los Alpes Mod APK? </strong></td><td><strong>A: Asesinato en los Alpes Mod APK no se puede actualizar automáticamente ya que no está conectado al servidor del juego original. Es posible que tenga que comprobar si hay actualizaciones manualmente en el sitio web desde el que lo descargó o buscar una nueva versión de la apk mod en línea. También puede necesitar desinstalar la versión anterior del apk mod antes de instalar el nuevo. </strong></td></tr>
|
63 |
-
<tr><td><strong>Q: ¿Puedo jugar Asesinato en los Alpes Mod APK fuera de línea? </strong></td><td><strong>A: Asesinato en los Alpes Mod APK requiere una conexión a Internet para descargar e instalar. Sin embargo, una vez que lo haya instalado, puede jugar sin conexión sin ningún problema. No necesitas conectarte a Internet para acceder a las características o contenido del juego. </strong></td></tr>
|
64 |
-
<tr><td><strong>Q: ¿Puedo jugar Asesinato en los Alpes Mod APK con mis amigos? </strong></td><td><strong>A: Asesinato en los Alpes Mod APK es un juego para un solo jugador que no tiene un modo multijugador o característica. Solo puedes jugar por ti mismo y seguir tu propia historia. Sin embargo, puedes compartir tu progreso y logros con tus amigos en las redes sociales o chatear con ellos sobre el juego. </strong></td></tr>
|
65 |
-
|
66 |
-
</table></p> 64aa2da5cf<br />
|
67 |
-
<br />
|
68 |
-
<br />
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/request.py
DELETED
@@ -1,146 +0,0 @@
|
|
1 |
-
from __future__ import absolute_import
|
2 |
-
|
3 |
-
from base64 import b64encode
|
4 |
-
|
5 |
-
from ..exceptions import UnrewindableBodyError
|
6 |
-
from ..packages.six import b, integer_types
|
7 |
-
|
8 |
-
# Pass as a value within ``headers`` to skip
|
9 |
-
# emitting some HTTP headers that are added automatically.
|
10 |
-
# The only headers that are supported are ``Accept-Encoding``,
|
11 |
-
# ``Host``, and ``User-Agent``.
|
12 |
-
SKIP_HEADER = "@@@SKIP_HEADER@@@"
|
13 |
-
SKIPPABLE_HEADERS = frozenset(["accept-encoding", "host", "user-agent"])
|
14 |
-
|
15 |
-
ACCEPT_ENCODING = "gzip,deflate"
|
16 |
-
try:
|
17 |
-
try:
|
18 |
-
import brotlicffi as _unused_module_brotli # noqa: F401
|
19 |
-
except ImportError:
|
20 |
-
import brotli as _unused_module_brotli # noqa: F401
|
21 |
-
except ImportError:
|
22 |
-
pass
|
23 |
-
else:
|
24 |
-
ACCEPT_ENCODING += ",br"
|
25 |
-
|
26 |
-
_FAILEDTELL = object()
|
27 |
-
|
28 |
-
|
29 |
-
def make_headers(
|
30 |
-
keep_alive=None,
|
31 |
-
accept_encoding=None,
|
32 |
-
user_agent=None,
|
33 |
-
basic_auth=None,
|
34 |
-
proxy_basic_auth=None,
|
35 |
-
disable_cache=None,
|
36 |
-
):
|
37 |
-
"""
|
38 |
-
Shortcuts for generating request headers.
|
39 |
-
|
40 |
-
:param keep_alive:
|
41 |
-
If ``True``, adds 'connection: keep-alive' header.
|
42 |
-
|
43 |
-
:param accept_encoding:
|
44 |
-
Can be a boolean, list, or string.
|
45 |
-
``True`` translates to 'gzip,deflate'.
|
46 |
-
List will get joined by comma.
|
47 |
-
String will be used as provided.
|
48 |
-
|
49 |
-
:param user_agent:
|
50 |
-
String representing the user-agent you want, such as
|
51 |
-
"python-urllib3/0.6"
|
52 |
-
|
53 |
-
:param basic_auth:
|
54 |
-
Colon-separated username:password string for 'authorization: basic ...'
|
55 |
-
auth header.
|
56 |
-
|
57 |
-
:param proxy_basic_auth:
|
58 |
-
Colon-separated username:password string for 'proxy-authorization: basic ...'
|
59 |
-
auth header.
|
60 |
-
|
61 |
-
:param disable_cache:
|
62 |
-
If ``True``, adds 'cache-control: no-cache' header.
|
63 |
-
|
64 |
-
Example::
|
65 |
-
|
66 |
-
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
|
67 |
-
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
|
68 |
-
>>> make_headers(accept_encoding=True)
|
69 |
-
{'accept-encoding': 'gzip,deflate'}
|
70 |
-
"""
|
71 |
-
headers = {}
|
72 |
-
if accept_encoding:
|
73 |
-
if isinstance(accept_encoding, str):
|
74 |
-
pass
|
75 |
-
elif isinstance(accept_encoding, list):
|
76 |
-
accept_encoding = ",".join(accept_encoding)
|
77 |
-
else:
|
78 |
-
accept_encoding = ACCEPT_ENCODING
|
79 |
-
headers["accept-encoding"] = accept_encoding
|
80 |
-
|
81 |
-
if user_agent:
|
82 |
-
headers["user-agent"] = user_agent
|
83 |
-
|
84 |
-
if keep_alive:
|
85 |
-
headers["connection"] = "keep-alive"
|
86 |
-
|
87 |
-
if basic_auth:
|
88 |
-
headers["authorization"] = "Basic " + b64encode(b(basic_auth)).decode("utf-8")
|
89 |
-
|
90 |
-
if proxy_basic_auth:
|
91 |
-
headers["proxy-authorization"] = "Basic " + b64encode(
|
92 |
-
b(proxy_basic_auth)
|
93 |
-
).decode("utf-8")
|
94 |
-
|
95 |
-
if disable_cache:
|
96 |
-
headers["cache-control"] = "no-cache"
|
97 |
-
|
98 |
-
return headers
|
99 |
-
|
100 |
-
|
101 |
-
def set_file_position(body, pos):
|
102 |
-
"""
|
103 |
-
If a position is provided, move file to that point.
|
104 |
-
Otherwise, we'll attempt to record a position for future use.
|
105 |
-
"""
|
106 |
-
if pos is not None:
|
107 |
-
rewind_body(body, pos)
|
108 |
-
elif getattr(body, "tell", None) is not None:
|
109 |
-
try:
|
110 |
-
pos = body.tell()
|
111 |
-
except (IOError, OSError):
|
112 |
-
# This differentiates from None, allowing us to catch
|
113 |
-
# a failed `tell()` later when trying to rewind the body.
|
114 |
-
pos = _FAILEDTELL
|
115 |
-
|
116 |
-
return pos
|
117 |
-
|
118 |
-
|
119 |
-
def rewind_body(body, body_pos):
|
120 |
-
"""
|
121 |
-
Attempt to rewind body to a certain position.
|
122 |
-
Primarily used for request redirects and retries.
|
123 |
-
|
124 |
-
:param body:
|
125 |
-
File-like object that supports seek.
|
126 |
-
|
127 |
-
:param int pos:
|
128 |
-
Position to seek to in file.
|
129 |
-
"""
|
130 |
-
body_seek = getattr(body, "seek", None)
|
131 |
-
if body_seek is not None and isinstance(body_pos, integer_types):
|
132 |
-
try:
|
133 |
-
body_seek(body_pos)
|
134 |
-
except (IOError, OSError):
|
135 |
-
raise UnrewindableBodyError(
|
136 |
-
"An error occurred when rewinding request body for redirect/retry."
|
137 |
-
)
|
138 |
-
elif body_pos is _FAILEDTELL:
|
139 |
-
raise UnrewindableBodyError(
|
140 |
-
"Unable to record file position for rewinding "
|
141 |
-
"request body during a redirect/retry."
|
142 |
-
)
|
143 |
-
else:
|
144 |
-
raise ValueError(
|
145 |
-
"body_pos must be of type integer, instead it was %s." % type(body_pos)
|
146 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/BradSegal/Literature-Rating/README.md
DELETED
@@ -1,37 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Literature Rating
|
3 |
-
emoji: 📊
|
4 |
-
colorFrom: gray
|
5 |
-
colorTo: indigo
|
6 |
-
sdk: gradio
|
7 |
-
app_file: app.py
|
8 |
-
pinned: false
|
9 |
-
---
|
10 |
-
|
11 |
-
# Configuration
|
12 |
-
|
13 |
-
`title`: _string_
|
14 |
-
Display title for the Space
|
15 |
-
|
16 |
-
`emoji`: _string_
|
17 |
-
Space emoji (emoji-only character allowed)
|
18 |
-
|
19 |
-
`colorFrom`: _string_
|
20 |
-
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
|
21 |
-
|
22 |
-
`colorTo`: _string_
|
23 |
-
Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
|
24 |
-
|
25 |
-
`sdk`: _string_
|
26 |
-
Can be either `gradio` or `streamlit`
|
27 |
-
|
28 |
-
`sdk_version` : _string_
|
29 |
-
Only applicable for `streamlit` SDK.
|
30 |
-
See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
|
31 |
-
|
32 |
-
`app_file`: _string_
|
33 |
-
Path to your main application file (which contains either `gradio` or `streamlit` Python code).
|
34 |
-
Path is relative to the root of the repository.
|
35 |
-
|
36 |
-
`pinned`: _boolean_
|
37 |
-
Whether the Space stays on top of your list.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/collect_env.py
DELETED
@@ -1,153 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
2 |
-
import importlib
|
3 |
-
import numpy as np
|
4 |
-
import os
|
5 |
-
import re
|
6 |
-
import subprocess
|
7 |
-
import sys
|
8 |
-
from collections import defaultdict
|
9 |
-
import PIL
|
10 |
-
import torch
|
11 |
-
import torchvision
|
12 |
-
from tabulate import tabulate
|
13 |
-
|
14 |
-
__all__ = ["collect_env_info"]
|
15 |
-
|
16 |
-
|
17 |
-
def collect_torch_env():
|
18 |
-
try:
|
19 |
-
import torch.__config__
|
20 |
-
|
21 |
-
return torch.__config__.show()
|
22 |
-
except ImportError:
|
23 |
-
# compatible with older versions of pytorch
|
24 |
-
from torch.utils.collect_env import get_pretty_env_info
|
25 |
-
|
26 |
-
return get_pretty_env_info()
|
27 |
-
|
28 |
-
|
29 |
-
def get_env_module():
|
30 |
-
var_name = "DETECTRON2_ENV_MODULE"
|
31 |
-
return var_name, os.environ.get(var_name, "<not set>")
|
32 |
-
|
33 |
-
|
34 |
-
def detect_compute_compatibility(CUDA_HOME, so_file):
|
35 |
-
try:
|
36 |
-
cuobjdump = os.path.join(CUDA_HOME, "bin", "cuobjdump")
|
37 |
-
if os.path.isfile(cuobjdump):
|
38 |
-
output = subprocess.check_output(
|
39 |
-
"'{}' --list-elf '{}'".format(cuobjdump, so_file), shell=True
|
40 |
-
)
|
41 |
-
output = output.decode("utf-8").strip().split("\n")
|
42 |
-
sm = []
|
43 |
-
for line in output:
|
44 |
-
line = re.findall(r"\.sm_[0-9]*\.", line)[0]
|
45 |
-
sm.append(line.strip("."))
|
46 |
-
sm = sorted(set(sm))
|
47 |
-
return ", ".join(sm)
|
48 |
-
else:
|
49 |
-
return so_file + "; cannot find cuobjdump"
|
50 |
-
except Exception:
|
51 |
-
# unhandled failure
|
52 |
-
return so_file
|
53 |
-
|
54 |
-
|
55 |
-
def collect_env_info():
|
56 |
-
has_cuda = torch.cuda.is_available()
|
57 |
-
# NOTE: the use of CUDA_HOME requires the CUDA build deps, though in
|
58 |
-
# theory detectron2 should be made runnable with only the CUDA runtime
|
59 |
-
from torch.utils.cpp_extension import CUDA_HOME
|
60 |
-
|
61 |
-
data = []
|
62 |
-
data.append(("sys.platform", sys.platform))
|
63 |
-
data.append(("Python", sys.version.replace("\n", "")))
|
64 |
-
data.append(("numpy", np.__version__))
|
65 |
-
|
66 |
-
try:
|
67 |
-
import detectron2 # noqa
|
68 |
-
|
69 |
-
data.append(
|
70 |
-
("detectron2", detectron2.__version__ + " @" + os.path.dirname(detectron2.__file__))
|
71 |
-
)
|
72 |
-
except ImportError:
|
73 |
-
data.append(("detectron2", "failed to import"))
|
74 |
-
else:
|
75 |
-
try:
|
76 |
-
from detectron2 import _C
|
77 |
-
except ImportError:
|
78 |
-
data.append(("detectron2._C", "failed to import"))
|
79 |
-
else:
|
80 |
-
data.append(("detectron2 compiler", _C.get_compiler_version()))
|
81 |
-
data.append(("detectron2 CUDA compiler", _C.get_cuda_version()))
|
82 |
-
if has_cuda:
|
83 |
-
data.append(
|
84 |
-
("detectron2 arch flags", detect_compute_compatibility(CUDA_HOME, _C.__file__))
|
85 |
-
)
|
86 |
-
|
87 |
-
data.append(get_env_module())
|
88 |
-
data.append(("PyTorch", torch.__version__ + " @" + os.path.dirname(torch.__file__)))
|
89 |
-
data.append(("PyTorch debug build", torch.version.debug))
|
90 |
-
|
91 |
-
data.append(("CUDA available", has_cuda))
|
92 |
-
if has_cuda:
|
93 |
-
devices = defaultdict(list)
|
94 |
-
for k in range(torch.cuda.device_count()):
|
95 |
-
devices[torch.cuda.get_device_name(k)].append(str(k))
|
96 |
-
for name, devids in devices.items():
|
97 |
-
data.append(("GPU " + ",".join(devids), name))
|
98 |
-
|
99 |
-
from torch.utils.cpp_extension import CUDA_HOME
|
100 |
-
|
101 |
-
data.append(("CUDA_HOME", str(CUDA_HOME)))
|
102 |
-
|
103 |
-
if CUDA_HOME is not None and os.path.isdir(CUDA_HOME):
|
104 |
-
try:
|
105 |
-
nvcc = os.path.join(CUDA_HOME, "bin", "nvcc")
|
106 |
-
nvcc = subprocess.check_output("'{}' -V | tail -n1".format(nvcc), shell=True)
|
107 |
-
nvcc = nvcc.decode("utf-8").strip()
|
108 |
-
except subprocess.SubprocessError:
|
109 |
-
nvcc = "Not Available"
|
110 |
-
data.append(("NVCC", nvcc))
|
111 |
-
|
112 |
-
cuda_arch_list = os.environ.get("TORCH_CUDA_ARCH_LIST", None)
|
113 |
-
if cuda_arch_list:
|
114 |
-
data.append(("TORCH_CUDA_ARCH_LIST", cuda_arch_list))
|
115 |
-
data.append(("Pillow", PIL.__version__))
|
116 |
-
|
117 |
-
try:
|
118 |
-
data.append(
|
119 |
-
(
|
120 |
-
"torchvision",
|
121 |
-
str(torchvision.__version__) + " @" + os.path.dirname(torchvision.__file__),
|
122 |
-
)
|
123 |
-
)
|
124 |
-
if has_cuda:
|
125 |
-
try:
|
126 |
-
torchvision_C = importlib.util.find_spec("torchvision._C").origin
|
127 |
-
msg = detect_compute_compatibility(CUDA_HOME, torchvision_C)
|
128 |
-
data.append(("torchvision arch flags", msg))
|
129 |
-
except ImportError:
|
130 |
-
data.append(("torchvision._C", "failed to find"))
|
131 |
-
except AttributeError:
|
132 |
-
data.append(("torchvision", "unknown"))
|
133 |
-
|
134 |
-
try:
|
135 |
-
import cv2
|
136 |
-
|
137 |
-
data.append(("cv2", cv2.__version__))
|
138 |
-
except ImportError:
|
139 |
-
pass
|
140 |
-
env_str = tabulate(data) + "\n"
|
141 |
-
env_str += collect_torch_env()
|
142 |
-
return env_str
|
143 |
-
|
144 |
-
|
145 |
-
if __name__ == "__main__":
|
146 |
-
try:
|
147 |
-
import detectron2 # noqa
|
148 |
-
except ImportError:
|
149 |
-
print(collect_env_info())
|
150 |
-
else:
|
151 |
-
from detectron2.utils.collect_env import collect_env_info
|
152 |
-
|
153 |
-
print(collect_env_info())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/pybind11/tests/conftest.py
DELETED
@@ -1,200 +0,0 @@
|
|
1 |
-
# -*- coding: utf-8 -*-
|
2 |
-
"""pytest configuration
|
3 |
-
|
4 |
-
Extends output capture as needed by pybind11: ignore constructors, optional unordered lines.
|
5 |
-
Adds docstring and exceptions message sanitizers: ignore Python 2 vs 3 differences.
|
6 |
-
"""
|
7 |
-
|
8 |
-
import contextlib
|
9 |
-
import difflib
|
10 |
-
import gc
|
11 |
-
import re
|
12 |
-
import textwrap
|
13 |
-
|
14 |
-
import pytest
|
15 |
-
|
16 |
-
import env
|
17 |
-
|
18 |
-
# Early diagnostic for failed imports
|
19 |
-
import pybind11_tests # noqa: F401
|
20 |
-
|
21 |
-
_unicode_marker = re.compile(r'u(\'[^\']*\')')
|
22 |
-
_long_marker = re.compile(r'([0-9])L')
|
23 |
-
_hexadecimal = re.compile(r'0x[0-9a-fA-F]+')
|
24 |
-
|
25 |
-
# Avoid collecting Python3 only files
|
26 |
-
collect_ignore = []
|
27 |
-
if env.PY2:
|
28 |
-
collect_ignore.append("test_async.py")
|
29 |
-
|
30 |
-
|
31 |
-
def _strip_and_dedent(s):
|
32 |
-
"""For triple-quote strings"""
|
33 |
-
return textwrap.dedent(s.lstrip('\n').rstrip())
|
34 |
-
|
35 |
-
|
36 |
-
def _split_and_sort(s):
|
37 |
-
"""For output which does not require specific line order"""
|
38 |
-
return sorted(_strip_and_dedent(s).splitlines())
|
39 |
-
|
40 |
-
|
41 |
-
def _make_explanation(a, b):
|
42 |
-
"""Explanation for a failed assert -- the a and b arguments are List[str]"""
|
43 |
-
return ["--- actual / +++ expected"] + [line.strip('\n') for line in difflib.ndiff(a, b)]
|
44 |
-
|
45 |
-
|
46 |
-
class Output(object):
|
47 |
-
"""Basic output post-processing and comparison"""
|
48 |
-
def __init__(self, string):
|
49 |
-
self.string = string
|
50 |
-
self.explanation = []
|
51 |
-
|
52 |
-
def __str__(self):
|
53 |
-
return self.string
|
54 |
-
|
55 |
-
def __eq__(self, other):
|
56 |
-
# Ignore constructor/destructor output which is prefixed with "###"
|
57 |
-
a = [line for line in self.string.strip().splitlines() if not line.startswith("###")]
|
58 |
-
b = _strip_and_dedent(other).splitlines()
|
59 |
-
if a == b:
|
60 |
-
return True
|
61 |
-
else:
|
62 |
-
self.explanation = _make_explanation(a, b)
|
63 |
-
return False
|
64 |
-
|
65 |
-
|
66 |
-
class Unordered(Output):
|
67 |
-
"""Custom comparison for output without strict line ordering"""
|
68 |
-
def __eq__(self, other):
|
69 |
-
a = _split_and_sort(self.string)
|
70 |
-
b = _split_and_sort(other)
|
71 |
-
if a == b:
|
72 |
-
return True
|
73 |
-
else:
|
74 |
-
self.explanation = _make_explanation(a, b)
|
75 |
-
return False
|
76 |
-
|
77 |
-
|
78 |
-
class Capture(object):
|
79 |
-
def __init__(self, capfd):
|
80 |
-
self.capfd = capfd
|
81 |
-
self.out = ""
|
82 |
-
self.err = ""
|
83 |
-
|
84 |
-
def __enter__(self):
|
85 |
-
self.capfd.readouterr()
|
86 |
-
return self
|
87 |
-
|
88 |
-
def __exit__(self, *args):
|
89 |
-
self.out, self.err = self.capfd.readouterr()
|
90 |
-
|
91 |
-
def __eq__(self, other):
|
92 |
-
a = Output(self.out)
|
93 |
-
b = other
|
94 |
-
if a == b:
|
95 |
-
return True
|
96 |
-
else:
|
97 |
-
self.explanation = a.explanation
|
98 |
-
return False
|
99 |
-
|
100 |
-
def __str__(self):
|
101 |
-
return self.out
|
102 |
-
|
103 |
-
def __contains__(self, item):
|
104 |
-
return item in self.out
|
105 |
-
|
106 |
-
@property
|
107 |
-
def unordered(self):
|
108 |
-
return Unordered(self.out)
|
109 |
-
|
110 |
-
@property
|
111 |
-
def stderr(self):
|
112 |
-
return Output(self.err)
|
113 |
-
|
114 |
-
|
115 |
-
@pytest.fixture
|
116 |
-
def capture(capsys):
|
117 |
-
"""Extended `capsys` with context manager and custom equality operators"""
|
118 |
-
return Capture(capsys)
|
119 |
-
|
120 |
-
|
121 |
-
class SanitizedString(object):
|
122 |
-
def __init__(self, sanitizer):
|
123 |
-
self.sanitizer = sanitizer
|
124 |
-
self.string = ""
|
125 |
-
self.explanation = []
|
126 |
-
|
127 |
-
def __call__(self, thing):
|
128 |
-
self.string = self.sanitizer(thing)
|
129 |
-
return self
|
130 |
-
|
131 |
-
def __eq__(self, other):
|
132 |
-
a = self.string
|
133 |
-
b = _strip_and_dedent(other)
|
134 |
-
if a == b:
|
135 |
-
return True
|
136 |
-
else:
|
137 |
-
self.explanation = _make_explanation(a.splitlines(), b.splitlines())
|
138 |
-
return False
|
139 |
-
|
140 |
-
|
141 |
-
def _sanitize_general(s):
|
142 |
-
s = s.strip()
|
143 |
-
s = s.replace("pybind11_tests.", "m.")
|
144 |
-
s = s.replace("unicode", "str")
|
145 |
-
s = _long_marker.sub(r"\1", s)
|
146 |
-
s = _unicode_marker.sub(r"\1", s)
|
147 |
-
return s
|
148 |
-
|
149 |
-
|
150 |
-
def _sanitize_docstring(thing):
|
151 |
-
s = thing.__doc__
|
152 |
-
s = _sanitize_general(s)
|
153 |
-
return s
|
154 |
-
|
155 |
-
|
156 |
-
@pytest.fixture
|
157 |
-
def doc():
|
158 |
-
"""Sanitize docstrings and add custom failure explanation"""
|
159 |
-
return SanitizedString(_sanitize_docstring)
|
160 |
-
|
161 |
-
|
162 |
-
def _sanitize_message(thing):
|
163 |
-
s = str(thing)
|
164 |
-
s = _sanitize_general(s)
|
165 |
-
s = _hexadecimal.sub("0", s)
|
166 |
-
return s
|
167 |
-
|
168 |
-
|
169 |
-
@pytest.fixture
|
170 |
-
def msg():
|
171 |
-
"""Sanitize messages and add custom failure explanation"""
|
172 |
-
return SanitizedString(_sanitize_message)
|
173 |
-
|
174 |
-
|
175 |
-
# noinspection PyUnusedLocal
|
176 |
-
def pytest_assertrepr_compare(op, left, right):
|
177 |
-
"""Hook to insert custom failure explanation"""
|
178 |
-
if hasattr(left, 'explanation'):
|
179 |
-
return left.explanation
|
180 |
-
|
181 |
-
|
182 |
-
@contextlib.contextmanager
|
183 |
-
def suppress(exception):
|
184 |
-
"""Suppress the desired exception"""
|
185 |
-
try:
|
186 |
-
yield
|
187 |
-
except exception:
|
188 |
-
pass
|
189 |
-
|
190 |
-
|
191 |
-
def gc_collect():
|
192 |
-
''' Run the garbage collector twice (needed when running
|
193 |
-
reference counting tests with PyPy) '''
|
194 |
-
gc.collect()
|
195 |
-
gc.collect()
|
196 |
-
|
197 |
-
|
198 |
-
def pytest_configure():
|
199 |
-
pytest.suppress = suppress
|
200 |
-
pytest.gc_collect = gc_collect
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/pybind11/tests/test_embed/external_module.cpp
DELETED
@@ -1,23 +0,0 @@
|
|
1 |
-
#include <pybind11/pybind11.h>
|
2 |
-
|
3 |
-
namespace py = pybind11;
|
4 |
-
|
5 |
-
/* Simple test module/test class to check that the referenced internals data of external pybind11
|
6 |
-
* modules aren't preserved over a finalize/initialize.
|
7 |
-
*/
|
8 |
-
|
9 |
-
PYBIND11_MODULE(external_module, m) {
|
10 |
-
class A {
|
11 |
-
public:
|
12 |
-
A(int value) : v{value} {};
|
13 |
-
int v;
|
14 |
-
};
|
15 |
-
|
16 |
-
py::class_<A>(m, "A")
|
17 |
-
.def(py::init<int>())
|
18 |
-
.def_readwrite("value", &A::v);
|
19 |
-
|
20 |
-
m.def("internals_at", []() {
|
21 |
-
return reinterpret_cast<uintptr_t>(&py::detail::get_internals());
|
22 |
-
});
|
23 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/detail/range/head_flags.h
DELETED
@@ -1,230 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
#include <thrust/iterator/transform_iterator.h>
|
21 |
-
#include <thrust/iterator/zip_iterator.h>
|
22 |
-
#include <thrust/iterator/counting_iterator.h>
|
23 |
-
#include <thrust/tuple.h>
|
24 |
-
#include <thrust/functional.h>
|
25 |
-
|
26 |
-
|
27 |
-
namespace thrust
|
28 |
-
{
|
29 |
-
namespace detail
|
30 |
-
{
|
31 |
-
|
32 |
-
|
33 |
-
template<typename RandomAccessIterator,
|
34 |
-
typename BinaryPredicate = thrust::equal_to<typename thrust::iterator_value<RandomAccessIterator>::type>,
|
35 |
-
typename ValueType = bool,
|
36 |
-
typename IndexType = typename thrust::iterator_difference<RandomAccessIterator>::type>
|
37 |
-
class head_flags_with_init
|
38 |
-
{
|
39 |
-
typedef typename thrust::iterator_value<RandomAccessIterator>::type init_type;
|
40 |
-
|
41 |
-
// XXX WAR cudafe issue
|
42 |
-
//private:
|
43 |
-
public:
|
44 |
-
struct head_flag_functor
|
45 |
-
{
|
46 |
-
BinaryPredicate binary_pred; // this must be the first member for performance reasons
|
47 |
-
init_type init;
|
48 |
-
IndexType n;
|
49 |
-
|
50 |
-
typedef ValueType result_type;
|
51 |
-
|
52 |
-
__host__ __device__
|
53 |
-
head_flag_functor(init_type init, IndexType n)
|
54 |
-
: binary_pred(), init(init), n(n)
|
55 |
-
{}
|
56 |
-
|
57 |
-
__host__ __device__
|
58 |
-
head_flag_functor(init_type init, IndexType n, BinaryPredicate binary_pred)
|
59 |
-
: binary_pred(binary_pred), init(init), n(n)
|
60 |
-
{}
|
61 |
-
|
62 |
-
template<typename Tuple>
|
63 |
-
__host__ __device__ __thrust_forceinline__
|
64 |
-
result_type operator()(const Tuple &t)
|
65 |
-
{
|
66 |
-
const IndexType i = thrust::get<0>(t);
|
67 |
-
|
68 |
-
if(i == 0)
|
69 |
-
{
|
70 |
-
return !binary_pred(init, thrust::get<1>(t));
|
71 |
-
}
|
72 |
-
|
73 |
-
return !binary_pred(thrust::get<1>(t), thrust::get<2>(t));
|
74 |
-
}
|
75 |
-
};
|
76 |
-
|
77 |
-
typedef thrust::counting_iterator<IndexType> counting_iterator;
|
78 |
-
|
79 |
-
public:
|
80 |
-
typedef thrust::transform_iterator<
|
81 |
-
head_flag_functor,
|
82 |
-
thrust::zip_iterator<thrust::tuple<counting_iterator,RandomAccessIterator,RandomAccessIterator> >
|
83 |
-
> iterator;
|
84 |
-
|
85 |
-
__thrust_exec_check_disable__
|
86 |
-
__host__ __device__
|
87 |
-
head_flags_with_init(RandomAccessIterator first, RandomAccessIterator last, init_type init)
|
88 |
-
: m_begin(thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<IndexType>(0), first, first - 1)),
|
89 |
-
head_flag_functor(init, last - first))),
|
90 |
-
m_end(m_begin + (last - first))
|
91 |
-
{}
|
92 |
-
|
93 |
-
__thrust_exec_check_disable__
|
94 |
-
__host__ __device__
|
95 |
-
head_flags_with_init(RandomAccessIterator first, RandomAccessIterator last, init_type init, BinaryPredicate binary_pred)
|
96 |
-
: m_begin(thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<IndexType>(0), first, first - 1)),
|
97 |
-
head_flag_functor(init, last - first, binary_pred))),
|
98 |
-
m_end(m_begin + (last - first))
|
99 |
-
{}
|
100 |
-
|
101 |
-
__host__ __device__
|
102 |
-
iterator begin() const
|
103 |
-
{
|
104 |
-
return m_begin;
|
105 |
-
}
|
106 |
-
|
107 |
-
__host__ __device__
|
108 |
-
iterator end() const
|
109 |
-
{
|
110 |
-
return m_end;
|
111 |
-
}
|
112 |
-
|
113 |
-
template<typename OtherIndex>
|
114 |
-
__host__ __device__
|
115 |
-
typename iterator::reference operator[](OtherIndex i)
|
116 |
-
{
|
117 |
-
return *(begin() + i);
|
118 |
-
}
|
119 |
-
|
120 |
-
private:
|
121 |
-
iterator m_begin, m_end;
|
122 |
-
};
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
template<typename RandomAccessIterator,
|
127 |
-
typename BinaryPredicate = thrust::equal_to<typename thrust::iterator_value<RandomAccessIterator>::type>,
|
128 |
-
typename ValueType = bool,
|
129 |
-
typename IndexType = typename thrust::iterator_difference<RandomAccessIterator>::type>
|
130 |
-
class head_flags
|
131 |
-
{
|
132 |
-
// XXX WAR cudafe issue
|
133 |
-
//private:
|
134 |
-
public:
|
135 |
-
struct head_flag_functor
|
136 |
-
{
|
137 |
-
BinaryPredicate binary_pred; // this must be the first member for performance reasons
|
138 |
-
IndexType n;
|
139 |
-
|
140 |
-
typedef ValueType result_type;
|
141 |
-
|
142 |
-
__host__ __device__
|
143 |
-
head_flag_functor(IndexType n)
|
144 |
-
: binary_pred(), n(n)
|
145 |
-
{}
|
146 |
-
|
147 |
-
__host__ __device__
|
148 |
-
head_flag_functor(IndexType n, BinaryPredicate binary_pred)
|
149 |
-
: binary_pred(binary_pred), n(n)
|
150 |
-
{}
|
151 |
-
|
152 |
-
template<typename Tuple>
|
153 |
-
__host__ __device__ __thrust_forceinline__
|
154 |
-
result_type operator()(const Tuple &t)
|
155 |
-
{
|
156 |
-
const IndexType i = thrust::get<0>(t);
|
157 |
-
|
158 |
-
// note that we do not dereference the tuple's 2nd element when i <= 0
|
159 |
-
// and therefore do not dereference a bad location at the boundary
|
160 |
-
return (i == 0 || !binary_pred(thrust::get<1>(t), thrust::get<2>(t)));
|
161 |
-
}
|
162 |
-
};
|
163 |
-
|
164 |
-
typedef thrust::counting_iterator<IndexType> counting_iterator;
|
165 |
-
|
166 |
-
public:
|
167 |
-
typedef thrust::transform_iterator<
|
168 |
-
head_flag_functor,
|
169 |
-
thrust::zip_iterator<thrust::tuple<counting_iterator,RandomAccessIterator,RandomAccessIterator> >
|
170 |
-
> iterator;
|
171 |
-
|
172 |
-
__host__ __device__
|
173 |
-
head_flags(RandomAccessIterator first, RandomAccessIterator last)
|
174 |
-
: m_begin(thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<IndexType>(0), first, first - 1)),
|
175 |
-
head_flag_functor(last - first))),
|
176 |
-
m_end(m_begin + (last - first))
|
177 |
-
{}
|
178 |
-
|
179 |
-
__host__ __device__
|
180 |
-
head_flags(RandomAccessIterator first, RandomAccessIterator last, BinaryPredicate binary_pred)
|
181 |
-
: m_begin(thrust::make_transform_iterator(thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<IndexType>(0), first, first - 1)),
|
182 |
-
head_flag_functor(last - first, binary_pred))),
|
183 |
-
m_end(m_begin + (last - first))
|
184 |
-
{}
|
185 |
-
|
186 |
-
__host__ __device__
|
187 |
-
iterator begin() const
|
188 |
-
{
|
189 |
-
return m_begin;
|
190 |
-
}
|
191 |
-
|
192 |
-
__host__ __device__
|
193 |
-
iterator end() const
|
194 |
-
{
|
195 |
-
return m_end;
|
196 |
-
}
|
197 |
-
|
198 |
-
template<typename OtherIndex>
|
199 |
-
__host__ __device__
|
200 |
-
typename iterator::reference operator[](OtherIndex i)
|
201 |
-
{
|
202 |
-
return *(begin() + i);
|
203 |
-
}
|
204 |
-
|
205 |
-
private:
|
206 |
-
iterator m_begin, m_end;
|
207 |
-
};
|
208 |
-
|
209 |
-
|
210 |
-
template<typename RandomAccessIterator, typename BinaryPredicate>
|
211 |
-
__host__ __device__
|
212 |
-
head_flags<RandomAccessIterator, BinaryPredicate>
|
213 |
-
make_head_flags(RandomAccessIterator first, RandomAccessIterator last, BinaryPredicate binary_pred)
|
214 |
-
{
|
215 |
-
return head_flags<RandomAccessIterator, BinaryPredicate>(first, last, binary_pred);
|
216 |
-
}
|
217 |
-
|
218 |
-
|
219 |
-
template<typename RandomAccessIterator>
|
220 |
-
__host__ __device__
|
221 |
-
head_flags<RandomAccessIterator>
|
222 |
-
make_head_flags(RandomAccessIterator first, RandomAccessIterator last)
|
223 |
-
{
|
224 |
-
return head_flags<RandomAccessIterator>(first, last);
|
225 |
-
}
|
226 |
-
|
227 |
-
|
228 |
-
} // end detail
|
229 |
-
} // end thrust
|
230 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/random/xor_combine_engine.h
DELETED
@@ -1,271 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
/*! \file xor_combine_engine.h
|
18 |
-
* \brief A pseudorandom number generator which produces pseudorandom
|
19 |
-
* numbers from two integer base engines by merging their
|
20 |
-
* pseudorandom numbers with bitwise exclusive-or.
|
21 |
-
*/
|
22 |
-
|
23 |
-
#pragma once
|
24 |
-
|
25 |
-
#include <thrust/detail/config.h>
|
26 |
-
#include <thrust/detail/type_traits.h>
|
27 |
-
#include <thrust/random/detail/xor_combine_engine_max.h>
|
28 |
-
#include <thrust/random/detail/random_core_access.h>
|
29 |
-
#include <iostream>
|
30 |
-
#include <cstddef> // for size_t
|
31 |
-
|
32 |
-
namespace thrust
|
33 |
-
{
|
34 |
-
|
35 |
-
namespace random
|
36 |
-
{
|
37 |
-
|
38 |
-
/*! \addtogroup random_number_engine_adaptors
|
39 |
-
* \{
|
40 |
-
*/
|
41 |
-
|
42 |
-
/*! \class xor_combine_engine
|
43 |
-
* \brief An \p xor_combine_engine adapts two existing base random number engines and
|
44 |
-
* produces random values by combining the values produced by each.
|
45 |
-
*
|
46 |
-
* \tparam Engine1 The type of the first base random number engine to adapt.
|
47 |
-
* \tparam s1 The size of the first shift to use in the generation algorithm.
|
48 |
-
* \tparam Engine2 The type of the second base random number engine to adapt.
|
49 |
-
* \tparam s2 The second of the second shift to use in the generation algorithm. Defaults to \c 0.
|
50 |
-
*
|
51 |
-
* The following code snippet shows an example of using an \p xor_combine_engine instance:
|
52 |
-
*
|
53 |
-
* \code
|
54 |
-
* #include <thrust/random/linear_congruential_engine.h>
|
55 |
-
* #include <thrust/random/xor_combine_engine.h>
|
56 |
-
* #include <iostream>
|
57 |
-
*
|
58 |
-
* int main(void)
|
59 |
-
* {
|
60 |
-
* // create an xor_combine_engine from minstd_rand and minstd_rand0
|
61 |
-
* // use a shift of 0 for each
|
62 |
-
* thrust::xor_combine_engine<thrust::minstd_rand,0,thrust::minstd_rand0,0> rng;
|
63 |
-
*
|
64 |
-
* // print a random number to standard output
|
65 |
-
* std::cout << rng() << std::endl;
|
66 |
-
*
|
67 |
-
* return 0;
|
68 |
-
* }
|
69 |
-
* \endcode
|
70 |
-
*/
|
71 |
-
template<typename Engine1, size_t s1,
|
72 |
-
typename Engine2, size_t s2=0u>
|
73 |
-
class xor_combine_engine
|
74 |
-
{
|
75 |
-
public:
|
76 |
-
// types
|
77 |
-
|
78 |
-
/*! \typedef base1_type
|
79 |
-
* \brief The type of the first adapted base random number engine.
|
80 |
-
*/
|
81 |
-
typedef Engine1 base1_type;
|
82 |
-
|
83 |
-
/*! \typedef base2_type
|
84 |
-
* \brief The type of the second adapted base random number engine.
|
85 |
-
*/
|
86 |
-
typedef Engine2 base2_type;
|
87 |
-
|
88 |
-
/*! \typedef result_type
|
89 |
-
* \brief The type of the unsigned integer produced by this \p xor_combine_engine.
|
90 |
-
*/
|
91 |
-
typedef typename thrust::detail::eval_if<
|
92 |
-
(sizeof(typename base2_type::result_type) > sizeof(typename base1_type::result_type)),
|
93 |
-
thrust::detail::identity_<typename base2_type::result_type>,
|
94 |
-
thrust::detail::identity_<typename base1_type::result_type>
|
95 |
-
>::type result_type;
|
96 |
-
|
97 |
-
/*! The size of the first shift used in the generation algorithm.
|
98 |
-
*/
|
99 |
-
static const size_t shift1 = s1;
|
100 |
-
|
101 |
-
/*! The size of the second shift used in the generation algorithm.
|
102 |
-
*/
|
103 |
-
static const size_t shift2 = s2;
|
104 |
-
|
105 |
-
/*! The smallest value this \p xor_combine_engine may potentially produce.
|
106 |
-
*/
|
107 |
-
static const result_type min = 0;
|
108 |
-
|
109 |
-
/*! The largest value this \p xor_combine_engine may potentially produce.
|
110 |
-
*/
|
111 |
-
static const result_type max =
|
112 |
-
detail::xor_combine_engine_max<
|
113 |
-
Engine1, s1, Engine2, s2, result_type
|
114 |
-
>::value;
|
115 |
-
|
116 |
-
// constructors and seeding functions
|
117 |
-
|
118 |
-
/*! This constructor constructs a new \p xor_combine_engine and constructs
|
119 |
-
* its adapted engines using their null constructors.
|
120 |
-
*/
|
121 |
-
__host__ __device__
|
122 |
-
xor_combine_engine(void);
|
123 |
-
|
124 |
-
/*! This constructor constructs a new \p xor_combine_engine using
|
125 |
-
* given \p base1_type and \p base2_type engines to initialize its adapted base engines.
|
126 |
-
*
|
127 |
-
* \param urng1 A \p base1_type to use to initialize this \p xor_combine_engine's
|
128 |
-
* first adapted base engine.
|
129 |
-
* \param urng2 A \p base2_type to use to initialize this \p xor_combine_engine's
|
130 |
-
* first adapted base engine.
|
131 |
-
*/
|
132 |
-
__host__ __device__
|
133 |
-
xor_combine_engine(const base1_type &urng1, const base2_type &urng2);
|
134 |
-
|
135 |
-
/*! This constructor initializes a new \p xor_combine_engine with a given seed.
|
136 |
-
*
|
137 |
-
* \param s The seed used to intialize this \p xor_combine_engine's adapted base engines.
|
138 |
-
*/
|
139 |
-
__host__ __device__
|
140 |
-
xor_combine_engine(result_type s);
|
141 |
-
|
142 |
-
/*! This method initializes the state of this \p xor_combine_engine's adapted base engines
|
143 |
-
* by using their \p default_seed values.
|
144 |
-
*/
|
145 |
-
__host__ __device__
|
146 |
-
void seed(void);
|
147 |
-
|
148 |
-
/*! This method initializes the state of this \p xor_combine_engine's adapted base engines
|
149 |
-
* by using the given seed.
|
150 |
-
*
|
151 |
-
* \param s The seed with which to intialize this \p xor_combine_engine's adapted base engines.
|
152 |
-
*/
|
153 |
-
__host__ __device__
|
154 |
-
void seed(result_type s);
|
155 |
-
|
156 |
-
// generating functions
|
157 |
-
|
158 |
-
/*! This member function produces a new random value and updates this \p xor_combine_engine's state.
|
159 |
-
* \return A new random number.
|
160 |
-
*/
|
161 |
-
__host__ __device__
|
162 |
-
result_type operator()(void);
|
163 |
-
|
164 |
-
/*! This member function advances this \p xor_combine_engine's state a given number of times
|
165 |
-
* and discards the results.
|
166 |
-
*
|
167 |
-
* \param z The number of random values to discard.
|
168 |
-
* \note This function is provided because an implementation may be able to accelerate it.
|
169 |
-
*/
|
170 |
-
__host__ __device__
|
171 |
-
void discard(unsigned long long z);
|
172 |
-
|
173 |
-
// property functions
|
174 |
-
|
175 |
-
/*! This member function returns a const reference to this \p xor_combine_engine's
|
176 |
-
* first adapted base engine.
|
177 |
-
*
|
178 |
-
* \return A const reference to the first base engine this \p xor_combine_engine adapts.
|
179 |
-
*/
|
180 |
-
__host__ __device__
|
181 |
-
const base1_type &base1(void) const;
|
182 |
-
|
183 |
-
/*! This member function returns a const reference to this \p xor_combine_engine's
|
184 |
-
* second adapted base engine.
|
185 |
-
*
|
186 |
-
* \return A const reference to the second base engine this \p xor_combine_engine adapts.
|
187 |
-
*/
|
188 |
-
__host__ __device__
|
189 |
-
const base2_type &base2(void) const;
|
190 |
-
|
191 |
-
/*! \cond
|
192 |
-
*/
|
193 |
-
private:
|
194 |
-
base1_type m_b1;
|
195 |
-
base2_type m_b2;
|
196 |
-
|
197 |
-
friend struct thrust::random::detail::random_core_access;
|
198 |
-
|
199 |
-
__host__ __device__
|
200 |
-
bool equal(const xor_combine_engine &rhs) const;
|
201 |
-
|
202 |
-
template<typename CharT, typename Traits>
|
203 |
-
std::basic_istream<CharT,Traits>& stream_in(std::basic_istream<CharT,Traits> &is);
|
204 |
-
|
205 |
-
template<typename CharT, typename Traits>
|
206 |
-
std::basic_ostream<CharT,Traits>& stream_out(std::basic_ostream<CharT,Traits> &os) const;
|
207 |
-
|
208 |
-
/*! \endcond
|
209 |
-
*/
|
210 |
-
}; // end xor_combine_engine
|
211 |
-
|
212 |
-
|
213 |
-
/*! This function checks two \p xor_combine_engines for equality.
|
214 |
-
* \param lhs The first \p xor_combine_engine to test.
|
215 |
-
* \param rhs The second \p xor_combine_engine to test.
|
216 |
-
* \return \c true if \p lhs is equal to \p rhs; \c false, otherwise.
|
217 |
-
*/
|
218 |
-
template<typename Engine1_, size_t s1_, typename Engine2_, size_t s2_>
|
219 |
-
__host__ __device__
|
220 |
-
bool operator==(const xor_combine_engine<Engine1_,s1_,Engine2_,s2_> &lhs,
|
221 |
-
const xor_combine_engine<Engine1_,s1_,Engine2_,s2_> &rhs);
|
222 |
-
|
223 |
-
|
224 |
-
/*! This function checks two \p xor_combine_engines for inequality.
|
225 |
-
* \param lhs The first \p xor_combine_engine to test.
|
226 |
-
* \param rhs The second \p xor_combine_engine to test.
|
227 |
-
* \return \c true if \p lhs is not equal to \p rhs; \c false, otherwise.
|
228 |
-
*/
|
229 |
-
template<typename Engine1_, size_t s1_, typename Engine2_, size_t s2_>
|
230 |
-
__host__ __device__
|
231 |
-
bool operator!=(const xor_combine_engine<Engine1_,s1_,Engine2_,s2_> &lhs,
|
232 |
-
const xor_combine_engine<Engine1_,s1_,Engine2_,s2_> &rhs);
|
233 |
-
|
234 |
-
|
235 |
-
/*! This function streams a xor_combine_engine to a \p std::basic_ostream.
|
236 |
-
* \param os The \p basic_ostream to stream out to.
|
237 |
-
* \param e The \p xor_combine_engine to stream out.
|
238 |
-
* \return \p os
|
239 |
-
*/
|
240 |
-
template<typename Engine1_, size_t s1_, typename Engine2_, size_t s2_,
|
241 |
-
typename CharT, typename Traits>
|
242 |
-
std::basic_ostream<CharT,Traits>&
|
243 |
-
operator<<(std::basic_ostream<CharT,Traits> &os,
|
244 |
-
const xor_combine_engine<Engine1_,s1_,Engine2_,s2_> &e);
|
245 |
-
|
246 |
-
|
247 |
-
/*! This function streams a xor_combine_engine in from a std::basic_istream.
|
248 |
-
* \param is The \p basic_istream to stream from.
|
249 |
-
* \param e The \p xor_combine_engine to stream in.
|
250 |
-
* \return \p is
|
251 |
-
*/
|
252 |
-
template<typename Engine1_, size_t s1_, typename Engine2_, size_t s2_,
|
253 |
-
typename CharT, typename Traits>
|
254 |
-
std::basic_istream<CharT,Traits>&
|
255 |
-
operator>>(std::basic_istream<CharT,Traits> &is,
|
256 |
-
xor_combine_engine<Engine1_,s1_,Engine2_,s2_> &e);
|
257 |
-
|
258 |
-
|
259 |
-
/*! \} // end random_number_engine_adaptors
|
260 |
-
*/
|
261 |
-
|
262 |
-
|
263 |
-
} // end random
|
264 |
-
|
265 |
-
// import names into thrust::
|
266 |
-
using random::xor_combine_engine;
|
267 |
-
|
268 |
-
} // end thrust
|
269 |
-
|
270 |
-
#include <thrust/random/detail/xor_combine_engine.inl>
|
271 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/stable_merge_sort.h
DELETED
@@ -1,60 +0,0 @@
|
|
1 |
-
/*
|
2 |
-
* Copyright 2008-2013 NVIDIA Corporation
|
3 |
-
*
|
4 |
-
* Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
* you may not use this file except in compliance with the License.
|
6 |
-
* You may obtain a copy of the License at
|
7 |
-
*
|
8 |
-
* http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
*
|
10 |
-
* Unless required by applicable law or agreed to in writing, software
|
11 |
-
* distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
* See the License for the specific language governing permissions and
|
14 |
-
* limitations under the License.
|
15 |
-
*/
|
16 |
-
|
17 |
-
#pragma once
|
18 |
-
|
19 |
-
#include <thrust/detail/config.h>
|
20 |
-
#include <thrust/system/detail/sequential/execution_policy.h>
|
21 |
-
|
22 |
-
namespace thrust
|
23 |
-
{
|
24 |
-
namespace system
|
25 |
-
{
|
26 |
-
namespace detail
|
27 |
-
{
|
28 |
-
namespace sequential
|
29 |
-
{
|
30 |
-
|
31 |
-
|
32 |
-
template<typename DerivedPolicy,
|
33 |
-
typename RandomAccessIterator,
|
34 |
-
typename StrictWeakOrdering>
|
35 |
-
__host__ __device__
|
36 |
-
void stable_merge_sort(sequential::execution_policy<DerivedPolicy> &exec,
|
37 |
-
RandomAccessIterator begin,
|
38 |
-
RandomAccessIterator end,
|
39 |
-
StrictWeakOrdering comp);
|
40 |
-
|
41 |
-
|
42 |
-
template<typename DerivedPolicy,
|
43 |
-
typename RandomAccessIterator1,
|
44 |
-
typename RandomAccessIterator2,
|
45 |
-
typename StrictWeakOrdering>
|
46 |
-
__host__ __device__
|
47 |
-
void stable_merge_sort_by_key(sequential::execution_policy<DerivedPolicy> &exec,
|
48 |
-
RandomAccessIterator1 keys_begin,
|
49 |
-
RandomAccessIterator1 keys_end,
|
50 |
-
RandomAccessIterator2 values_begin,
|
51 |
-
StrictWeakOrdering comp);
|
52 |
-
|
53 |
-
|
54 |
-
} // end namespace sequential
|
55 |
-
} // end namespace detail
|
56 |
-
} // end namespace system
|
57 |
-
} // end namespace thrust
|
58 |
-
|
59 |
-
#include <thrust/system/detail/sequential/stable_merge_sort.inl>
|
60 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/WALT/mmdet/models/backbones/trident_resnet.py
DELETED
@@ -1,292 +0,0 @@
|
|
1 |
-
import torch
|
2 |
-
import torch.nn as nn
|
3 |
-
import torch.nn.functional as F
|
4 |
-
import torch.utils.checkpoint as cp
|
5 |
-
from mmcv.cnn import build_conv_layer, build_norm_layer, kaiming_init
|
6 |
-
from torch.nn.modules.utils import _pair
|
7 |
-
|
8 |
-
from mmdet.models.backbones.resnet import Bottleneck, ResNet
|
9 |
-
from mmdet.models.builder import BACKBONES
|
10 |
-
|
11 |
-
|
12 |
-
class TridentConv(nn.Module):
|
13 |
-
"""Trident Convolution Module.
|
14 |
-
|
15 |
-
Args:
|
16 |
-
in_channels (int): Number of channels in input.
|
17 |
-
out_channels (int): Number of channels in output.
|
18 |
-
kernel_size (int): Size of convolution kernel.
|
19 |
-
stride (int, optional): Convolution stride. Default: 1.
|
20 |
-
trident_dilations (tuple[int, int, int], optional): Dilations of
|
21 |
-
different trident branch. Default: (1, 2, 3).
|
22 |
-
test_branch_idx (int, optional): In inference, all 3 branches will
|
23 |
-
be used if `test_branch_idx==-1`, otherwise only branch with
|
24 |
-
index `test_branch_idx` will be used. Default: 1.
|
25 |
-
bias (bool, optional): Whether to use bias in convolution or not.
|
26 |
-
Default: False.
|
27 |
-
"""
|
28 |
-
|
29 |
-
def __init__(self,
|
30 |
-
in_channels,
|
31 |
-
out_channels,
|
32 |
-
kernel_size,
|
33 |
-
stride=1,
|
34 |
-
trident_dilations=(1, 2, 3),
|
35 |
-
test_branch_idx=1,
|
36 |
-
bias=False):
|
37 |
-
super(TridentConv, self).__init__()
|
38 |
-
self.num_branch = len(trident_dilations)
|
39 |
-
self.with_bias = bias
|
40 |
-
self.test_branch_idx = test_branch_idx
|
41 |
-
self.stride = _pair(stride)
|
42 |
-
self.kernel_size = _pair(kernel_size)
|
43 |
-
self.paddings = _pair(trident_dilations)
|
44 |
-
self.dilations = trident_dilations
|
45 |
-
self.in_channels = in_channels
|
46 |
-
self.out_channels = out_channels
|
47 |
-
self.bias = bias
|
48 |
-
|
49 |
-
self.weight = nn.Parameter(
|
50 |
-
torch.Tensor(out_channels, in_channels, *self.kernel_size))
|
51 |
-
if bias:
|
52 |
-
self.bias = nn.Parameter(torch.Tensor(out_channels))
|
53 |
-
else:
|
54 |
-
self.bias = None
|
55 |
-
self.init_weights()
|
56 |
-
|
57 |
-
def init_weights(self):
|
58 |
-
kaiming_init(self, distribution='uniform', mode='fan_in')
|
59 |
-
|
60 |
-
def extra_repr(self):
|
61 |
-
tmpstr = f'in_channels={self.in_channels}'
|
62 |
-
tmpstr += f', out_channels={self.out_channels}'
|
63 |
-
tmpstr += f', kernel_size={self.kernel_size}'
|
64 |
-
tmpstr += f', num_branch={self.num_branch}'
|
65 |
-
tmpstr += f', test_branch_idx={self.test_branch_idx}'
|
66 |
-
tmpstr += f', stride={self.stride}'
|
67 |
-
tmpstr += f', paddings={self.paddings}'
|
68 |
-
tmpstr += f', dilations={self.dilations}'
|
69 |
-
tmpstr += f', bias={self.bias}'
|
70 |
-
return tmpstr
|
71 |
-
|
72 |
-
def forward(self, inputs):
|
73 |
-
if self.training or self.test_branch_idx == -1:
|
74 |
-
outputs = [
|
75 |
-
F.conv2d(input, self.weight, self.bias, self.stride, padding,
|
76 |
-
dilation) for input, dilation, padding in zip(
|
77 |
-
inputs, self.dilations, self.paddings)
|
78 |
-
]
|
79 |
-
else:
|
80 |
-
assert len(inputs) == 1
|
81 |
-
outputs = [
|
82 |
-
F.conv2d(inputs[0], self.weight, self.bias, self.stride,
|
83 |
-
self.paddings[self.test_branch_idx],
|
84 |
-
self.dilations[self.test_branch_idx])
|
85 |
-
]
|
86 |
-
|
87 |
-
return outputs
|
88 |
-
|
89 |
-
|
90 |
-
# Since TridentNet is defined over ResNet50 and ResNet101, here we
|
91 |
-
# only support TridentBottleneckBlock.
|
92 |
-
class TridentBottleneck(Bottleneck):
|
93 |
-
"""BottleBlock for TridentResNet.
|
94 |
-
|
95 |
-
Args:
|
96 |
-
trident_dilations (tuple[int, int, int]): Dilations of different
|
97 |
-
trident branch.
|
98 |
-
test_branch_idx (int): In inference, all 3 branches will be used
|
99 |
-
if `test_branch_idx==-1`, otherwise only branch with index
|
100 |
-
`test_branch_idx` will be used.
|
101 |
-
concat_output (bool): Whether to concat the output list to a Tensor.
|
102 |
-
`True` only in the last Block.
|
103 |
-
"""
|
104 |
-
|
105 |
-
def __init__(self, trident_dilations, test_branch_idx, concat_output,
|
106 |
-
**kwargs):
|
107 |
-
|
108 |
-
super(TridentBottleneck, self).__init__(**kwargs)
|
109 |
-
self.trident_dilations = trident_dilations
|
110 |
-
self.num_branch = len(trident_dilations)
|
111 |
-
self.concat_output = concat_output
|
112 |
-
self.test_branch_idx = test_branch_idx
|
113 |
-
self.conv2 = TridentConv(
|
114 |
-
self.planes,
|
115 |
-
self.planes,
|
116 |
-
kernel_size=3,
|
117 |
-
stride=self.conv2_stride,
|
118 |
-
bias=False,
|
119 |
-
trident_dilations=self.trident_dilations,
|
120 |
-
test_branch_idx=test_branch_idx)
|
121 |
-
|
122 |
-
def forward(self, x):
|
123 |
-
|
124 |
-
def _inner_forward(x):
|
125 |
-
num_branch = (
|
126 |
-
self.num_branch
|
127 |
-
if self.training or self.test_branch_idx == -1 else 1)
|
128 |
-
identity = x
|
129 |
-
if not isinstance(x, list):
|
130 |
-
x = (x, ) * num_branch
|
131 |
-
identity = x
|
132 |
-
if self.downsample is not None:
|
133 |
-
identity = [self.downsample(b) for b in x]
|
134 |
-
|
135 |
-
out = [self.conv1(b) for b in x]
|
136 |
-
out = [self.norm1(b) for b in out]
|
137 |
-
out = [self.relu(b) for b in out]
|
138 |
-
|
139 |
-
if self.with_plugins:
|
140 |
-
for k in range(len(out)):
|
141 |
-
out[k] = self.forward_plugin(out[k],
|
142 |
-
self.after_conv1_plugin_names)
|
143 |
-
|
144 |
-
out = self.conv2(out)
|
145 |
-
out = [self.norm2(b) for b in out]
|
146 |
-
out = [self.relu(b) for b in out]
|
147 |
-
if self.with_plugins:
|
148 |
-
for k in range(len(out)):
|
149 |
-
out[k] = self.forward_plugin(out[k],
|
150 |
-
self.after_conv2_plugin_names)
|
151 |
-
|
152 |
-
out = [self.conv3(b) for b in out]
|
153 |
-
out = [self.norm3(b) for b in out]
|
154 |
-
|
155 |
-
if self.with_plugins:
|
156 |
-
for k in range(len(out)):
|
157 |
-
out[k] = self.forward_plugin(out[k],
|
158 |
-
self.after_conv3_plugin_names)
|
159 |
-
|
160 |
-
out = [
|
161 |
-
out_b + identity_b for out_b, identity_b in zip(out, identity)
|
162 |
-
]
|
163 |
-
return out
|
164 |
-
|
165 |
-
if self.with_cp and x.requires_grad:
|
166 |
-
out = cp.checkpoint(_inner_forward, x)
|
167 |
-
else:
|
168 |
-
out = _inner_forward(x)
|
169 |
-
|
170 |
-
out = [self.relu(b) for b in out]
|
171 |
-
if self.concat_output:
|
172 |
-
out = torch.cat(out, dim=0)
|
173 |
-
return out
|
174 |
-
|
175 |
-
|
176 |
-
def make_trident_res_layer(block,
|
177 |
-
inplanes,
|
178 |
-
planes,
|
179 |
-
num_blocks,
|
180 |
-
stride=1,
|
181 |
-
trident_dilations=(1, 2, 3),
|
182 |
-
style='pytorch',
|
183 |
-
with_cp=False,
|
184 |
-
conv_cfg=None,
|
185 |
-
norm_cfg=dict(type='BN'),
|
186 |
-
dcn=None,
|
187 |
-
plugins=None,
|
188 |
-
test_branch_idx=-1):
|
189 |
-
"""Build Trident Res Layers."""
|
190 |
-
|
191 |
-
downsample = None
|
192 |
-
if stride != 1 or inplanes != planes * block.expansion:
|
193 |
-
downsample = []
|
194 |
-
conv_stride = stride
|
195 |
-
downsample.extend([
|
196 |
-
build_conv_layer(
|
197 |
-
conv_cfg,
|
198 |
-
inplanes,
|
199 |
-
planes * block.expansion,
|
200 |
-
kernel_size=1,
|
201 |
-
stride=conv_stride,
|
202 |
-
bias=False),
|
203 |
-
build_norm_layer(norm_cfg, planes * block.expansion)[1]
|
204 |
-
])
|
205 |
-
downsample = nn.Sequential(*downsample)
|
206 |
-
|
207 |
-
layers = []
|
208 |
-
for i in range(num_blocks):
|
209 |
-
layers.append(
|
210 |
-
block(
|
211 |
-
inplanes=inplanes,
|
212 |
-
planes=planes,
|
213 |
-
stride=stride if i == 0 else 1,
|
214 |
-
trident_dilations=trident_dilations,
|
215 |
-
downsample=downsample if i == 0 else None,
|
216 |
-
style=style,
|
217 |
-
with_cp=with_cp,
|
218 |
-
conv_cfg=conv_cfg,
|
219 |
-
norm_cfg=norm_cfg,
|
220 |
-
dcn=dcn,
|
221 |
-
plugins=plugins,
|
222 |
-
test_branch_idx=test_branch_idx,
|
223 |
-
concat_output=True if i == num_blocks - 1 else False))
|
224 |
-
inplanes = planes * block.expansion
|
225 |
-
return nn.Sequential(*layers)
|
226 |
-
|
227 |
-
|
228 |
-
@BACKBONES.register_module()
|
229 |
-
class TridentResNet(ResNet):
|
230 |
-
"""The stem layer, stage 1 and stage 2 in Trident ResNet are identical to
|
231 |
-
ResNet, while in stage 3, Trident BottleBlock is utilized to replace the
|
232 |
-
normal BottleBlock to yield trident output. Different branch shares the
|
233 |
-
convolution weight but uses different dilations to achieve multi-scale
|
234 |
-
output.
|
235 |
-
|
236 |
-
/ stage3(b0) \
|
237 |
-
x - stem - stage1 - stage2 - stage3(b1) - output
|
238 |
-
\ stage3(b2) /
|
239 |
-
|
240 |
-
Args:
|
241 |
-
depth (int): Depth of resnet, from {50, 101, 152}.
|
242 |
-
num_branch (int): Number of branches in TridentNet.
|
243 |
-
test_branch_idx (int): In inference, all 3 branches will be used
|
244 |
-
if `test_branch_idx==-1`, otherwise only branch with index
|
245 |
-
`test_branch_idx` will be used.
|
246 |
-
trident_dilations (tuple[int]): Dilations of different trident branch.
|
247 |
-
len(trident_dilations) should be equal to num_branch.
|
248 |
-
""" # noqa
|
249 |
-
|
250 |
-
def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,
|
251 |
-
**kwargs):
|
252 |
-
|
253 |
-
assert num_branch == len(trident_dilations)
|
254 |
-
assert depth in (50, 101, 152)
|
255 |
-
super(TridentResNet, self).__init__(depth, **kwargs)
|
256 |
-
assert self.num_stages == 3
|
257 |
-
self.test_branch_idx = test_branch_idx
|
258 |
-
self.num_branch = num_branch
|
259 |
-
|
260 |
-
last_stage_idx = self.num_stages - 1
|
261 |
-
stride = self.strides[last_stage_idx]
|
262 |
-
dilation = trident_dilations
|
263 |
-
dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None
|
264 |
-
if self.plugins is not None:
|
265 |
-
stage_plugins = self.make_stage_plugins(self.plugins,
|
266 |
-
last_stage_idx)
|
267 |
-
else:
|
268 |
-
stage_plugins = None
|
269 |
-
planes = self.base_channels * 2**last_stage_idx
|
270 |
-
res_layer = make_trident_res_layer(
|
271 |
-
TridentBottleneck,
|
272 |
-
inplanes=(self.block.expansion * self.base_channels *
|
273 |
-
2**(last_stage_idx - 1)),
|
274 |
-
planes=planes,
|
275 |
-
num_blocks=self.stage_blocks[last_stage_idx],
|
276 |
-
stride=stride,
|
277 |
-
trident_dilations=dilation,
|
278 |
-
style=self.style,
|
279 |
-
with_cp=self.with_cp,
|
280 |
-
conv_cfg=self.conv_cfg,
|
281 |
-
norm_cfg=self.norm_cfg,
|
282 |
-
dcn=dcn,
|
283 |
-
plugins=stage_plugins,
|
284 |
-
test_branch_idx=self.test_branch_idx)
|
285 |
-
|
286 |
-
layer_name = f'layer{last_stage_idx + 1}'
|
287 |
-
|
288 |
-
self.__setattr__(layer_name, res_layer)
|
289 |
-
self.res_layers.pop(last_stage_idx)
|
290 |
-
self.res_layers.insert(last_stage_idx, layer_name)
|
291 |
-
|
292 |
-
self._freeze_stages()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/regionclip-demo/detectron2/data/samplers/distributed_sampler.py
DELETED
@@ -1,200 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import itertools
|
3 |
-
import math
|
4 |
-
from collections import defaultdict
|
5 |
-
from typing import Optional
|
6 |
-
import torch
|
7 |
-
from torch.utils.data.sampler import Sampler
|
8 |
-
|
9 |
-
from detectron2.utils import comm
|
10 |
-
|
11 |
-
|
12 |
-
class TrainingSampler(Sampler):
|
13 |
-
"""
|
14 |
-
In training, we only care about the "infinite stream" of training data.
|
15 |
-
So this sampler produces an infinite stream of indices and
|
16 |
-
all workers cooperate to correctly shuffle the indices and sample different indices.
|
17 |
-
|
18 |
-
The samplers in each worker effectively produces `indices[worker_id::num_workers]`
|
19 |
-
where `indices` is an infinite stream of indices consisting of
|
20 |
-
`shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)
|
21 |
-
or `range(size) + range(size) + ...` (if shuffle is False)
|
22 |
-
"""
|
23 |
-
|
24 |
-
def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None):
|
25 |
-
"""
|
26 |
-
Args:
|
27 |
-
size (int): the total number of data of the underlying dataset to sample from
|
28 |
-
shuffle (bool): whether to shuffle the indices or not
|
29 |
-
seed (int): the initial seed of the shuffle. Must be the same
|
30 |
-
across all workers. If None, will use a random seed shared
|
31 |
-
among workers (require synchronization among all workers).
|
32 |
-
"""
|
33 |
-
self._size = size
|
34 |
-
assert size > 0
|
35 |
-
self._shuffle = shuffle
|
36 |
-
if seed is None:
|
37 |
-
seed = comm.shared_random_seed()
|
38 |
-
self._seed = int(seed)
|
39 |
-
|
40 |
-
self._rank = comm.get_rank()
|
41 |
-
self._world_size = comm.get_world_size()
|
42 |
-
|
43 |
-
def __iter__(self):
|
44 |
-
start = self._rank
|
45 |
-
yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
|
46 |
-
|
47 |
-
def _infinite_indices(self):
|
48 |
-
g = torch.Generator()
|
49 |
-
g.manual_seed(self._seed)
|
50 |
-
while True:
|
51 |
-
if self._shuffle:
|
52 |
-
yield from torch.randperm(self._size, generator=g).tolist()
|
53 |
-
else:
|
54 |
-
yield from torch.arange(self._size).tolist()
|
55 |
-
|
56 |
-
|
57 |
-
class RepeatFactorTrainingSampler(Sampler):
|
58 |
-
"""
|
59 |
-
Similar to TrainingSampler, but a sample may appear more times than others based
|
60 |
-
on its "repeat factor". This is suitable for training on class imbalanced datasets like LVIS.
|
61 |
-
"""
|
62 |
-
|
63 |
-
def __init__(self, repeat_factors, *, shuffle=True, seed=None):
|
64 |
-
"""
|
65 |
-
Args:
|
66 |
-
repeat_factors (Tensor): a float vector, the repeat factor for each indice. When it's
|
67 |
-
full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``.
|
68 |
-
shuffle (bool): whether to shuffle the indices or not
|
69 |
-
seed (int): the initial seed of the shuffle. Must be the same
|
70 |
-
across all workers. If None, will use a random seed shared
|
71 |
-
among workers (require synchronization among all workers).
|
72 |
-
"""
|
73 |
-
self._shuffle = shuffle
|
74 |
-
if seed is None:
|
75 |
-
seed = comm.shared_random_seed()
|
76 |
-
self._seed = int(seed)
|
77 |
-
|
78 |
-
self._rank = comm.get_rank()
|
79 |
-
self._world_size = comm.get_world_size()
|
80 |
-
|
81 |
-
# Split into whole number (_int_part) and fractional (_frac_part) parts.
|
82 |
-
self._int_part = torch.trunc(repeat_factors)
|
83 |
-
self._frac_part = repeat_factors - self._int_part
|
84 |
-
|
85 |
-
@staticmethod
|
86 |
-
def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):
|
87 |
-
"""
|
88 |
-
Compute (fractional) per-image repeat factors based on category frequency.
|
89 |
-
The repeat factor for an image is a function of the frequency of the rarest
|
90 |
-
category labeled in that image. The "frequency of category c" in [0, 1] is defined
|
91 |
-
as the fraction of images in the training set (without repeats) in which category c
|
92 |
-
appears.
|
93 |
-
See :paper:`lvis` (>= v2) Appendix B.2.
|
94 |
-
|
95 |
-
Args:
|
96 |
-
dataset_dicts (list[dict]): annotations in Detectron2 dataset format.
|
97 |
-
repeat_thresh (float): frequency threshold below which data is repeated.
|
98 |
-
If the frequency is half of `repeat_thresh`, the image will be
|
99 |
-
repeated twice.
|
100 |
-
|
101 |
-
Returns:
|
102 |
-
torch.Tensor:
|
103 |
-
the i-th element is the repeat factor for the dataset image at index i.
|
104 |
-
"""
|
105 |
-
# 1. For each category c, compute the fraction of images that contain it: f(c)
|
106 |
-
category_freq = defaultdict(int)
|
107 |
-
for dataset_dict in dataset_dicts: # For each image (without repeats)
|
108 |
-
cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
|
109 |
-
for cat_id in cat_ids:
|
110 |
-
category_freq[cat_id] += 1
|
111 |
-
num_images = len(dataset_dicts)
|
112 |
-
for k, v in category_freq.items():
|
113 |
-
category_freq[k] = v / num_images
|
114 |
-
|
115 |
-
# 2. For each category c, compute the category-level repeat factor:
|
116 |
-
# r(c) = max(1, sqrt(t / f(c)))
|
117 |
-
category_rep = {
|
118 |
-
cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))
|
119 |
-
for cat_id, cat_freq in category_freq.items()
|
120 |
-
}
|
121 |
-
|
122 |
-
# 3. For each image I, compute the image-level repeat factor:
|
123 |
-
# r(I) = max_{c in I} r(c)
|
124 |
-
rep_factors = []
|
125 |
-
for dataset_dict in dataset_dicts:
|
126 |
-
cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
|
127 |
-
rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)
|
128 |
-
rep_factors.append(rep_factor)
|
129 |
-
|
130 |
-
return torch.tensor(rep_factors, dtype=torch.float32)
|
131 |
-
|
132 |
-
def _get_epoch_indices(self, generator):
|
133 |
-
"""
|
134 |
-
Create a list of dataset indices (with repeats) to use for one epoch.
|
135 |
-
|
136 |
-
Args:
|
137 |
-
generator (torch.Generator): pseudo random number generator used for
|
138 |
-
stochastic rounding.
|
139 |
-
|
140 |
-
Returns:
|
141 |
-
torch.Tensor: list of dataset indices to use in one epoch. Each index
|
142 |
-
is repeated based on its calculated repeat factor.
|
143 |
-
"""
|
144 |
-
# Since repeat factors are fractional, we use stochastic rounding so
|
145 |
-
# that the target repeat factor is achieved in expectation over the
|
146 |
-
# course of training
|
147 |
-
rands = torch.rand(len(self._frac_part), generator=generator)
|
148 |
-
rep_factors = self._int_part + (rands < self._frac_part).float()
|
149 |
-
# Construct a list of indices in which we repeat images as specified
|
150 |
-
indices = []
|
151 |
-
for dataset_index, rep_factor in enumerate(rep_factors):
|
152 |
-
indices.extend([dataset_index] * int(rep_factor.item()))
|
153 |
-
return torch.tensor(indices, dtype=torch.int64)
|
154 |
-
|
155 |
-
def __iter__(self):
|
156 |
-
start = self._rank
|
157 |
-
yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
|
158 |
-
|
159 |
-
def _infinite_indices(self):
|
160 |
-
g = torch.Generator()
|
161 |
-
g.manual_seed(self._seed)
|
162 |
-
while True:
|
163 |
-
# Sample indices with repeats determined by stochastic rounding; each
|
164 |
-
# "epoch" may have a slightly different size due to the rounding.
|
165 |
-
indices = self._get_epoch_indices(g)
|
166 |
-
if self._shuffle:
|
167 |
-
randperm = torch.randperm(len(indices), generator=g)
|
168 |
-
yield from indices[randperm].tolist()
|
169 |
-
else:
|
170 |
-
yield from indices.tolist()
|
171 |
-
|
172 |
-
|
173 |
-
class InferenceSampler(Sampler):
|
174 |
-
"""
|
175 |
-
Produce indices for inference across all workers.
|
176 |
-
Inference needs to run on the __exact__ set of samples,
|
177 |
-
therefore when the total number of samples is not divisible by the number of workers,
|
178 |
-
this sampler produces different number of samples on different workers.
|
179 |
-
"""
|
180 |
-
|
181 |
-
def __init__(self, size: int):
|
182 |
-
"""
|
183 |
-
Args:
|
184 |
-
size (int): the total number of data of the underlying dataset to sample from
|
185 |
-
"""
|
186 |
-
self._size = size
|
187 |
-
assert size > 0
|
188 |
-
self._rank = comm.get_rank()
|
189 |
-
self._world_size = comm.get_world_size()
|
190 |
-
|
191 |
-
shard_size = (self._size - 1) // self._world_size + 1
|
192 |
-
begin = shard_size * self._rank
|
193 |
-
end = min(shard_size * (self._rank + 1), self._size)
|
194 |
-
self._local_indices = range(begin, end)
|
195 |
-
|
196 |
-
def __iter__(self):
|
197 |
-
yield from self._local_indices
|
198 |
-
|
199 |
-
def __len__(self):
|
200 |
-
return len(self._local_indices)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/regionclip-demo/detectron2/utils/memory.py
DELETED
@@ -1,84 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
|
3 |
-
import logging
|
4 |
-
from contextlib import contextmanager
|
5 |
-
from functools import wraps
|
6 |
-
import torch
|
7 |
-
|
8 |
-
__all__ = ["retry_if_cuda_oom"]
|
9 |
-
|
10 |
-
|
11 |
-
@contextmanager
|
12 |
-
def _ignore_torch_cuda_oom():
|
13 |
-
"""
|
14 |
-
A context which ignores CUDA OOM exception from pytorch.
|
15 |
-
"""
|
16 |
-
try:
|
17 |
-
yield
|
18 |
-
except RuntimeError as e:
|
19 |
-
# NOTE: the string may change?
|
20 |
-
if "CUDA out of memory. " in str(e):
|
21 |
-
pass
|
22 |
-
else:
|
23 |
-
raise
|
24 |
-
|
25 |
-
|
26 |
-
def retry_if_cuda_oom(func):
|
27 |
-
"""
|
28 |
-
Makes a function retry itself after encountering
|
29 |
-
pytorch's CUDA OOM error.
|
30 |
-
It will first retry after calling `torch.cuda.empty_cache()`.
|
31 |
-
|
32 |
-
If that still fails, it will then retry by trying to convert inputs to CPUs.
|
33 |
-
In this case, it expects the function to dispatch to CPU implementation.
|
34 |
-
The return values may become CPU tensors as well and it's user's
|
35 |
-
responsibility to convert it back to CUDA tensor if needed.
|
36 |
-
|
37 |
-
Args:
|
38 |
-
func: a stateless callable that takes tensor-like objects as arguments
|
39 |
-
|
40 |
-
Returns:
|
41 |
-
a callable which retries `func` if OOM is encountered.
|
42 |
-
|
43 |
-
Examples:
|
44 |
-
::
|
45 |
-
output = retry_if_cuda_oom(some_torch_function)(input1, input2)
|
46 |
-
# output may be on CPU even if inputs are on GPU
|
47 |
-
|
48 |
-
Note:
|
49 |
-
1. When converting inputs to CPU, it will only look at each argument and check
|
50 |
-
if it has `.device` and `.to` for conversion. Nested structures of tensors
|
51 |
-
are not supported.
|
52 |
-
|
53 |
-
2. Since the function might be called more than once, it has to be
|
54 |
-
stateless.
|
55 |
-
"""
|
56 |
-
|
57 |
-
def maybe_to_cpu(x):
|
58 |
-
try:
|
59 |
-
like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to")
|
60 |
-
except AttributeError:
|
61 |
-
like_gpu_tensor = False
|
62 |
-
if like_gpu_tensor:
|
63 |
-
return x.to(device="cpu")
|
64 |
-
else:
|
65 |
-
return x
|
66 |
-
|
67 |
-
@wraps(func)
|
68 |
-
def wrapped(*args, **kwargs):
|
69 |
-
with _ignore_torch_cuda_oom():
|
70 |
-
return func(*args, **kwargs)
|
71 |
-
|
72 |
-
# Clear cache and retry
|
73 |
-
torch.cuda.empty_cache()
|
74 |
-
with _ignore_torch_cuda_oom():
|
75 |
-
return func(*args, **kwargs)
|
76 |
-
|
77 |
-
# Try on CPU. This slows down the code significantly, therefore print a notice.
|
78 |
-
logger = logging.getLogger(__name__)
|
79 |
-
logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func)))
|
80 |
-
new_args = (maybe_to_cpu(x) for x in args)
|
81 |
-
new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}
|
82 |
-
return func(*new_args, **new_kwargs)
|
83 |
-
|
84 |
-
return wrapped
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CVPR/time/app.py
DELETED
@@ -1,38 +0,0 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import cv2
|
3 |
-
import numpy as np
|
4 |
-
import torch
|
5 |
-
import torch.nn as nn
|
6 |
-
import torchvision.models as models
|
7 |
-
import einops
|
8 |
-
|
9 |
-
def predict(img):
|
10 |
-
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
11 |
-
model = models.resnet50()
|
12 |
-
model.fc = nn.Linear(2048, 720)
|
13 |
-
resume_path = 'full+++++.pth'
|
14 |
-
model.load_state_dict(torch.load(resume_path, map_location=torch.device(device)))
|
15 |
-
model.to(device)
|
16 |
-
with torch.no_grad():
|
17 |
-
model.eval()
|
18 |
-
img = cv2.resize(img, (224, 224))/255.
|
19 |
-
img = np.stack([einops.rearrange(img, 'h w c -> c h w')], 0)
|
20 |
-
img = torch.Tensor(img).float().to(device)
|
21 |
-
pred = model(img)
|
22 |
-
max_pred = torch.argsort(pred, dim=1, descending=True)
|
23 |
-
max_h = (max_pred[0][0] // 60).item()
|
24 |
-
max_m = (max_pred[0][0] % 60).item()
|
25 |
-
return '{}:{}'.format(str(max_h), str(max_m).zfill(2))
|
26 |
-
|
27 |
-
inputs = gr.inputs.Image()
|
28 |
-
|
29 |
-
io = gr.Interface(
|
30 |
-
fn=predict,
|
31 |
-
description='Note that this model ingests clocks that are already cropped, i.e. we do not run object detection.',
|
32 |
-
title='It\'s About Time: Analog Clock Reading in the Wild',
|
33 |
-
inputs=inputs,
|
34 |
-
examples=['d1.png', 'd2.png'],
|
35 |
-
outputs="text",
|
36 |
-
)
|
37 |
-
|
38 |
-
io.launch(share=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/index.js
DELETED
@@ -1,21 +0,0 @@
|
|
1 |
-
import { lifecycle, heartbeat } from './meta.js'
|
2 |
-
import { makeOneBotReportMsg, makeGSUidReportMsg, makeGSUidSendMsg } from './makeMsg.js'
|
3 |
-
import { getApiData } from './api.js'
|
4 |
-
import { setGuildLatestMsgId, getGuildLatestMsgId, setMsgMap } from './msgMap.js'
|
5 |
-
import { TMP_DIR, sleep, mimeTypes, decodeHtml } from './tool.js'
|
6 |
-
|
7 |
-
export {
|
8 |
-
lifecycle,
|
9 |
-
heartbeat,
|
10 |
-
makeOneBotReportMsg,
|
11 |
-
makeGSUidReportMsg,
|
12 |
-
getApiData,
|
13 |
-
makeGSUidSendMsg,
|
14 |
-
setGuildLatestMsgId,
|
15 |
-
getGuildLatestMsgId,
|
16 |
-
setMsgMap,
|
17 |
-
TMP_DIR,
|
18 |
-
sleep,
|
19 |
-
mimeTypes,
|
20 |
-
decodeHtml
|
21 |
-
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/csrc/ROIPool.h
DELETED
@@ -1,48 +0,0 @@
|
|
1 |
-
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
2 |
-
#pragma once
|
3 |
-
|
4 |
-
#include "cpu/vision.h"
|
5 |
-
|
6 |
-
#ifdef WITH_CUDA
|
7 |
-
#include "cuda/vision.h"
|
8 |
-
#endif
|
9 |
-
|
10 |
-
|
11 |
-
std::tuple<at::Tensor, at::Tensor> ROIPool_forward(const at::Tensor& input,
|
12 |
-
const at::Tensor& rois,
|
13 |
-
const float spatial_scale,
|
14 |
-
const int pooled_height,
|
15 |
-
const int pooled_width) {
|
16 |
-
if (input.type().is_cuda()) {
|
17 |
-
#ifdef WITH_CUDA
|
18 |
-
return ROIPool_forward_cuda(input, rois, spatial_scale, pooled_height, pooled_width);
|
19 |
-
#else
|
20 |
-
AT_ERROR("Not compiled with GPU support");
|
21 |
-
#endif
|
22 |
-
}
|
23 |
-
AT_ERROR("Not implemented on the CPU");
|
24 |
-
}
|
25 |
-
|
26 |
-
at::Tensor ROIPool_backward(const at::Tensor& grad,
|
27 |
-
const at::Tensor& input,
|
28 |
-
const at::Tensor& rois,
|
29 |
-
const at::Tensor& argmax,
|
30 |
-
const float spatial_scale,
|
31 |
-
const int pooled_height,
|
32 |
-
const int pooled_width,
|
33 |
-
const int batch_size,
|
34 |
-
const int channels,
|
35 |
-
const int height,
|
36 |
-
const int width) {
|
37 |
-
if (grad.type().is_cuda()) {
|
38 |
-
#ifdef WITH_CUDA
|
39 |
-
return ROIPool_backward_cuda(grad, input, rois, argmax, spatial_scale, pooled_height, pooled_width, batch_size, channels, height, width);
|
40 |
-
#else
|
41 |
-
AT_ERROR("Not compiled with GPU support");
|
42 |
-
#endif
|
43 |
-
}
|
44 |
-
AT_ERROR("Not implemented on the CPU");
|
45 |
-
}
|
46 |
-
|
47 |
-
|
48 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DarkyMan/OrangeMixes/README.md
DELETED
@@ -1,14 +0,0 @@
|
|
1 |
-
---
|
2 |
-
title: Open Journey V4
|
3 |
-
emoji: 💻
|
4 |
-
colorFrom: red
|
5 |
-
colorTo: indigo
|
6 |
-
sdk: gradio
|
7 |
-
sdk_version: 3.23.0
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
-
license: mit
|
11 |
-
duplicated_from: DarkyMan/URPM
|
12 |
-
---
|
13 |
-
|
14 |
-
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
spaces/DeclK/pose/tools/manager.py
DELETED
@@ -1,72 +0,0 @@
|
|
1 |
-
import mim
|
2 |
-
from pathlib import Path
|
3 |
-
from mim.utils import get_installed_path, echo_success
|
4 |
-
from mmengine.config import Config
|
5 |
-
|
6 |
-
class Manager:
|
7 |
-
|
8 |
-
def __init__(self, path=None) -> None:
|
9 |
-
"""
|
10 |
-
Params:
|
11 |
-
- path: root path of projects to save checkpoints and configs
|
12 |
-
"""
|
13 |
-
if path:
|
14 |
-
self.path = Path(path)
|
15 |
-
else:
|
16 |
-
self.path = Path(__file__).parents[1]
|
17 |
-
self.keys = ['weight', 'config', 'model', 'training_data']
|
18 |
-
|
19 |
-
def get_model_infos(self, package_name, keyword: str=None):
|
20 |
-
""" because mim search is too strict,
|
21 |
-
I want to search by keyword, not a strict match
|
22 |
-
"""
|
23 |
-
model_infos = mim.get_model_info(package_name)
|
24 |
-
model_names = model_infos.index
|
25 |
-
info_keys = model_infos.columns.tolist()
|
26 |
-
keys = self.intersect_keys(info_keys,
|
27 |
-
self.keys)
|
28 |
-
if keyword is None:
|
29 |
-
return model_infos[:, keys]
|
30 |
-
# get valid names, which contains the keyword
|
31 |
-
valid_names = [name for name in model_names
|
32 |
-
if keyword in name]
|
33 |
-
filter_infos = model_infos.loc[valid_names, keys]
|
34 |
-
return filter_infos
|
35 |
-
|
36 |
-
def intersect_keys(self, keys1 , keys2):
|
37 |
-
return list(set(keys1) & set(keys2))
|
38 |
-
|
39 |
-
def download(self, package, model, config_only=False):
|
40 |
-
""" Use model names to download checkpoints and configs.
|
41 |
-
Args:
|
42 |
-
- package: package name, e.g. mmdet
|
43 |
-
- model: model name, e.g. faster_rcnn or faster_rcnn_r50_fpn_1x_coco
|
44 |
-
- config_only: only download configs, which is helpful when you
|
45 |
-
already download checkpoints fast through other ways.
|
46 |
-
"""
|
47 |
-
infos = self.get_model_infos(package, model)
|
48 |
-
|
49 |
-
for model, info in infos.iterrows():
|
50 |
-
# get destination path
|
51 |
-
hyper_name = info['model']
|
52 |
-
dst_path = self.path / 'model_zoo' / hyper_name / model
|
53 |
-
dst_path.mkdir(parents=True, exist_ok=True)
|
54 |
-
|
55 |
-
if config_only:
|
56 |
-
# get config path of the package
|
57 |
-
installed_path = Path(get_installed_path(package))
|
58 |
-
config_path = info['config']
|
59 |
-
config_path = installed_path / '.mim' / config_path
|
60 |
-
# build and dump config
|
61 |
-
config_obj = Config.fromfile(config_path)
|
62 |
-
saved_config_path = dst_path / f'{model}.py'
|
63 |
-
config_obj.dump(saved_config_path)
|
64 |
-
echo_success(
|
65 |
-
f'Successfully dumped {model}.py to {dst_path}')
|
66 |
-
else:
|
67 |
-
mim.download(package, [model], dest_root=dst_path)
|
68 |
-
|
69 |
-
if __name__ == '__main__':
|
70 |
-
m = Manager()
|
71 |
-
print(m.get_model_infos('mmdet', 'det'))
|
72 |
-
# m.download('mmpose', 'rtmpose-t_8xb256-420e_aic-coco-256x192', config_only=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|