Commit 8c2ec13
Parent(s): 5a523ba
Update parquet files (step 60 of 121)
This view is limited to 50 files because it contains too many changes.
- spaces/101-5/gpt4free/g4f/.v1/testing/useless_test.py +0 -25
- spaces/1acneusushi/gradio-2dmoleculeeditor/data/Enscape3D System Requirements for Windows and MacOS A Comparison.md +0 -54
- spaces/1nferno/Single_Digit_Detection/README.md +0 -13
- spaces/1phancelerku/anime-remove-background/Blade Idle A Fun and Easy Idle RPG with Customizable Skills and Equipment.md +0 -104
- spaces/1phancelerku/anime-remove-background/Enjoy Music Movies and TV Shows with Black Video Player APK.md +0 -94
- spaces/20four60/Auto-GPT/Dockerfile +0 -65
- spaces/AIFILMS/generate_human_motion/pyrender/pyrender/camera.py +0 -437
- spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/audio/pitch_extractors.py +0 -85
- spaces/ASJMO/freegpt/client/css/dropdown.css +0 -10
- spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/utils.py +0 -72
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetChildWidth.js +0 -18
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/methods/CollapseSubMenu.js +0 -12
- spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/press/Press.d.ts +0 -2
- spaces/AkashKhamkar/Job_Search_Engine/loader.py +0 -11
- spaces/Alpaca233/SadTalker/src/facerender/modules/make_animation.py +0 -170
- spaces/Alpaca233/SadTalker/webui.sh +0 -140
- spaces/Aman30577/imageTool1/README.md +0 -12
- spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/__init__.py +0 -0
- spaces/Amrrs/DragGan-Inversion/stylegan_human/PP_HumanSeg/pretrained_model/download_pretrained_model.py +0 -44
- spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/print_env.py +0 -48
- spaces/Andy1621/uniformer_image_detection/configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py +0 -4
- spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/fcn_unet_s5-d16.py +0 -51
- spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/version_utils.py +0 -90
- spaces/ArkanDash/rvc-models/app-full.py +0 -254
- spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/locators.py +0 -1300
- spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/htsat.py +0 -1308
- spaces/Audio-AGI/WavJourney/scripts/EnvsSetup.sh +0 -7
- spaces/Awesimo/jojogan/e4e/models/stylegan2/model.py +0 -678
- spaces/Awiny/Image2Paragraph/models/grit_src/grit/modeling/roi_heads/grit_roi_heads.py +0 -478
- spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_checkpoint.py +0 -49
- spaces/Banbri/zcvzcv/src/components/ui/switch.tsx +0 -29
- spaces/Benson/text-generation/Examples/Cmo Descargar Coches De Lujo Europeos.md +0 -47
- spaces/Benson/text-generation/Examples/Descargar Granja Hroes Sper Saga Para Pc.md +0 -79
- spaces/BetterAPI/BetterChat_new/src/lib/utils/trimSuffix.ts +0 -6
- spaces/Big-Web/MMSD/env/Lib/site-packages/_distutils_hack/override.py +0 -1
- spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/util.py +0 -235
- spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/structures/image_list.py +0 -102
- spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/malloc_and_free.h +0 -23
- spaces/ChenWu98/Stable-CycleDiffusion/README.md +0 -13
- spaces/CofAI/chat/client/js/highlight.min.js +0 -0
- spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/models/diffusion/ddim.py +0 -337
- spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/utils.py +0 -189
- spaces/CuriousDolphin/MobileSAM/utils/__init__.py +0 -0
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/__init__.py +0 -62
- spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/structures/mty.py +0 -59
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/filenames.py +0 -246
- spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_o_s_t.py +0 -308
- spaces/DaleChen/AutoGPT/autogpt/commands/write_tests.py +0 -31
- spaces/DanielSan7/judini-video/README.md +0 -13
- spaces/Datasculptor/OpenAI-Chatbot_App/app.py +0 -66
spaces/101-5/gpt4free/g4f/.v1/testing/useless_test.py
DELETED
@@ -1,25 +0,0 @@
from gpt4free import usesless

message_id = ""
while True:
    prompt = input("Question: ")
    if prompt == "!stop":
        break

    req = usesless.Completion.create(prompt=prompt, parentMessageId=message_id)

    print(f"Answer: {req['text']}")
    message_id = req["id"]

import gpt4free

message_id = ""
while True:
    prompt = input("Question: ")
    if prompt == "!stop":
        break

    req = gpt4free.Completion.create(provider=gpt4free.Provider.UseLess, prompt=prompt, parentMessageId=message_id)

    print(f"Answer: {req['text']}")
    message_id = req["id"]
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Enscape3D System Requirements for Windows and MacOS A Comparison.md
DELETED
@@ -1,54 +0,0 @@
<br />
<h1>What You Need to Know About Enscape3D System Requirements</h1>

<p>Enscape3D is a powerful real-time rendering software that works with popular CAD/BIM applications such as Revit, SketchUp, Rhino, Archicad and Vectorworks. Enscape3D allows you to create stunning visualizations, animations and virtual reality experiences with ease and speed. But what are the system requirements to run Enscape3D smoothly and efficiently?</p>
<h2>enscape3d system requirements</h2><br /><p><b><b>Download Zip</b> >>> <a href="https://byltly.com/2uKxMm">https://byltly.com/2uKxMm</a></b></p><br /><br />

<p>In this article, we will explain the technical requirements to run Enscape3D on Windows and MacOS operating systems, as well as the recommended specifications for optimal performance and VR compatibility. We will also provide some tips on how to optimize your system and project settings for better rendering quality and speed.</p>

<h2>Enscape3D System Requirements for Windows</h2>

<p>Enscape3D uses ray tracing for its real-time rendering, and almost all the calculations that Enscape3D performs are being handled on the graphics card (GPU). For this reason, your computer must at least meet the minimum recommended system requirements set out below. Furthermore, although not a requirement, we do recommend that you use Enscape3D with dual monitors, as Enscape3D is optimized to work on a dual monitor setup.</p>

<p>The system requirements to run Enscape3D, as well as the standalone executable files that can be exported from Enscape3D, are identical. It is also recommended that your internet connection is fast and stable, and that you should use a direct cable connection and avoid using a Wi-fi connection where possible, as this can slow down the asset library loading times.</p>
<p></p>

<table>
<thead>
<tr>
<th>Windows OS</th>
<th>Minimum Requirements</th>
<th>Recommended Requirements</th>
<th>VR Requirements</th>
</tr>
</thead>
<tbody>
<tr>
<td>Operating System</td>
<td>Windows 10 or higher<br>Enscape3D will possibly also run where Windows 10 is installed on certain Intel Macs via Bootcamp</td>
<td>Windows 10 or higher<br>Enscape3D will possibly also run where Windows 10 is installed on certain Intel Macs via Bootcamp</td>
<td>Windows 10 or higher<br>Enscape3D will possibly also run where Windows 10 is installed on certain Intel Macs via Bootcamp</td>
</tr>
<tr>
<td>Graphics Card</td>
<td>NVIDIA or AMD dedicated GPU with 4GB VRAM that supports Vulkan 1.1<br>NVIDIA GeForce GTX 900 series / Quadro M series and newer<br>AMD Radeon RX 400 series / equivalent Radeon Pro series and newer<br><strong>Unsupported hardware:</strong><br>Radeon 6000 mobile GPU's<br>Intel Integrated Graphics onboard GPU's<br>SLI</td>
<td>NVIDIA or AMD dedicated GPU with 8GB VRAM that supports Vulkan 1.1<br>NVIDIA GeForce RTX 2000 series / Quadro RTX series and newer<br>AMD Radeon RX 5000 series / equivalent Radeon Pro series and newer</td>
<td>NVIDIA or AMD dedicated GPU with 8GB VRAM that supports Vulkan 1.1<br>NVIDIA GeForce RTX 3000 series / Quadro RTX series and newer<br>AMD Radeon RX 6000 series / equivalent Radeon Pro series and newer</td>
</tr>
<tr>
<td>CPU</td>
<td>Dual core processor (e.g. Intel Core i5) with at least 2.5 GHz clock speed</td>
<td>Quad core processor (e.g. Intel Core i7) with at least 3.5 GHz clock speed</td>
<td>Six core processor (e.g. Intel Core i9) with at least 4 GHz clock speed</td>
</tr>
<tr>
<td>RAM</td>
<td>8 GB RAM or more</td>
<td>16 GB RAM or more</td>
<td>32 GB RAM or more</td>
</tr>
<tr>
<td>CAD/BIM Software</td>
<td>The Enscape3D plug-in is provided for the following host applications:<br>Revit (2019, 2020, 2021, 2022, and 2023)<br>*SketchUp (2019, 2020, 2021,</p> ddb901b051<br />
<br />
<br />
spaces/1nferno/Single_Digit_Detection/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Single Digit Detection
emoji: 📚
colorFrom: purple
colorTo: yellow
sdk: gradio
sdk_version: 3.1.7
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/1phancelerku/anime-remove-background/Blade Idle A Fun and Easy Idle RPG with Customizable Skills and Equipment.md
DELETED
@@ -1,104 +0,0 @@
<br />
<h1>How to Download Blade Idle: A Guide for Android Users</h1>
<p>If you're looking for a new idle RPG game to play on your Android device, you might want to check out Blade Idle, a simulation game developed by mobirix. In this game, you'll follow the story of a common herb collector who stumbles onto a legendary sword and becomes a great hero. You'll adventure through the main stages and dungeons, and grow your character through farming, merging, and upgrading your equipment. You'll also be able to customize your skills, collect various pets, relics, and insignias, and challenge the upgrade dungeon for different skins.</p>
<p>Blade Idle is a game that combines the thrill of action with the convenience of idle gameplay. You can enjoy the game at your own pace, without worrying about missing out on anything. You can also play with other players from around the world, and compete for rankings and rewards. Blade Idle is a game that will keep you entertained for hours, with its rich content, stunning graphics, and engaging storyline.</p>
<h2>download blade idle</h2><br /><p><b><b>Download File</b> ··· <a href="https://jinyurl.com/2uNQph">https://jinyurl.com/2uNQph</a></b></p><br /><br />
<p>So how can you download Blade Idle and start playing it right away? There are two main ways to do so: from the Google Play Store or from the BlueStacks emulator. In this article, we'll explain both methods in detail, and also show you how to play Blade Idle on your PC or Mac if you prefer a bigger screen. Let's get started!</p>
<h2>How to download Blade Idle from Google Play Store</h2>
<p>The easiest way to download Blade Idle is from the Google Play Store, the official app store for Android devices. Here are the steps you need to follow:</p>
<ol>
<li>Open the Google Play Store app on your Android device.</li>
<li>Search for "Blade Idle" in the search bar.</li>
<li>Tap on the game icon that says "Blade Idle" by mobirix.</li>
<li>Tap on the green "Install" button and wait for the download to finish.</li>
<li>Tap on the "Open" button or find the game icon on your home screen or app drawer.</li>
<li>Enjoy playing Blade Idle!</li>
</ol>
<p>That's it! You've successfully downloaded Blade Idle from the Google Play Store. You can now start playing the game and enjoy its features. However, if you don't have access to the Google Play Store or you want to try a different way of downloading Blade Idle, you can also use an emulator.</p>
<h2>How to download Blade Idle from BlueStacks emulator</h2>
<p>An emulator is a software that allows you to run Android apps on your PC or Mac. One of the most popular emulators is BlueStacks, which is free and easy to use. With BlueStacks, you can download Blade Idle and play it on your computer with better performance and graphics. Here are the steps you need to follow:</p>
<ol>
<li>Download and install BlueStacks on your PC or Mac from [3](https://www.bluestacks.com/apps/simulation/blade-idle-on-pc.html).</li>
<li>Launch BlueStacks and sign in with your Google account.</li>
<li>Search for "Blade Idle" in the search bar at the top right corner.</li>
<li>Click on the game icon that says "Blade Idle" by mobirix.</li>
<li>Click on the green "Install" button and wait for the download to finish.</li>
<li>Click on the "Open" button or find the game icon on your home screen or app drawer.</li>
<li>Enjoy playing Blade Idle!</li>
</ol>
<p>Congratulations! You've successfully downloaded Blade Idle from BlueStacks emulator. You can now play the game on your PC or Mac with better controls and features. However, if you want to switch between your Android device and your computer, you can also sync your progress using Facebook or Google Play Games.</p>
<h2>How to play Blade Idle on your PC or Mac</h2>
<p>If you've downloaded Blade Idle from BlueStacks emulator, you can also play it on your PC or Mac with better graphics and performance. However, you might need to adjust some settings and controls to optimize your gaming experience. Here are some tips and tricks you can use:</p>
<ul>
<li>Change the resolution and graphics quality of the game from the settings menu. You can choose from low, medium, high, or ultra settings depending on your device's specifications.</li>
<li>Use the keyboard and mouse to control the game. You can customize the key mapping from the BlueStacks settings menu. You can also use the gamepad if you have one connected to your computer.</li>
<li>Enable the eco mode to reduce CPU and battery consumption. This will make the game run smoother and faster. You can also enable the multi-instance mode to run multiple games or apps at the same time.</li>
<li>Use the screen recorder and screenshot tools to capture your gameplay and share it with your friends. You can also stream your game live on Twitch or YouTube using the BlueStacks streaming mode.</li>
<li>Access the in-game chat and social features to communicate with other players and join guilds. You can also use the BlueStacks chat app to chat with other BlueStacks users.</li>
</ul>
<p>With these tips and tricks, you can enjoy playing Blade Idle on your PC or Mac with better graphics and performance. You can also switch between your Android device and your computer anytime you want, as long as you sync your progress using Facebook or Google Play Games.</p>
<p>download blade idle game for android<br />
download blade idle game for pc<br />
download blade idle game apk<br />
download blade idle game mod<br />
download blade idle game guide<br />
download blade idle game tips and tricks<br />
download blade idle game coupon codes<br />
download blade idle game best skills<br />
download blade idle game emulator<br />
download blade idle game bluestacks<br />
download blade idle game review<br />
download blade idle game hack<br />
download blade idle game cheats<br />
download blade idle game update<br />
download blade idle game offline<br />
download blade idle game online<br />
download blade idle game free<br />
download blade idle game no ads<br />
download blade idle game costumes<br />
download blade idle game weapons<br />
download blade idle game armor<br />
download blade idle game dungeons<br />
download blade idle game farming<br />
download blade idle game pets<br />
download blade idle game relics<br />
download blade idle game insignia<br />
download blade idle game skins<br />
download blade idle game adventure<br />
download blade idle game simulation<br />
download blade idle game role playing<br />
download blade idle game casual<br />
download blade idle game multiplayer<br />
download blade idle game single player<br />
download blade idle game anime<br />
download blade idle game story<br />
download blade idle game hero<br />
download blade idle game sword<br />
download blade idle game fusion<br />
download blade idle game merge<br />
download blade idle game challenge<br />
download blade idle mobirix <br />
how to play/download/install/blade/idle/game <br />
where to find/download/get/blade/idle/game <br />
what is/download/blade/idle/game <br />
why play/download/blade/idle/game</p>
<h2>Conclusion</h2>
<p>Blade Idle is a fun and addictive idle RPG game that you can play on your Android device or your PC or Mac. In this game, you'll follow the story of a common herb collector who becomes a great hero with a legendary sword. You'll adventure through various stages and dungeons, and grow your character through farming, merging, and upgrading your equipment. You'll also be able to customize your skills, collect various pets, relics, and insignias, and challenge the upgrade dungeon for different skins.</p>
<p>You can download Blade Idle from the Google Play Store or from the BlueStacks emulator. Both methods are easy and fast, and will allow you to start playing the game right away. You can also play Blade Idle on your PC or Mac with better graphics and performance, using some tips and tricks to optimize your gaming experience.</p>
<p>Blade Idle is a game that will keep you entertained for hours, with its rich content, stunning graphics, and engaging storyline. You can also play with other players from around the world, and compete for rankings and rewards. Blade Idle is a game that you don't want to miss out on!</p>
<h3>FAQs</h3>
<p>Here are some common questions and answers about Blade Idle:</p>
<ol>
<li><b>What are the system requirements for Blade Idle?</b><br>
Blade Idle requires Android 4.4 or higher for mobile devices, and Windows 7 or higher or Mac OS X 10.11 or higher for computers. You also need at least 2 GB of RAM and 500 MB of free storage space.</li>
<li><b>How can I get more gold and gems in Blade Idle?</b><br>
You can get more gold and gems by completing quests, achievements, daily missions, events, and dungeons. You can also watch ads, spin the roulette wheel, open chests, or buy them with real money.</li>
<li><b>How can I merge and upgrade my equipment in Blade Idle?</b><br>
You can merge and upgrade your equipment by dragging two items of the same grade onto each other. This will create a higher grade item with better stats. You can also use upgrade stones to increase the level of your equipment.</li>
<li><b>How can I unlock more skills in Blade Idle?</b><br>
You can unlock more skills by reaching certain levels or completing certain stages. You can also use skill books to learn new skills or upgrade existing ones.</li>
<li><b>How can I change my character's appearance in Blade Idle?</b><br>
You can change your character's appearance by using different skins. You can get skins by challenging the upgrade dungeon or buying them with gems.</li>
</ol></p> 401be4b1e0<br />
<br />
<br />
spaces/1phancelerku/anime-remove-background/Enjoy Music Movies and TV Shows with Black Video Player APK.md
DELETED
@@ -1,94 +0,0 @@
<br />
<h1>Black Video Player APK: A Powerful and Elegant Media Player for Android</h1>
<p>If you are looking for a media player that can play any video or audio file on your Android device, you should try Black Video Player APK. This is a free and ad-free app that offers a lot of features and benefits for your media enjoyment. In this article, we will tell you what Black Video Player APK is, how to download and install it, how to use it, and why you should choose it.</p>
<h2>black video player apk</h2><br /><p><b><b>Download File</b> ✅ <a href="https://jinyurl.com/2uNNKJ">https://jinyurl.com/2uNNKJ</a></b></p><br /><br />
<h2>What is Black Video Player APK?</h2>
<p>Black Video Player APK is an app that allows you to play any video or audio file on your Android device. It supports all formats, including MKV, MP4, AVI, MOV, Ogg, FLAC, TS, M2TS, Wv, and AAC. It can also play files from your local storage or from network streams. It has a simple and elegant user interface that makes it easy to use and navigate. It also has a lot of features that enhance your media experience.</p>
<h3>Features of Black Video Player APK</h3>
<h4>Supports all video and audio formats</h4>
<p>Black Video Player APK can play any video or audio file that you have on your device or online. You don't need to worry about compatibility issues or converting files. You can enjoy any media content with this app.</p>
<h4>Plays local and network files</h4>
<p>Black Video Player APK can play files from your internal or external storage, as well as from network streams. You can access your media library easily with this app. You can also stream videos from online sources, such as YouTube, Vimeo, Dailymotion, etc.</p>
<h4>Offers gesture controls and subtitles</h4>
<p>Black Video Player APK gives you full control over your playback with gesture controls. You can swipe left or right to seek forward or backward, swipe up or down to adjust the volume or brightness, double tap to pause or resume, etc. You can also enable subtitles for your videos, and adjust the size, color, position, and timing of them.</p>
<h4>Customizes playback speed and aspect ratio</h4>
<p>Black Video Player APK lets you customize your playback speed and aspect ratio according to your preference. You can speed up or slow down the video or audio playback, or change the aspect ratio to fit your screen size. You can also rotate the screen orientation if you want.</p>
<h4>Enhances video quality and sound effects</h4>
<p>Black Video Player APK improves the video quality and sound effects of your media files. It has a built-in equalizer that lets you adjust the bass, treble, balance, etc. of your audio output. It also has a video enhancer that enhances the brightness, contrast, saturation, etc. of your video output.</p>
<p>black video player apk download<br />
black video player apk for android<br />
black video player apk free<br />
black video player apk pro<br />
black video player apk mod<br />
black video player apk latest version<br />
black video player apk no ads<br />
black video player apk offline<br />
black video player apk online<br />
black video player apk premium<br />
black video player apk full<br />
black video player apk cracked<br />
black video player apk hd<br />
black video player apk 4k<br />
black video player apk 2023<br />
black video player apk update<br />
black video player apk best<br />
black video player apk new<br />
black video player apk old<br />
black video player apk beta<br />
black video player apk review<br />
black video player apk features<br />
black video player apk install<br />
black video player apk uninstall<br />
black video player apk alternative<br />
black video player apk comparison<br />
black video player apk ranking<br />
black video player apk rating<br />
black video player apk feedback<br />
black video player apk support<br />
black video player apk help<br />
black video player apk guide<br />
black video player apk tutorial<br />
black video player apk tips<br />
black video player apk tricks<br />
black video player apk hacks<br />
black video player apk cheats<br />
black video player apk codes<br />
black video player apk coupons<br />
black video player apk deals<br />
black video player apk discounts<br />
black video player apk offers<br />
black video player apk promotions<br />
black video player apk sales<br />
black video player apk free trial<br />
black video player apk subscription<br />
black video player apk license key<br />
black video player apk activation code</p>
<h3>How to download and install Black Video Player APK?</h3>
<h4>Download the APK file from a trusted source</h4>
<p>To download Black Video Player APK, you need to find a trusted source that offers the latest version of the app. You can use [this link] to download the APK file.</p>
<h4>Enable unknown sources on your device settings</h4>
<p>To install Black Video Player APK, you need to enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.</p>
<h4>Install the APK file and launch the app</h4>
<p>After you have downloaded and enabled unknown sources, you can install the APK file. To do this, locate the APK file on your device and tap on it. Follow the instructions on the screen to complete the installation. Once the app is installed, you can launch it from your app drawer or home screen.</p>
<h3>How to use Black Video Player APK?</h3>
<h4>Browse and select the media file you want to play</h4>
<p>To use Black Video Player APK, you need to browse and select the media file you want to play. You can do this by tapping on the menu icon on the top left corner of the app and choosing the folder where your media files are stored. You can also tap on the network icon on the top right corner of the app and enter the URL of the online video or audio stream you want to play.</p>
<h4>Adjust the settings and preferences according to your needs</h4>
<p>Once you have selected the media file you want to play, you can adjust the settings and preferences according to your needs. You can do this by tapping on the gear icon on the top right corner of the app and choosing the options you want. You can change the playback speed, aspect ratio, subtitle settings, equalizer settings, video enhancer settings, etc.</p>
<h4>Enjoy your media experience with Black Video Player APK</h4>
<p>After you have adjusted the settings and preferences, you can enjoy your media experience with Black Video Player APK. You can use the gesture controls to control your playback, or use the buttons on the bottom of the screen. You can also switch between portrait and landscape mode by rotating your device.</p>
<h2>Why choose Black Video Player APK?</h2>
<h3>Benefits of Black Video Player APK</h3>
<h4>Simple and elegant user interface</h4>
<p>Black Video Player APK has a simple and elegant user interface that makes it easy to use and navigate. It has a black theme that is pleasing to the eye and reduces eye strain. It also has a minimalistic design that focuses on your media content.</p>
<h4>Smooth and stable performance</h4>
<p>Black Video Player APK has a smooth and stable performance that ensures a high-quality media experience. It has a powerful engine that can handle any video or audio format without lagging or crashing. It also has a low battery consumption that saves your device's power.</p>
<h4>Free and ad-free app</h4>
<p>Black Video Player APK is a free and ad-free app that does not require any registration or subscription. You can download and use it without any limitations or interruptions. You can also enjoy all its features and benefits without paying anything.</p>
<h4>Compatible with most Android devices</h4>
<p>Black Video Player APK is compatible with most Android devices that run on Android 5.0 or higher. It can work on any device size, from smartphones to tablets. It can also adapt to any screen resolution, from HD to 4K.</p>
<h2>Conclusion</h2>
<p>In conclusion, Black Video Player APK is a powerful and elegant media player for Android that can play any video or audio file on your device or online. It has a lot of features that enhance your media experience, such as gesture controls, subtitles, playback speed, aspect ratio, equalizer, video enhancer, etc. It also has a simple and elegant user interface, a smooth and stable performance, a free and ad-free app, and a compatibility with most Android devices. If you are looking for a media player that can meet all your needs, you should try Black Video Player APK.</p>
FAQs - Q: Is Black Video Player APK safe to use? - A: Yes, Black Video Player APK is safe to use as long as you download it from a trusted source. It does not contain any malware or viruses that can harm your device or data. - Q: How can I update Black Video Player APK? - A: You can update Black Video Player APK by downloading the latest version of the app from [this link]. You can also check for updates within the app by tapping on the menu icon > About > Check for updates. - Q: How can I share my media files with others using Black Video Player APK? - A: You can share your media files with others using Black Video Player APK by tapping on the share icon on the bottom of the screen. You can choose the app or platform you want to share your media file with, such as WhatsApp, Facebook, Twitter, etc. - Q: How can I delete or uninstall Black Video Player APK? - A: You can delete or uninstall Black Video Player APK by going to your device settings > Apps > Black Video Player APK > Uninstall. You can also long-press the app icon on your home screen or app drawer and drag it to the uninstall option. - Q: How can I contact the developer of Black Video Player APK? - A: You can contact the developer of Black Video Player APK by tapping on the menu icon > About > Contact us. You can also send an email to [this address] or visit [this website].</p> 401be4b1e0<br />
<br />
<br />
spaces/20four60/Auto-GPT/Dockerfile
DELETED
@@ -1,65 +0,0 @@
FROM zenmldocker/zenml-server:latest

ENV ZENML_ANALYTICS_OPT_IN=true
ENV ZENML_SERVER_DEPLOYMENT_TYPE="hf_spaces"
ENV ZENML_LOGGING_VERBOSITY=DEBUG

################################################################################
#
# CONFIGURING YOUR ZENML HF SPACES SERVER
# ---------------------------------------
# By default this space is not persistent. All ZenML metadata is stored in
# localstorage in a SQLite database. If you would like to make your storage
# persistent, use the appropriate environment variables below to configure the
# image to use a MySQL-compatible database service that is reachable from the
# container. See https://docs.zenml.io/getting-started/deploying-zenml/docker
# for more information on how to configure these environment variables.

# You can also configure the secrets store to use for your ZenML server. Be
# sure to use Huggingface Spaces' 'Repository Secrets' feature to store any
# secrets referenced here. See
# https://huggingface.co/docs/hub/spaces-overview#managing-secrets for more
# information on how to configure these environment variables.

# ENV ZENML_DEFAULT_PROJECT_NAME=""
# ENV ZENML_DEFAULT_USER_NAME=""
# ENV ZENML_DEFAULT_USER_PASSWORD=""
# ENV ZENML_STORE_URL=""
# ENV ZENML_STORE_SSL_CA=""
# ENV ZENML_STORE_SSL_CERT=""
# ENV ZENML_STORE_SSL_KEY=""
# ENV ZENML_STORE_SSL_VERIFY_SERVER_CERT=""

# ENV ZENML_LOGGING_VERBOSITY=""

# # SECRETS STORE CONFIGURATION
# ENV ZENML_SECRETS_STORE_TYPE=""
# ENV ZENML_SECRETS_STORE_ENCRYPTION_KEY=""
# ENV ZENML_SECRETS_STORE_CLASS_PATH=""
# ENV ZENML_JWT_SECRET_KEY=""

# # AWS Secrets Store Configuration
# ENV ZENML_SECRETS_STORE_REGION_NAME=""
# ENV ZENML_SECRETS_STORE_AWS_ACCESS_KEY_ID=""
# ENV ZENML_SECRETS_STORE_AWS_SECRET_ACCESS_KEY=""
# ENV ZENML_SECRETS_STORE_AWS_SESSION_TOKEN=""
# ENV ZENML_SECRETS_STORE_SECRET_LIST_REFRESH_TIMEOUT=""

# # GCP Secrets Store Configuration
# ENV ZENML_SECRETS_STORE_PROJECT_ID=""
# ENV GOOGLE_APPLICATION_CREDENTIALS=""

# # Azure Secrets Store Configuration
# ENV ZENML_SECRETS_STORE_KEY_VAULT_NAME=""
# ENV ZENML_SECRETS_STORE_AZURE_CLIENT_ID=""
# ENV ZENML_SECRETS_STORE_AZURE_CLIENT_SECRET=""
# ENV ZENML_SECRETS_STORE_AZURE_TENANT_ID=""

# # Hashicorp Secrets Store Configuration
# ENV ZENML_SECRETS_STORE_VAULT_ADDR=""
# ENV ZENML_SECRETS_STORE_VAULT_TOKEN=""
# ENV ZENML_SECRETS_STORE_VAULT_NAMESPACE=""
# ENV ZENML_SECRETS_STORE_MAX_VERSIONS=""

ENTRYPOINT ["uvicorn", "zenml.zen_server.zen_server_api:app", "--log-level", "debug"]
CMD ["--proxy-headers", "--port", "8080", "--host", "0.0.0.0"]
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/camera.py
DELETED
@@ -1,437 +0,0 @@
"""Virtual cameras compliant with the glTF 2.0 specification as described at
https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#reference-camera

Author: Matthew Matl
"""
import abc
import numpy as np
import six
import sys

from .constants import DEFAULT_Z_NEAR, DEFAULT_Z_FAR


@six.add_metaclass(abc.ABCMeta)
class Camera(object):
    """Abstract base class for all cameras.

    Note
    ----
    Camera poses are specified in the OpenGL format,
    where the z axis points away from the view direction and the
    x and y axes point to the right and up in the image plane, respectively.

    Parameters
    ----------
    znear : float
        The floating-point distance to the near clipping plane.
    zfar : float
        The floating-point distance to the far clipping plane.
        ``zfar`` must be greater than ``znear``.
    name : str, optional
        The user-defined name of this object.
    """

    def __init__(self,
                 znear=DEFAULT_Z_NEAR,
                 zfar=DEFAULT_Z_FAR,
                 name=None):
        self.name = name
        self.znear = znear
        self.zfar = zfar

    @property
    def name(self):
        """str : The user-defined name of this object.
        """
        return self._name

    @name.setter
    def name(self, value):
        if value is not None:
            value = str(value)
        self._name = value

    @property
    def znear(self):
        """float : The distance to the near clipping plane.
        """
        return self._znear

    @znear.setter
    def znear(self, value):
        value = float(value)
        if value < 0:
            raise ValueError('z-near must be >= 0.0')
        self._znear = value

    @property
    def zfar(self):
        """float : The distance to the far clipping plane.
        """
        return self._zfar

    @zfar.setter
    def zfar(self, value):
        value = float(value)
        if value <= 0 or value <= self.znear:
            raise ValueError('zfar must be >0 and >znear')
        self._zfar = value

    @abc.abstractmethod
    def get_projection_matrix(self, width=None, height=None):
        """Return the OpenGL projection matrix for this camera.

        Parameters
        ----------
        width : int
            Width of the current viewport, in pixels.
        height : int
            Height of the current viewport, in pixels.
        """
        pass


class PerspectiveCamera(Camera):

    """A perspective camera for perspective projection.

    Parameters
    ----------
    yfov : float
        The floating-point vertical field of view in radians.
    znear : float
        The floating-point distance to the near clipping plane.
        If not specified, defaults to 0.05.
    zfar : float, optional
        The floating-point distance to the far clipping plane.
        ``zfar`` must be greater than ``znear``.
        If None, the camera uses an infinite projection matrix.
    aspectRatio : float, optional
        The floating-point aspect ratio of the field of view.
        If not specified, the camera uses the viewport's aspect ratio.
    name : str, optional
        The user-defined name of this object.
    """

    def __init__(self,
                 yfov,
                 znear=DEFAULT_Z_NEAR,
                 zfar=None,
                 aspectRatio=None,
                 name=None):
        super(PerspectiveCamera, self).__init__(
            znear=znear,
            zfar=zfar,
            name=name,
        )

        self.yfov = yfov
        self.aspectRatio = aspectRatio

    @property
    def yfov(self):
        """float : The vertical field of view in radians.
        """
        return self._yfov

    @yfov.setter
    def yfov(self, value):
        value = float(value)
        if value <= 0.0:
            raise ValueError('Field of view must be positive')
        self._yfov = value

    @property
    def zfar(self):
        """float : The distance to the far clipping plane.
        """
        return self._zfar

    @zfar.setter
    def zfar(self, value):
        if value is not None:
            value = float(value)
            if value <= 0 or value <= self.znear:
                raise ValueError('zfar must be >0 and >znear')
        self._zfar = value

    @property
    def aspectRatio(self):
        """float : The ratio of the width to the height of the field of view.
        """
        return self._aspectRatio

    @aspectRatio.setter
    def aspectRatio(self, value):
        if value is not None:
            value = float(value)
            if value <= 0.0:
                raise ValueError('Aspect ratio must be positive')
        self._aspectRatio = value

    def get_projection_matrix(self, width=None, height=None):
        """Return the OpenGL projection matrix for this camera.

        Parameters
        ----------
        width : int
            Width of the current viewport, in pixels.
        height : int
            Height of the current viewport, in pixels.
        """
        aspect_ratio = self.aspectRatio
        if aspect_ratio is None:
            if width is None or height is None:
                raise ValueError('Aspect ratio of camera must be defined')
            aspect_ratio = float(width) / float(height)

        a = aspect_ratio
        t = np.tan(self.yfov / 2.0)
        n = self.znear
        f = self.zfar

        P = np.zeros((4,4))
        P[0][0] = 1.0 / (a * t)
        P[1][1] = 1.0 / t
        P[3][2] = -1.0

        if f is None:
            P[2][2] = -1.0
            P[2][3] = -2.0 * n
        else:
            P[2][2] = (f + n) / (n - f)
            P[2][3] = (2 * f * n) / (n - f)

        return P


class OrthographicCamera(Camera):
    """An orthographic camera for orthographic projection.

    Parameters
    ----------
    xmag : float
        The floating-point horizontal magnification of the view.
    ymag : float
        The floating-point vertical magnification of the view.
    znear : float
        The floating-point distance to the near clipping plane.
        If not specified, defaults to 0.05.
    zfar : float
        The floating-point distance to the far clipping plane.
        ``zfar`` must be greater than ``znear``.
        If not specified, defaults to 100.0.
    name : str, optional
        The user-defined name of this object.
    """

    def __init__(self,
                 xmag,
                 ymag,
                 znear=DEFAULT_Z_NEAR,
                 zfar=DEFAULT_Z_FAR,
                 name=None):
        super(OrthographicCamera, self).__init__(
            znear=znear,
            zfar=zfar,
            name=name,
        )

        self.xmag = xmag
        self.ymag = ymag

    @property
    def xmag(self):
        """float : The horizontal magnification of the view.
        """
        return self._xmag

    @xmag.setter
    def xmag(self, value):
        value = float(value)
        if value <= 0.0:
            raise ValueError('X magnification must be positive')
        self._xmag = value

    @property
    def ymag(self):
        """float : The vertical magnification of the view.
        """
        return self._ymag

    @ymag.setter
    def ymag(self, value):
        value = float(value)
        if value <= 0.0:
            raise ValueError('Y magnification must be positive')
        self._ymag = value

    @property
    def znear(self):
        """float : The distance to the near clipping plane.
        """
        return self._znear

    @znear.setter
    def znear(self, value):
        value = float(value)
        if value <= 0:
            raise ValueError('z-near must be > 0.0')
        self._znear = value

    def get_projection_matrix(self, width=None, height=None):
        """Return the OpenGL projection matrix for this camera.

        Parameters
        ----------
        width : int
            Width of the current viewport, in pixels.
            Unused in this function.
        height : int
            Height of the current viewport, in pixels.
            Unused in this function.
        """
        xmag = self.xmag
        ymag = self.ymag

        # If screen width/height defined, rescale xmag
        if width is not None and height is not None:
            xmag = width / height * ymag

        n = self.znear
        f = self.zfar
        P = np.zeros((4,4))
        P[0][0] = 1.0 / xmag
        P[1][1] = 1.0 / ymag
        P[2][2] = 2.0 / (n - f)
        P[2][3] = (f + n) / (n - f)
        P[3][3] = 1.0
        return P


class IntrinsicsCamera(Camera):
    """A perspective camera with custom intrinsics.

    Parameters
    ----------
    fx : float
        X-axis focal length in pixels.
    fy : float
        Y-axis focal length in pixels.
    cx : float
        X-axis optical center in pixels.
    cy : float
        Y-axis optical center in pixels.
    znear : float
        The floating-point distance to the near clipping plane.
        If not specified, defaults to 0.05.
    zfar : float
        The floating-point distance to the far clipping plane.
        ``zfar`` must be greater than ``znear``.
        If not specified, defaults to 100.0.
    name : str, optional
        The user-defined name of this object.
    """

    def __init__(self,
                 fx,
                 fy,
                 cx,
                 cy,
                 znear=DEFAULT_Z_NEAR,
                 zfar=DEFAULT_Z_FAR,
                 name=None):
        super(IntrinsicsCamera, self).__init__(
            znear=znear,
            zfar=zfar,
            name=name,
        )

        self.fx = fx
        self.fy = fy
        self.cx = cx
        self.cy = cy

    @property
    def fx(self):
        """float : X-axis focal length in meters.
        """
        return self._fx

    @fx.setter
    def fx(self, value):
        self._fx = float(value)

    @property
    def fy(self):
        """float : Y-axis focal length in meters.
        """
        return self._fy

    @fy.setter
    def fy(self, value):
        self._fy = float(value)

    @property
    def cx(self):
        """float : X-axis optical center in pixels.
        """
        return self._cx

    @cx.setter
    def cx(self, value):
        self._cx = float(value)

    @property
    def cy(self):
        """float : Y-axis optical center in pixels.
        """
        return self._cy

    @cy.setter
    def cy(self, value):
        self._cy = float(value)

    def get_projection_matrix(self, width, height):
        """Return the OpenGL projection matrix for this camera.

        Parameters
        ----------
        width : int
            Width of the current viewport, in pixels.
        height : int
            Height of the current viewport, in pixels.
        """
        width = float(width)
        height = float(height)

        cx, cy = self.cx, self.cy
        fx, fy = self.fx, self.fy
        if sys.platform == 'darwin':
            cx = self.cx * 2.0
            cy = self.cy * 2.0
            fx = self.fx * 2.0
            fy = self.fy * 2.0

        P = np.zeros((4,4))
        P[0][0] = 2.0 * fx / width
        P[1][1] = 2.0 * fy / height
        P[0][2] = 1.0 - 2.0 * cx / width
        P[1][2] = 2.0 * cy / height - 1.0
        P[3][2] = -1.0

        n = self.znear
        f = self.zfar
        if f is None:
            P[2][2] = -1.0
            P[2][3] = -2.0 * n
        else:
            P[2][2] = (f + n) / (n - f)
            P[2][3] = (2 * f * n) / (n - f)

        return P


__all__ = ['Camera', 'PerspectiveCamera', 'OrthographicCamera',
           'IntrinsicsCamera']
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/audio/pitch_extractors.py
DELETED
@@ -1,85 +0,0 @@
import numpy as np
from text_to_speech.utils.audio.pitch.utils import denorm_f0, norm_f0, f0_to_coarse
import parselmouth

PITCH_EXTRACTOR = {}


def register_pitch_extractor(name):
    def register_pitch_extractor_(cls):
        PITCH_EXTRACTOR[name] = cls
        return cls

    return register_pitch_extractor_


def get_pitch_extractor(name):
    return PITCH_EXTRACTOR[name]


def extract_pitch_simple(wav):
    from text_to_speech.utils.commons.hparams import hparams
    return extract_pitch(hparams['pitch_extractor'], wav,
                         hparams['hop_size'], hparams['audio_sample_rate'],
                         f0_min=hparams['f0_min'], f0_max=hparams['f0_max'])


def extract_pitch(extractor_name, wav_data, hop_size, audio_sample_rate, f0_min=75, f0_max=800, **kwargs):
    return get_pitch_extractor(extractor_name)(wav_data, hop_size, audio_sample_rate, f0_min, f0_max, **kwargs)


@register_pitch_extractor('parselmouth')
def parselmouth_pitch(wav_data, hop_size, audio_sample_rate, f0_min, f0_max,
                      voicing_threshold=0.6, *args, **kwargs):
    import parselmouth
    time_step = hop_size / audio_sample_rate * 1000
    n_mel_frames = int(len(wav_data) // hop_size)
    f0_pm = parselmouth.Sound(wav_data, audio_sample_rate).to_pitch_ac(
        time_step=time_step / 1000, voicing_threshold=voicing_threshold,
        pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
    pad_size = (n_mel_frames - len(f0_pm) + 1) // 2
    f0 = np.pad(f0_pm, [[pad_size, n_mel_frames - len(f0_pm) - pad_size]], mode='constant')
    return f0


def get_pitch(wav_data, mel, hparams):
    """
    :param wav_data: [T]
    :param mel: [T, 80]
    :param hparams:
    :return:
    """
    time_step = hparams['hop_size'] / hparams['audio_sample_rate'] * 1000
    f0_min = 80
    f0_max = 750

    if hparams['pitch_extractor'] == 'harvest':
        import pyworld as pw
        f0, t = pw.harvest(wav_data.astype(np.double), hparams['audio_sample_rate'],
                           frame_period=hparams['hop_size'] / hparams['audio_sample_rate'] * 1000)
    if hparams['pitch_extractor'] == 'dio':
        _f0, t = pw.dio(wav_data.astype(np.double), hparams['audio_sample_rate'],
                        frame_period=hparams['hop_size'] / hparams['audio_sample_rate'] * 1000)
        f0 = pw.stonemask(wav_data.astype(np.double), _f0, t, hparams['audio_sample_rate'])  # pitch refinement
    elif hparams['pitch_extractor'] == 'parselmouth':
        if hparams['hop_size'] == 128:
            pad_size = 4
        elif hparams['hop_size'] == 256:
            pad_size = 2
        else:
            assert False
        f0 = parselmouth.Sound(wav_data, hparams['audio_sample_rate']).to_pitch_ac(
            time_step=time_step / 1000, voicing_threshold=0.6,
            pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array['frequency']
        lpad = pad_size * 2
        rpad = len(mel) - len(f0) - lpad
        f0 = np.pad(f0, [[lpad, rpad]], mode='constant')

    # mel and f0 are extracted by two different libraries, so their lengths must be kept consistent
    delta_l = len(mel) - len(f0)
    assert np.abs(delta_l) <= 8
    if delta_l > 0:
        f0 = np.concatenate([f0, [f0[-1]] * delta_l], 0)
    f0 = f0[:len(mel)]
    pitch_coarse = f0_to_coarse(f0)
    return f0, pitch_coarse
spaces/ASJMO/freegpt/client/css/dropdown.css
DELETED
@@ -1,10 +0,0 @@
.dropdown {
    border: 1px solid var(--conversations);
}

@media screen and (max-width: 990px) {
    .dropdown {
        padding: 4px 8px;
        font-size: 0.75rem;
    }
}
spaces/Adapter/T2I-Adapter/ldm/modules/extra_condition/utils.py
DELETED
@@ -1,72 +0,0 @@
# -*- coding: utf-8 -*-
import cv2
import numpy as np

skeleton = [[15, 13], [13, 11], [16, 14], [14, 12], [11, 12], [5, 11], [6, 12], [5, 6], [5, 7], [6, 8], [7, 9], [8, 10],
            [1, 2], [0, 1], [0, 2], [1, 3], [2, 4], [3, 5], [4, 6]]

pose_kpt_color = [[51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255], [0, 255, 0],
                  [255, 128, 0], [0, 255, 0], [255, 128, 0], [0, 255, 0], [255, 128, 0], [0, 255, 0], [255, 128, 0],
                  [0, 255, 0], [255, 128, 0], [0, 255, 0], [255, 128, 0]]

pose_link_color = [[0, 255, 0], [0, 255, 0], [255, 128, 0], [255, 128, 0],
                   [51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255], [0, 255, 0], [255, 128, 0],
                   [0, 255, 0], [255, 128, 0], [51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255],
                   [51, 153, 255], [51, 153, 255], [51, 153, 255]]


def imshow_keypoints(img,
                     pose_result,
                     kpt_score_thr=0.1,
                     radius=2,
                     thickness=2):
    """Draw keypoints and links on an image.

    Args:
        img (ndarry): The image to draw poses on.
        pose_result (list[kpts]): The poses to draw. Each element kpts is
            a set of K keypoints as an Kx3 numpy.ndarray, where each
            keypoint is represented as x, y, score.
        kpt_score_thr (float, optional): Minimum score of keypoints
            to be shown. Default: 0.3.
        thickness (int): Thickness of lines.
    """

    img_h, img_w, _ = img.shape
    img = np.zeros(img.shape)

    for idx, kpts in enumerate(pose_result):
        if idx > 1:
            continue
        kpts = kpts['keypoints']
        # print(kpts)
        kpts = np.array(kpts, copy=False)

        # draw each point on image
        assert len(pose_kpt_color) == len(kpts)

        for kid, kpt in enumerate(kpts):
            x_coord, y_coord, kpt_score = int(kpt[0]), int(kpt[1]), kpt[2]

            if kpt_score < kpt_score_thr or pose_kpt_color[kid] is None:
                # skip the point that should not be drawn
                continue

            color = tuple(int(c) for c in pose_kpt_color[kid])
            cv2.circle(img, (int(x_coord), int(y_coord)), radius, color, -1)

        # draw links

        for sk_id, sk in enumerate(skeleton):
            pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1]))
            pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1]))

            if (pos1[0] <= 0 or pos1[0] >= img_w or pos1[1] <= 0 or pos1[1] >= img_h or pos2[0] <= 0
                    or pos2[0] >= img_w or pos2[1] <= 0 or pos2[1] >= img_h or kpts[sk[0], 2] < kpt_score_thr
                    or kpts[sk[1], 2] < kpt_score_thr or pose_link_color[sk_id] is None):
                # skip the link that should not be drawn
                continue
            color = tuple(int(c) for c in pose_link_color[sk_id])
            cv2.line(img, pos1, pos2, color, thickness=thickness)

    return img

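A minimal sketch of how this helper might be called; the 17-point COCO-style keypoint array below is made up for illustration, and only the shape of the input image is actually used by the function.

import numpy as np

canvas = np.zeros((256, 256, 3), dtype=np.uint8)   # only canvas.shape is used
kpts = np.random.uniform(20, 236, size=(17, 3))    # 17 dummy keypoints (x, y, score)
kpts[:, 2] = 1.0                                   # mark every point as confident
pose_map = imshow_keypoints(canvas, [{'keypoints': kpts}])
print(pose_map.shape)                              # (256, 256, 3): skeleton drawn on a black canvas
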
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/GetChildWidth.js
DELETED
@@ -1,18 +0,0 @@
import { GetDisplayWidth } from '../../../plugins/utils/size/GetDisplaySize.js';

var GetChildWidth = function (child) {
    var childWidth;
    if (child.isRexSizer) { // Sizer game object
        childWidth = Math.max(child.minWidth, child.childrenWidth);
    } else { // Normal game object
        if (child.minWidth !== undefined) { // Force minWidth
            childWidth = child.minWidth;
        } else {
            childWidth = GetDisplayWidth(child);
        }
    }

    return childWidth;
}

export default GetChildWidth;

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/menu/methods/CollapseSubMenu.js
DELETED
@@ -1,12 +0,0 @@
var CollapseSubMenu = function () {
    var subMenu = this.childrenMap.subMenu;
    if (subMenu === undefined) {
        return this;
    }

    this.childrenMap.subMenu = undefined;
    this.remove(subMenu);
    subMenu.collapse();
    return this;
}
export default CollapseSubMenu;

spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/press/Press.d.ts
DELETED
@@ -1,2 +0,0 @@
import { Press } from '../../../plugins/gestures';
export default Press;

spaces/AkashKhamkar/Job_Search_Engine/loader.py
DELETED
@@ -1,11 +0,0 @@
from sentence_transformers import SentenceTransformer, CrossEncoder, util
import pandas as pd
import pickle

bi_encoder = SentenceTransformer("multi-qa-MiniLM-L6-cos-v1")
cross_encoder = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")
df = pd.read_csv('job_corpus_dataframe.csv')
pickle_in = open("job_corpus.pickle","rb")
job_corpus = pickle.load(pickle_in)
pickle_in = open("job_corpus_encoded.pickle","rb")
job_corpus_ecoded = pickle.load(pickle_in)

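A minimal sketch of the retrieve-and-rerank pattern these two models are typically loaded for; the corpus and query strings below are made up, and this assumes sentence-transformers is installed.

from sentence_transformers import SentenceTransformer, CrossEncoder, util

bi_encoder = SentenceTransformer("multi-qa-MiniLM-L6-cos-v1")
cross_encoder = CrossEncoder("cross-encoder/ms-marco-MiniLM-L-6-v2")

corpus = ["Senior ML engineer, remote", "Frontend developer, Berlin"]   # dummy job corpus
corpus_emb = bi_encoder.encode(corpus, convert_to_tensor=True)

query = "machine learning job"
hits = util.semantic_search(bi_encoder.encode(query, convert_to_tensor=True),
                            corpus_emb, top_k=2)[0]                     # fast candidate retrieval
scores = cross_encoder.predict([[query, corpus[h['corpus_id']]] for h in hits])  # precise re-ranking
print(sorted(zip(scores, hits), key=lambda x: -x[0])[0])
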
spaces/Alpaca233/SadTalker/src/facerender/modules/make_animation.py
DELETED
@@ -1,170 +0,0 @@
from scipy.spatial import ConvexHull
import torch
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm

def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,
                 use_relative_movement=False, use_relative_jacobian=False):
    if adapt_movement_scale:
        source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume
        driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume
        adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)
    else:
        adapt_movement_scale = 1

    kp_new = {k: v for k, v in kp_driving.items()}

    if use_relative_movement:
        kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])
        kp_value_diff *= adapt_movement_scale
        kp_new['value'] = kp_value_diff + kp_source['value']

        if use_relative_jacobian:
            jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian']))
            kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])

    return kp_new

def headpose_pred_to_degree(pred):
    device = pred.device
    idx_tensor = [idx for idx in range(66)]
    idx_tensor = torch.FloatTensor(idx_tensor).type_as(pred).to(device)
    pred = F.softmax(pred)
    degree = torch.sum(pred*idx_tensor, 1) * 3 - 99
    return degree

def get_rotation_matrix(yaw, pitch, roll):
    yaw = yaw / 180 * 3.14
    pitch = pitch / 180 * 3.14
    roll = roll / 180 * 3.14

    roll = roll.unsqueeze(1)
    pitch = pitch.unsqueeze(1)
    yaw = yaw.unsqueeze(1)

    pitch_mat = torch.cat([torch.ones_like(pitch), torch.zeros_like(pitch), torch.zeros_like(pitch),
                           torch.zeros_like(pitch), torch.cos(pitch), -torch.sin(pitch),
                           torch.zeros_like(pitch), torch.sin(pitch), torch.cos(pitch)], dim=1)
    pitch_mat = pitch_mat.view(pitch_mat.shape[0], 3, 3)

    yaw_mat = torch.cat([torch.cos(yaw), torch.zeros_like(yaw), torch.sin(yaw),
                         torch.zeros_like(yaw), torch.ones_like(yaw), torch.zeros_like(yaw),
                         -torch.sin(yaw), torch.zeros_like(yaw), torch.cos(yaw)], dim=1)
    yaw_mat = yaw_mat.view(yaw_mat.shape[0], 3, 3)

    roll_mat = torch.cat([torch.cos(roll), -torch.sin(roll), torch.zeros_like(roll),
                          torch.sin(roll), torch.cos(roll), torch.zeros_like(roll),
                          torch.zeros_like(roll), torch.zeros_like(roll), torch.ones_like(roll)], dim=1)
    roll_mat = roll_mat.view(roll_mat.shape[0], 3, 3)

    rot_mat = torch.einsum('bij,bjk,bkm->bim', pitch_mat, yaw_mat, roll_mat)

    return rot_mat

def keypoint_transformation(kp_canonical, he, wo_exp=False):
    kp = kp_canonical['value']    # (bs, k, 3)
    yaw, pitch, roll = he['yaw'], he['pitch'], he['roll']
    yaw = headpose_pred_to_degree(yaw)
    pitch = headpose_pred_to_degree(pitch)
    roll = headpose_pred_to_degree(roll)

    if 'yaw_in' in he:
        yaw = he['yaw_in']
    if 'pitch_in' in he:
        pitch = he['pitch_in']
    if 'roll_in' in he:
        roll = he['roll_in']

    rot_mat = get_rotation_matrix(yaw, pitch, roll)    # (bs, 3, 3)

    t, exp = he['t'], he['exp']
    if wo_exp:
        exp = exp*0

    # keypoint rotation
    kp_rotated = torch.einsum('bmp,bkp->bkm', rot_mat, kp)

    # keypoint translation
    t[:, 0] = t[:, 0]*0
    t[:, 2] = t[:, 2]*0
    t = t.unsqueeze(1).repeat(1, kp.shape[1], 1)
    kp_t = kp_rotated + t

    # add expression deviation
    exp = exp.view(exp.shape[0], -1, 3)
    kp_transformed = kp_t + exp

    return {'value': kp_transformed}



def make_animation(source_image, source_semantics, target_semantics,
                   generator, kp_detector, he_estimator, mapping,
                   yaw_c_seq=None, pitch_c_seq=None, roll_c_seq=None,
                   use_exp=True, use_half=False):
    with torch.no_grad():
        predictions = []

        kp_canonical = kp_detector(source_image)
        he_source = mapping(source_semantics)
        kp_source = keypoint_transformation(kp_canonical, he_source)

        for frame_idx in tqdm(range(target_semantics.shape[1]), 'Face Renderer:'):
            # still check the dimension
            # print(target_semantics.shape, source_semantics.shape)
            target_semantics_frame = target_semantics[:, frame_idx]
            he_driving = mapping(target_semantics_frame)
            if yaw_c_seq is not None:
                he_driving['yaw_in'] = yaw_c_seq[:, frame_idx]
            if pitch_c_seq is not None:
                he_driving['pitch_in'] = pitch_c_seq[:, frame_idx]
            if roll_c_seq is not None:
                he_driving['roll_in'] = roll_c_seq[:, frame_idx]

            kp_driving = keypoint_transformation(kp_canonical, he_driving)

            kp_norm = kp_driving
            out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)
            '''
            source_image_new = out['prediction'].squeeze(1)
            kp_canonical_new = kp_detector(source_image_new)
            he_source_new = he_estimator(source_image_new)
            kp_source_new = keypoint_transformation(kp_canonical_new, he_source_new, wo_exp=True)
            kp_driving_new = keypoint_transformation(kp_canonical_new, he_driving, wo_exp=True)
            out = generator(source_image_new, kp_source=kp_source_new, kp_driving=kp_driving_new)
            '''
            predictions.append(out['prediction'])
        predictions_ts = torch.stack(predictions, dim=1)
    return predictions_ts

class AnimateModel(torch.nn.Module):
    """
    Merge all generator related updates into single model for better multi-gpu usage
    """

    def __init__(self, generator, kp_extractor, mapping):
        super(AnimateModel, self).__init__()
        self.kp_extractor = kp_extractor
        self.generator = generator
        self.mapping = mapping

        self.kp_extractor.eval()
        self.generator.eval()
        self.mapping.eval()

    def forward(self, x):

        source_image = x['source_image']
        source_semantics = x['source_semantics']
        target_semantics = x['target_semantics']
        yaw_c_seq = x['yaw_c_seq']
        pitch_c_seq = x['pitch_c_seq']
        roll_c_seq = x['roll_c_seq']

        predictions_video = make_animation(source_image, source_semantics, target_semantics,
                                           self.generator, self.kp_extractor,
                                           self.mapping, use_exp = True,
                                           yaw_c_seq=yaw_c_seq, pitch_c_seq=pitch_c_seq, roll_c_seq=roll_c_seq)

        return predictions_video

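A quick sanity-check sketch of the Euler-angle helper defined above, with arbitrary angles in degrees and batch size 1. Because the file approximates pi as 3.14, the result is only approximately a pure rotation.

import torch

yaw = torch.tensor([0.0]); pitch = torch.tensor([0.0]); roll = torch.tensor([90.0])
R = get_rotation_matrix(yaw, pitch, roll)   # composes pitch_mat @ yaw_mat @ roll_mat, shape (1, 3, 3)
print(R[0])                                 # approximately a 90-degree roll about the z-axis
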
spaces/Alpaca233/SadTalker/webui.sh
DELETED
@@ -1,140 +0,0 @@
#!/usr/bin/env bash


# If run from macOS, load defaults from webui-macos-env.sh
if [[ "$OSTYPE" == "darwin"* ]]; then
    export TORCH_COMMAND="pip install torch==1.12.1 torchvision==0.13.1"
fi

# python3 executable
if [[ -z "${python_cmd}" ]]
then
    python_cmd="python3"
fi

# git executable
if [[ -z "${GIT}" ]]
then
    export GIT="git"
fi

# python3 venv without trailing slash (defaults to ${install_dir}/${clone_dir}/venv)
if [[ -z "${venv_dir}" ]]
then
    venv_dir="venv"
fi

if [[ -z "${LAUNCH_SCRIPT}" ]]
then
    LAUNCH_SCRIPT="launcher.py"
fi

# this script cannot be run as root by default
can_run_as_root=1

# read any command line flags to the webui.sh script
while getopts "f" flag > /dev/null 2>&1
do
    case ${flag} in
        f) can_run_as_root=1;;
        *) break;;
    esac
done

# Disable sentry logging
export ERROR_REPORTING=FALSE

# Do not reinstall existing pip packages on Debian/Ubuntu
export PIP_IGNORE_INSTALLED=0

# Pretty print
delimiter="################################################################"

printf "\n%s\n" "${delimiter}"
printf "\e[1m\e[32mInstall script for SadTalker + Web UI\n"
printf "\e[1m\e[34mTested on Debian 11 (Bullseye)\e[0m"
printf "\n%s\n" "${delimiter}"

# Do not run as root
if [[ $(id -u) -eq 0 && can_run_as_root -eq 0 ]]
then
    printf "\n%s\n" "${delimiter}"
    printf "\e[1m\e[31mERROR: This script must not be launched as root, aborting...\e[0m"
    printf "\n%s\n" "${delimiter}"
    exit 1
else
    printf "\n%s\n" "${delimiter}"
    printf "Running on \e[1m\e[32m%s\e[0m user" "$(whoami)"
    printf "\n%s\n" "${delimiter}"
fi

if [[ -d .git ]]
then
    printf "\n%s\n" "${delimiter}"
    printf "Repo already cloned, using it as install directory"
    printf "\n%s\n" "${delimiter}"
    install_dir="${PWD}/../"
    clone_dir="${PWD##*/}"
fi

# Check prerequisites
gpu_info=$(lspci 2>/dev/null | grep VGA)
case "$gpu_info" in
    *"Navi 1"*|*"Navi 2"*) export HSA_OVERRIDE_GFX_VERSION=10.3.0
    ;;
    *"Renoir"*) export HSA_OVERRIDE_GFX_VERSION=9.0.0
        printf "\n%s\n" "${delimiter}"
        printf "Experimental support for Renoir: make sure to have at least 4GB of VRAM and 10GB of RAM or enable cpu mode: --use-cpu all --no-half"
        printf "\n%s\n" "${delimiter}"
    ;;
    *)
    ;;
esac
if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]]
then
    export TORCH_COMMAND="pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/rocm5.2"
fi

for preq in "${GIT}" "${python_cmd}"
do
    if ! hash "${preq}" &>/dev/null
    then
        printf "\n%s\n" "${delimiter}"
        printf "\e[1m\e[31mERROR: %s is not installed, aborting...\e[0m" "${preq}"
        printf "\n%s\n" "${delimiter}"
        exit 1
    fi
done

if ! "${python_cmd}" -c "import venv" &>/dev/null
then
    printf "\n%s\n" "${delimiter}"
    printf "\e[1m\e[31mERROR: python3-venv is not installed, aborting...\e[0m"
    printf "\n%s\n" "${delimiter}"
    exit 1
fi

printf "\n%s\n" "${delimiter}"
printf "Create and activate python venv"
printf "\n%s\n" "${delimiter}"
cd "${install_dir}"/"${clone_dir}"/ || { printf "\e[1m\e[31mERROR: Can't cd to %s/%s/, aborting...\e[0m" "${install_dir}" "${clone_dir}"; exit 1; }
if [[ ! -d "${venv_dir}" ]]
then
    "${python_cmd}" -m venv "${venv_dir}"
    first_launch=1
fi
# shellcheck source=/dev/null
if [[ -f "${venv_dir}"/bin/activate ]]
then
    source "${venv_dir}"/bin/activate
else
    printf "\n%s\n" "${delimiter}"
    printf "\e[1m\e[31mERROR: Cannot activate python venv, aborting...\e[0m"
    printf "\n%s\n" "${delimiter}"
    exit 1
fi

printf "\n%s\n" "${delimiter}"
printf "Launching launcher.py..."
printf "\n%s\n" "${delimiter}"
exec "${python_cmd}" "${LAUNCH_SCRIPT}" "$@"

spaces/Aman30577/imageTool1/README.md
DELETED
@@ -1,12 +0,0 @@
---
title: ImageTool1
emoji: 🚀
colorFrom: green
colorTo: pink
sdk: gradio
sdk_version: 3.37.0
app_file: app.py
pinned: false
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference

spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/criteria/__init__.py
DELETED
File without changes
spaces/Amrrs/DragGan-Inversion/stylegan_human/PP_HumanSeg/pretrained_model/download_pretrained_model.py
DELETED
@@ -1,44 +0,0 @@
# coding: utf8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddleseg.utils.download import download_file_and_uncompress
import sys
import os

LOCAL_PATH = os.path.dirname(os.path.abspath(__file__))
TEST_PATH = os.path.join(LOCAL_PATH, "../../../", "test")
sys.path.append(TEST_PATH)


model_urls = {
    "pphumanseg_lite_portrait_398x224":
    "https://paddleseg.bj.bcebos.com/dygraph/ppseg/ppseg_lite_portrait_398x224.tar.gz",
    "deeplabv3p_resnet50_os8_humanseg_512x512_100k":
    "https://paddleseg.bj.bcebos.com/dygraph/humanseg/train/deeplabv3p_resnet50_os8_humanseg_512x512_100k.zip",
    "fcn_hrnetw18_small_v1_humanseg_192x192":
    "https://paddleseg.bj.bcebos.com/dygraph/humanseg/train/fcn_hrnetw18_small_v1_humanseg_192x192.zip",
    "pphumanseg_lite_generic_human_192x192":
    "https://paddleseg.bj.bcebos.com/dygraph/humanseg/train/pphumanseg_lite_generic_192x192.zip",
}

if __name__ == "__main__":
    for model_name, url in model_urls.items():
        download_file_and_uncompress(
            url=url,
            savepath=LOCAL_PATH,
            extrapath=LOCAL_PATH,
            extraname=model_name)

    print("Pretrained model download success!")

spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/utils/print_env.py
DELETED
@@ -1,48 +0,0 @@
#!/usr/bin/env python3

# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)

print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)

spaces/Andy1621/uniformer_image_detection/configs/resnest/faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
DELETED
@@ -1,4 +0,0 @@
_base_ = './faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnest101',
    backbone=dict(stem_channels=128, depth=101))

spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/fcn_unet_s5-d16.py
DELETED
@@ -1,51 +0,0 @@
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained=None,
    backbone=dict(
        type='UNet',
        in_channels=3,
        base_channels=64,
        num_stages=5,
        strides=(1, 1, 1, 1, 1),
        enc_num_convs=(2, 2, 2, 2, 2),
        dec_num_convs=(2, 2, 2, 2),
        downsamples=(True, True, True, True),
        enc_dilations=(1, 1, 1, 1, 1),
        dec_dilations=(1, 1, 1, 1),
        with_cp=False,
        conv_cfg=None,
        norm_cfg=norm_cfg,
        act_cfg=dict(type='ReLU'),
        upsample_cfg=dict(type='InterpConv'),
        norm_eval=False),
    decode_head=dict(
        type='FCNHead',
        in_channels=64,
        in_index=4,
        channels=64,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=2,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=128,
        in_index=3,
        channels=64,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=2,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='slide', crop_size=256, stride=170))

spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/utils/version_utils.py
DELETED
@@ -1,90 +0,0 @@
# Copyright (c) OpenMMLab. All rights reserved.
import os
import subprocess
import warnings

from packaging.version import parse


def digit_version(version_str: str, length: int = 4):
    """Convert a version string into a tuple of integers.

    This method is usually used for comparing two versions. For pre-release
    versions: alpha < beta < rc.

    Args:
        version_str (str): The version string.
        length (int): The maximum number of version levels. Default: 4.

    Returns:
        tuple[int]: The version info in digits (integers).
    """
    assert 'parrots' not in version_str
    version = parse(version_str)
    assert version.release, f'failed to parse version {version_str}'
    release = list(version.release)
    release = release[:length]
    if len(release) < length:
        release = release + [0] * (length - len(release))
    if version.is_prerelease:
        mapping = {'a': -3, 'b': -2, 'rc': -1}
        val = -4
        # version.pre can be None
        if version.pre:
            if version.pre[0] not in mapping:
                warnings.warn(f'unknown prerelease version {version.pre[0]}, '
                              'version checking may go wrong')
            else:
                val = mapping[version.pre[0]]
            release.extend([val, version.pre[-1]])
        else:
            release.extend([val, 0])

    elif version.is_postrelease:
        release.extend([1, version.post])
    else:
        release.extend([0, 0])
    return tuple(release)


def _minimal_ext_cmd(cmd):
    # construct minimal environment
    env = {}
    for k in ['SYSTEMROOT', 'PATH', 'HOME']:
        v = os.environ.get(k)
        if v is not None:
            env[k] = v
    # LANGUAGE is used on win32
    env['LANGUAGE'] = 'C'
    env['LANG'] = 'C'
    env['LC_ALL'] = 'C'
    out = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
    return out


def get_git_hash(fallback='unknown', digits=None):
    """Get the git hash of the current repo.

    Args:
        fallback (str, optional): The fallback string when git hash is
            unavailable. Defaults to 'unknown'.
        digits (int, optional): kept digits of the hash. Defaults to None,
            meaning all digits are kept.

    Returns:
        str: Git commit hash.
    """

    if digits is not None and not isinstance(digits, int):
        raise TypeError('digits must be None or an integer')

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        sha = out.strip().decode('ascii')
        if digits is not None:
            sha = sha[:digits]
    except OSError:
        sha = fallback

    return sha

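A small illustration of how digit_version orders releases and pre-releases (a sketch; it only exercises the function defined above and requires the packaging library):

print(digit_version('1.3.16'))      # (1, 3, 16, 0, 0, 0)
print(digit_version('2.0.0rc1'))    # (2, 0, 0, 0, -1, 1)
assert digit_version('2.0.0rc1') < digit_version('2.0.0')   # rc sorts before the final release
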
spaces/ArkanDash/rvc-models/app-full.py
DELETED
@@ -1,254 +0,0 @@
import os
import json
import argparse
import traceback
import logging
import gradio as gr
import numpy as np
import librosa
import torch
import asyncio
import edge_tts
import yt_dlp
import ffmpeg
import subprocess
import sys
import io
import wave
from datetime import datetime
from fairseq import checkpoint_utils
from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
from vc_infer_pipeline import VC
from config import (
    is_half,
    device
)
logging.getLogger("numba").setLevel(logging.WARNING)
limitation = os.getenv("SYSTEM") == "spaces"  # limit audio length in huggingface spaces

def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index, file_big_npy):
    def vc_fn(
        input_audio,
        upload_audio,
        upload_mode,
        f0_up_key,
        f0_method,
        index_rate,
        tts_mode,
        tts_text,
        tts_voice
    ):
        try:
            if tts_mode:
                if len(tts_text) > 100 and limitation:
                    return "Text is too long", None
                if tts_text is None or tts_voice is None:
                    return "You need to enter text and select a voice", None
                asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
                audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
            else:
                if upload_mode:
                    if input_audio is None:
                        return "You need to upload an audio", None
                    sampling_rate, audio = upload_audio
                    duration = audio.shape[0] / sampling_rate
                    audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
                    if len(audio.shape) > 1:
                        audio = librosa.to_mono(audio.transpose(1, 0))
                    if sampling_rate != 16000:
                        audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
                else:
                    audio, sr = librosa.load(input_audio, sr=16000, mono=True)
            times = [0, 0, 0]
            f0_up_key = int(f0_up_key)
            audio_opt = vc.pipeline(
                hubert_model,
                net_g,
                0,
                audio,
                times,
                f0_up_key,
                f0_method,
                file_index,
                file_big_npy,
                index_rate,
                if_f0,
            )
            print(
                f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s"
            )
            return "Success", (tgt_sr, audio_opt)
        except:
            info = traceback.format_exc()
            print(info)
            return info, (None, None)
    return vc_fn

def cut_vocal_and_inst(yt_url):
    if yt_url != "":
        if not os.path.exists("youtube_audio"):
            os.mkdir("youtube_audio")
        ydl_opts = {
            'format': 'bestaudio/best',
            'postprocessors': [{
                'key': 'FFmpegExtractAudio',
                'preferredcodec': 'wav',
            }],
            "outtmpl": 'youtube_audio/audio',
        }
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:
            ydl.download([yt_url])
        yt_audio_path = "youtube_audio/audio.wav"
        command = f"demucs --two-stems=vocals {yt_audio_path}"
        result = subprocess.run(command.split(), stdout=subprocess.PIPE)
        print(result.stdout.decode())
        return ("separated/htdemucs/audio/vocals.wav", "separated/htdemucs/audio/no_vocals.wav", yt_audio_path, "separated/htdemucs/audio/vocals.wav")

def combine_vocal_and_inst(audio_data, audio_volume):
    print(audio_data)
    if not os.path.exists("result"):
        os.mkdir("result")
    vocal_path = "result/output.wav"
    inst_path = "separated/htdemucs/audio/no_vocals.wav"
    output_path = "result/combine.mp3"
    with wave.open(vocal_path, "w") as wave_file:
        wave_file.setnchannels(1)
        wave_file.setsampwidth(2)
        wave_file.setframerate(audio_data[0])
        wave_file.writeframes(audio_data[1].tobytes())
    command = f'ffmpeg -y -i {inst_path} -i {vocal_path} -filter_complex [1:a]volume={audio_volume}dB[v];[0:a][v]amix=inputs=2:duration=longest -b:a 320k -c:a libmp3lame {output_path}'
    result = subprocess.run(command.split(), stdout=subprocess.PIPE)
    return output_path

def load_hubert():
    global hubert_model
    models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
        ["hubert_base.pt"],
        suffix="",
    )
    hubert_model = models[0]
    hubert_model = hubert_model.to(device)
    if is_half:
        hubert_model = hubert_model.half()
    else:
        hubert_model = hubert_model.float()
    hubert_model.eval()

def change_to_tts_mode(tts_mode, upload_mode):
    if tts_mode:
        return gr.Textbox.update(visible=False), gr.Audio.update(visible=False), gr.Checkbox.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True)
    else:
        if upload_mode:
            return gr.Textbox.update(visible=False), gr.Audio.update(visible=True), gr.Checkbox.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)
        else:
            return gr.Textbox.update(visible=True), gr.Audio.update(visible=False), gr.Checkbox.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)

def change_to_upload_mode(upload_mode):
    if upload_mode:
        return gr.Textbox().update(visible=False), gr.Audio().update(visible=True)
    else:
        return gr.Textbox().update(visible=True), gr.Audio().update(visible=False)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--api', action="store_true", default=False)
    parser.add_argument("--colab", action="store_true", default=False, help="share gradio app")
    args, unknown = parser.parse_known_args()
    load_hubert()
    models = []
    tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
    voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
    with open("weights/model_info.json", "r", encoding="utf-8") as f:
        models_info = json.load(f)
    for name, info in models_info.items():
        if not info['enable']:
            continue
        title = info['title']
        author = info.get("author", None)
        cover = f"weights/{name}/{info['cover']}"
        index = f"weights/{name}/{info['feature_retrieval_library']}"
        npy = f"weights/{name}/{info['feature_file']}"
        cpt = torch.load(f"weights/{name}/{name}.pth", map_location="cpu")
        tgt_sr = cpt["config"][-1]
        cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0]  # n_spk
        if_f0 = cpt.get("f0", 1)
        if if_f0 == 1:
            net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
        else:
            net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
        del net_g.enc_q
        print(net_g.load_state_dict(cpt["weight"], strict=False))  # without this line the state is not cleaned up properly, oddly enough
        net_g.eval().to(device)
        if is_half:
            net_g = net_g.half()
        else:
            net_g = net_g.float()
        vc = VC(tgt_sr, device, is_half)
        models.append((name, title, author, cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, index, npy)))
    with gr.Blocks() as app:
        gr.Markdown(
            "# <center> RVC Models\n"
            "## <center> The input audio should be clean and pure voice without background music.\n"
            "### <center> More feature will be added soon... \n"
            "[](https://colab.research.google.com/drive/1hx6kKvIuv5XNY1Gai2PEuZhpO5z6xpVh?usp=sharing)\n\n"
            "[](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)"
        )
        with gr.Tabs():
            for (name, title, author, cover, vc_fn) in models:
                with gr.TabItem(name):
                    with gr.Row():
                        gr.Markdown(
                            '<div align="center">'
                            f'<div>{title}</div>\n'+
                            (f'<div>Model author: {author}</div>' if author else "")+
                            (f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else "")+
                            '</div>'
                        )
                    with gr.Row():
                        with gr.Column():
                            vc_youtube = gr.Textbox(label="Youtube URL")
                            vc_convert = gr.Button("Convert", variant="primary")
                            vc_vocal_preview = gr.Audio(label="Vocal Preview")
                            vc_inst_preview = gr.Audio(label="Instrumental Preview")
                            vc_audio_preview = gr.Audio(label="Audio Preview")
                        with gr.Column():
                            vc_input = gr.Textbox(label="Input audio path")
                            vc_upload = gr.Audio(label="Upload audio file", visible=False, interactive=True)
                            upload_mode = gr.Checkbox(label="Upload mode", value=False)
                            vc_transpose = gr.Number(label="Transpose", value=0)
                            vc_f0method = gr.Radio(
                                label="Pitch extraction algorithm, PM is fast but Harvest is better for low frequencies",
                                choices=["pm", "harvest"],
                                value="pm",
                                interactive=True,
                            )
                            vc_index_ratio = gr.Slider(
                                minimum=0,
                                maximum=1,
                                label="Retrieval feature ratio",
                                value=0.6,
                                interactive=True,
                            )
                            tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False)
                            tts_text = gr.Textbox(visible=False, label="TTS text (100 words limitation)" if limitation else "TTS text")
                            tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female")
                            vc_output1 = gr.Textbox(label="Output Message")
                            vc_output2 = gr.Audio(label="Output Audio")
                            vc_submit = gr.Button("Generate", variant="primary")
                        with gr.Column():
                            vc_volume = gr.Slider(
                                minimum=0,
                                maximum=10,
                                label="Vocal volume",
                                value=4,
                                interactive=True,
                                step=1
                            )
                            vc_outputCombine = gr.Audio(label="Output Combined Audio")
                            vc_combine = gr.Button("Combine", variant="primary")
                    vc_submit.click(vc_fn, [vc_input, vc_upload, upload_mode, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output1, vc_output2])
                    vc_convert.click(cut_vocal_and_inst, vc_youtube, [vc_vocal_preview, vc_inst_preview, vc_audio_preview, vc_input])
                    vc_combine.click(combine_vocal_and_inst, [vc_output2, vc_volume], vc_outputCombine)
                    tts_mode.change(change_to_tts_mode, [tts_mode, upload_mode], [vc_input, vc_upload, upload_mode, tts_text, tts_voice])
                    upload_mode.change(change_to_upload_mode, [upload_mode], [vc_input, vc_upload])
        app.queue(concurrency_count=1, max_size=20, api_open=args.api).launch(share=args.colab)

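For context, a minimal sketch of the edge-tts call the TTS input path above relies on; the phrase and voice name here are arbitrary examples, and the call mirrors the one made inside vc_fn before RVC processes the result.

import asyncio
import edge_tts

async def demo():
    # synthesize a short phrase to an mp3, as the app does before voice conversion
    await edge_tts.Communicate("hello from edge tts", "en-US-AnaNeural").save("tts.mp3")

asyncio.run(demo())
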
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/distlib/locators.py
DELETED
@@ -1,1300 +0,0 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#

import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
    import threading
except ImportError:  # pragma: no cover
    import dummy_threading as threading
import zlib

from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
                     queue, quote, unescape, build_opener,
                     HTTPRedirectHandler as BaseRedirectHandler, text_type,
                     Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata, MetadataInvalidError
from .util import (cached_property, ensure_slash, split_filename, get_project_data,
                   parse_requirement, parse_name_and_version, ServerProxy,
                   normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible

logger = logging.getLogger(__name__)

HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.org/pypi'

def get_all_distribution_names(url=None):
    """
    Return all distribution names known by an index.
    :param url: The URL of the index.
    :return: A list of all known distribution names.
    """
    if url is None:
        url = DEFAULT_INDEX
    client = ServerProxy(url, timeout=3.0)
    try:
        return client.list_packages()
    finally:
        client('close')()

class RedirectHandler(BaseRedirectHandler):
    """
    A class to work around a bug in some Python 3.2.x releases.
    """
    # There's a bug in the base version for some 3.2.x
    # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
    # returns e.g. /abc, it bails because it says the scheme ''
    # is bogus, when actually it should use the request's
    # URL for the scheme. See Python issue #13696.
    def http_error_302(self, req, fp, code, msg, headers):
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI). Use first header.
        newurl = None
        for key in ('location', 'uri'):
            if key in headers:
                newurl = headers[key]
                break
        if newurl is None:  # pragma: no cover
            return
        urlparts = urlparse(newurl)
        if urlparts.scheme == '':
            newurl = urljoin(req.get_full_url(), newurl)
            if hasattr(headers, 'replace_header'):
                headers.replace_header(key, newurl)
            else:
                headers[key] = newurl
        return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
                                                  headers)

    http_error_301 = http_error_303 = http_error_307 = http_error_302

class Locator(object):
    """
    A base class for locators - things that locate distributions.
    """
    source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
    binary_extensions = ('.egg', '.exe', '.whl')
    excluded_extensions = ('.pdf',)

    # A list of tags indicating which wheels you want to match. The default
    # value of None matches against the tags compatible with the running
    # Python. If you want to match other values, set wheel_tags on a locator
    # instance to a list of tuples (pyver, abi, arch) which you want to match.
    wheel_tags = None

    downloadable_extensions = source_extensions + ('.whl',)

    def __init__(self, scheme='default'):
        """
        Initialise an instance.
        :param scheme: Because locators look for most recent versions, they
                       need to know the version scheme to use. This specifies
                       the current PEP-recommended scheme - use ``'legacy'``
                       if you need to support existing distributions on PyPI.
        """
        self._cache = {}
        self.scheme = scheme
        # Because of bugs in some of the handlers on some of the platforms,
        # we use our own opener rather than just using urlopen.
        self.opener = build_opener(RedirectHandler())
        # If get_project() is called from locate(), the matcher instance
        # is set from the requirement passed to locate(). See issue #18 for
        # why this can be useful to know.
        self.matcher = None
        self.errors = queue.Queue()

    def get_errors(self):
        """
        Return any errors which have occurred.
        """
        result = []
        while not self.errors.empty():  # pragma: no cover
            try:
                e = self.errors.get(False)
                result.append(e)
            except self.errors.Empty:
                continue
            self.errors.task_done()
        return result

    def clear_errors(self):
        """
        Clear any errors which may have been logged.
        """
        # Just get the errors and throw them away
        self.get_errors()

    def clear_cache(self):
        self._cache.clear()

    def _get_scheme(self):
        return self._scheme

    def _set_scheme(self, value):
        self._scheme = value

    scheme = property(_get_scheme, _set_scheme)

    def _get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to Distribution
        instances.

        This should be implemented in subclasses.

        If called from a locate() request, self.matcher will be set to a
        matcher for the requirement to satisfy, otherwise it will be None.
        """
        raise NotImplementedError('Please implement in the subclass')

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Please implement in the subclass')

    def get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to Distribution
        instances.

        This calls _get_project to do all the work, and just implements a caching layer on top.
        """
        if self._cache is None:  # pragma: no cover
            result = self._get_project(name)
        elif name in self._cache:
            result = self._cache[name]
        else:
            self.clear_errors()
            result = self._get_project(name)
            self._cache[name] = result
        return result

    def score_url(self, url):
        """
        Give an url a score which can be used to choose preferred URLs
        for a given project release.
        """
        t = urlparse(url)
        basename = posixpath.basename(t.path)
        compatible = True
        is_wheel = basename.endswith('.whl')
        is_downloadable = basename.endswith(self.downloadable_extensions)
        if is_wheel:
            compatible = is_compatible(Wheel(basename), self.wheel_tags)
        return (t.scheme == 'https', 'pypi.org' in t.netloc,
                is_downloadable, is_wheel, compatible, basename)

    def prefer_url(self, url1, url2):
        """
        Choose one of two URLs where both are candidates for distribution
        archives for the same version of a distribution (for example,
        .tar.gz vs. zip).

        The current implementation favours https:// URLs over http://, archives
        from PyPI over those from other locations, wheel compatibility (if a
        wheel) and then the archive name.
        """
        result = url2
        if url1:
            s1 = self.score_url(url1)
            s2 = self.score_url(url2)
            if s1 > s2:
                result = url1
            if result != url2:
                logger.debug('Not replacing %r with %r', url1, url2)
            else:
                logger.debug('Replacing %r with %r', url1, url2)
        return result

    def split_filename(self, filename, project_name):
        """
        Attempt to split a filename in project name, version and Python version.
        """
        return split_filename(filename, project_name)

    def convert_url_to_download_info(self, url, project_name):
        """
        See if a URL is a candidate for a download URL for a project (the URL
        has typically been scraped from an HTML page).

        If it is, a dictionary is returned with keys "name", "version",
        "filename" and "url"; otherwise, None is returned.
        """
        def same_project(name1, name2):
            return normalize_name(name1) == normalize_name(name2)

        result = None
        scheme, netloc, path, params, query, frag = urlparse(url)
        if frag.lower().startswith('egg='):  # pragma: no cover
            logger.debug('%s: version hint in fragment: %r',
                         project_name, frag)
        m = HASHER_HASH.match(frag)
        if m:
            algo, digest = m.groups()
        else:
            algo, digest = None, None
        origpath = path
        if path and path[-1] == '/':  # pragma: no cover
            path = path[:-1]
        if path.endswith('.whl'):
            try:
                wheel = Wheel(path)
                if not is_compatible(wheel, self.wheel_tags):
                    logger.debug('Wheel not compatible: %s', path)
                else:
                    if project_name is None:
                        include = True
                    else:
                        include = same_project(wheel.name, project_name)
                    if include:
                        result = {
                            'name': wheel.name,
                            'version': wheel.version,
                            'filename': wheel.filename,
                            'url': urlunparse((scheme, netloc, origpath,
                                               params, query, '')),
                            'python-version': ', '.join(
                                ['.'.join(list(v[2:])) for v in wheel.pyver]),
                        }
            except Exception as e:  # pragma: no cover
                logger.warning('invalid path for wheel: %s', path)
        elif not path.endswith(self.downloadable_extensions):  # pragma: no cover
            logger.debug('Not downloadable: %s', path)
        else:  # downloadable extension
            path = filename = posixpath.basename(path)
            for ext in self.downloadable_extensions:
                if path.endswith(ext):
                    path = path[:-len(ext)]
                    t = self.split_filename(path, project_name)
                    if not t:  # pragma: no cover
                        logger.debug('No match for project/version: %s', path)
                    else:
                        name, version, pyver = t
                        if not project_name or same_project(project_name, name):
                            result = {
                                'name': name,
                                'version': version,
                                'filename': filename,
                                'url': urlunparse((scheme, netloc, origpath,
                                                   params, query, '')),
                                #'packagetype': 'sdist',
                            }
                            if pyver:  # pragma: no cover
                                result['python-version'] = pyver
                    break
        if result and algo:
            result['%s_digest' % algo] = digest
        return result

    def _get_digest(self, info):
        """
        Get a digest from a dictionary by looking at a "digests" dictionary
        or keys of the form 'algo_digest'.

        Returns a 2-tuple (algo, digest) if found, else None. Currently
        looks only for SHA256, then MD5.
        """
        result = None
        if 'digests' in info:
            digests = info['digests']
            for algo in ('sha256', 'md5'):
                if algo in digests:
                    result = (algo, digests[algo])
                    break
        if not result:
            for algo in ('sha256', 'md5'):
                key = '%s_digest' % algo
                if key in info:
                    result = (algo, info[key])
                    break
        return result

    def _update_version_data(self, result, info):
        """
        Update a result dictionary (the final result from _get_project) with a
        dictionary for a specific version, which typically holds information
        gleaned from a filename or URL for an archive for the distribution.
        """
        name = info.pop('name')
        version = info.pop('version')
        if version in result:
            dist = result[version]
            md = dist.metadata
        else:
            dist = make_dist(name, version, scheme=self.scheme)
            md = dist.metadata
        dist.digest = digest = self._get_digest(info)
        url = info['url']
        result['digests'][url] = digest
        if md.source_url != info['url']:
            md.source_url = self.prefer_url(md.source_url, url)
        result['urls'].setdefault(version, set()).add(url)
        dist.locator = self
        result[version] = dist

    def locate(self, requirement, prereleases=False):
        """
        Find the most recent distribution which matches the given
        requirement.

        :param requirement: A requirement of the form 'foo (1.0)' or perhaps
                            'foo (>= 1.0, < 2.0, != 1.3)'
        :param prereleases: If ``True``, allow pre-release versions
                            to be located. Otherwise, pre-release versions
                            are not returned.
        :return: A :class:`Distribution` instance, or ``None`` if no such
                 distribution could be located.
        """
        result = None
        r = parse_requirement(requirement)
        if r is None:  # pragma: no cover
            raise DistlibException('Not a valid requirement: %r' % requirement)
        scheme = get_scheme(self.scheme)
        self.matcher = matcher = scheme.matcher(r.requirement)
        logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
        versions = self.get_project(r.name)
        if len(versions) > 2:   # urls and digests keys are present
            # sometimes, versions are invalid
            slist = []
            vcls = matcher.version_class
            for k in versions:
                if k in ('urls', 'digests'):
                    continue
                try:
                    if not matcher.match(k):
                        pass  # logger.debug('%s did not match %r', matcher, k)
                    else:
                        if prereleases or not vcls(k).is_prerelease:
                            slist.append(k)
                        # else:
                        #     logger.debug('skipping pre-release '
                        #                  'version %s of %s', k, matcher.name)
                except Exception:  # pragma: no cover
                    logger.warning('error matching %s with %r', matcher, k)
                    pass  # slist.append(k)
            if len(slist) > 1:
                slist = sorted(slist, key=scheme.key)
            if slist:
                logger.debug('sorted list: %s', slist)
                version = slist[-1]
                result = versions[version]
        if result:
            if r.extras:
                result.extras = r.extras
            result.download_urls = versions.get('urls', {}).get(version, set())
            d = {}
            sd = versions.get('digests', {})
            for url in result.download_urls:
                if url in sd:  # pragma: no cover
                    d[url] = sd[url]
            result.digests = d
        self.matcher = None
        return result


class PyPIRPCLocator(Locator):
    """
    This locator uses XML-RPC to locate distributions. It therefore
    cannot be used with simple mirrors (that only mirror file content).
    """
    def __init__(self, url, **kwargs):
        """
        Initialise an instance.

        :param url: The URL to use for XML-RPC.
        :param kwargs: Passed to the superclass constructor.
        """
        super(PyPIRPCLocator, self).__init__(**kwargs)
        self.base_url = url
        self.client = ServerProxy(url, timeout=3.0)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        return set(self.client.list_packages())

    def _get_project(self, name):
        result = {'urls': {}, 'digests': {}}
        versions = self.client.package_releases(name, True)
        for v in versions:
            urls = self.client.release_urls(name, v)
            data = self.client.release_data(name, v)
            metadata = Metadata(scheme=self.scheme)
            metadata.name = data['name']
            metadata.version = data['version']
            metadata.license = data.get('license')
            metadata.keywords = data.get('keywords', [])
            metadata.summary = data.get('summary')
            dist = Distribution(metadata)
            if urls:
                info = urls[0]
                metadata.source_url = info['url']
                dist.digest = self._get_digest(info)
            dist.locator = self
            result[v] = dist
            for info in urls:
                url = info['url']
                digest = self._get_digest(info)
                result['urls'].setdefault(v, set()).add(url)
                result['digests'][url] = digest
        return result

class PyPIJSONLocator(Locator):
    """
    This locator uses PyPI's JSON interface. It's very limited in functionality
    and probably not worth using.
    """
    def __init__(self, url, **kwargs):
        super(PyPIJSONLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        result = {'urls': {}, 'digests': {}}
        url = urljoin(self.base_url, '%s/json' % quote(name))
        try:
            resp = self.opener.open(url)
            data = resp.read().decode()  # for now
            d = json.loads(data)
            md = Metadata(scheme=self.scheme)
            data = d['info']
            md.name = data['name']
            md.version = data['version']
            md.license = data.get('license')
            md.keywords = data.get('keywords', [])
            md.summary = data.get('summary')
            dist = Distribution(md)
            dist.locator = self
            urls = d['urls']
            result[md.version] = dist
            for info in d['urls']:
                url = info['url']
                dist.download_urls.add(url)
                dist.digests[url] = self._get_digest(info)
                result['urls'].setdefault(md.version, set()).add(url)
                result['digests'][url] = self._get_digest(info)
            # Now get other releases
            for version, infos in d['releases'].items():
                if version == md.version:
                    continue    # already done
                omd = Metadata(scheme=self.scheme)
                omd.name = md.name
                omd.version = version
                odist = Distribution(omd)
                odist.locator = self
                result[version] = odist
                for info in infos:
                    url = info['url']
                    odist.download_urls.add(url)
                    odist.digests[url] = self._get_digest(info)
|
512 |
-
result['urls'].setdefault(version, set()).add(url)
|
513 |
-
result['digests'][url] = self._get_digest(info)
|
514 |
-
# for info in urls:
|
515 |
-
# md.source_url = info['url']
|
516 |
-
# dist.digest = self._get_digest(info)
|
517 |
-
# dist.locator = self
|
518 |
-
# for info in urls:
|
519 |
-
# url = info['url']
|
520 |
-
# result['urls'].setdefault(md.version, set()).add(url)
|
521 |
-
# result['digests'][url] = self._get_digest(info)
|
522 |
-
except Exception as e:
|
523 |
-
self.errors.put(text_type(e))
|
524 |
-
logger.exception('JSON fetch failed: %s', e)
|
525 |
-
return result
|
526 |
-
|
527 |
-
|
528 |
-
class Page(object):
|
529 |
-
"""
|
530 |
-
This class represents a scraped HTML page.
|
531 |
-
"""
|
532 |
-
# The following slightly hairy-looking regex just looks for the contents of
|
533 |
-
# an anchor link, which has an attribute "href" either immediately preceded
|
534 |
-
# or immediately followed by a "rel" attribute. The attribute values can be
|
535 |
-
# declared with double quotes, single quotes or no quotes - which leads to
|
536 |
-
# the length of the expression.
|
537 |
-
_href = re.compile("""
|
538 |
-
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
|
539 |
-
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
|
540 |
-
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
|
541 |
-
""", re.I | re.S | re.X)
|
542 |
-
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
|
543 |
-
|
544 |
-
def __init__(self, data, url):
|
545 |
-
"""
|
546 |
-
Initialise an instance with the Unicode page contents and the URL they
|
547 |
-
came from.
|
548 |
-
"""
|
549 |
-
self.data = data
|
550 |
-
self.base_url = self.url = url
|
551 |
-
m = self._base.search(self.data)
|
552 |
-
if m:
|
553 |
-
self.base_url = m.group(1)
|
554 |
-
|
555 |
-
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
|
556 |
-
|
557 |
-
@cached_property
|
558 |
-
def links(self):
|
559 |
-
"""
|
560 |
-
Return the URLs of all the links on a page together with information
|
561 |
-
about their "rel" attribute, for determining which ones to treat as
|
562 |
-
downloads and which ones to queue for further scraping.
|
563 |
-
"""
|
564 |
-
def clean(url):
|
565 |
-
"Tidy up an URL."
|
566 |
-
scheme, netloc, path, params, query, frag = urlparse(url)
|
567 |
-
return urlunparse((scheme, netloc, quote(path),
|
568 |
-
params, query, frag))
|
569 |
-
|
570 |
-
result = set()
|
571 |
-
for match in self._href.finditer(self.data):
|
572 |
-
d = match.groupdict('')
|
573 |
-
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
|
574 |
-
d['rel4'] or d['rel5'] or d['rel6'])
|
575 |
-
url = d['url1'] or d['url2'] or d['url3']
|
576 |
-
url = urljoin(self.base_url, url)
|
577 |
-
url = unescape(url)
|
578 |
-
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
|
579 |
-
result.add((url, rel))
|
580 |
-
# We sort the result, hoping to bring the most recent versions
|
581 |
-
# to the front
|
582 |
-
result = sorted(result, key=lambda t: t[0], reverse=True)
|
583 |
-
return result
|
584 |
-
|
585 |
-
|
586 |
-
class SimpleScrapingLocator(Locator):
|
587 |
-
"""
|
588 |
-
A locator which scrapes HTML pages to locate downloads for a distribution.
|
589 |
-
This runs multiple threads to do the I/O; performance is at least as good
|
590 |
-
as pip's PackageFinder, which works in an analogous fashion.
|
591 |
-
"""
|
592 |
-
|
593 |
-
# These are used to deal with various Content-Encoding schemes.
|
594 |
-
decoders = {
|
595 |
-
'deflate': zlib.decompress,
|
596 |
-
'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
|
597 |
-
'none': lambda b: b,
|
598 |
-
}
|
599 |
-
|
600 |
-
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
|
601 |
-
"""
|
602 |
-
Initialise an instance.
|
603 |
-
:param url: The root URL to use for scraping.
|
604 |
-
:param timeout: The timeout, in seconds, to be applied to requests.
|
605 |
-
This defaults to ``None`` (no timeout specified).
|
606 |
-
:param num_workers: The number of worker threads you want to do I/O,
|
607 |
-
This defaults to 10.
|
608 |
-
:param kwargs: Passed to the superclass.
|
609 |
-
"""
|
610 |
-
super(SimpleScrapingLocator, self).__init__(**kwargs)
|
611 |
-
self.base_url = ensure_slash(url)
|
612 |
-
self.timeout = timeout
|
613 |
-
self._page_cache = {}
|
614 |
-
self._seen = set()
|
615 |
-
self._to_fetch = queue.Queue()
|
616 |
-
self._bad_hosts = set()
|
617 |
-
self.skip_externals = False
|
618 |
-
self.num_workers = num_workers
|
619 |
-
self._lock = threading.RLock()
|
620 |
-
# See issue #45: we need to be resilient when the locator is used
|
621 |
-
# in a thread, e.g. with concurrent.futures. We can't use self._lock
|
622 |
-
# as it is for coordinating our internal threads - the ones created
|
623 |
-
# in _prepare_threads.
|
624 |
-
self._gplock = threading.RLock()
|
625 |
-
self.platform_check = False # See issue #112
|
626 |
-
|
627 |
-
def _prepare_threads(self):
|
628 |
-
"""
|
629 |
-
Threads are created only when get_project is called, and terminate
|
630 |
-
before it returns. They are there primarily to parallelise I/O (i.e.
|
631 |
-
fetching web pages).
|
632 |
-
"""
|
633 |
-
self._threads = []
|
634 |
-
for i in range(self.num_workers):
|
635 |
-
t = threading.Thread(target=self._fetch)
|
636 |
-
t.daemon = True
|
637 |
-
t.start()
|
638 |
-
self._threads.append(t)
|
639 |
-
|
640 |
-
def _wait_threads(self):
|
641 |
-
"""
|
642 |
-
Tell all the threads to terminate (by sending a sentinel value) and
|
643 |
-
wait for them to do so.
|
644 |
-
"""
|
645 |
-
# Note that you need two loops, since you can't say which
|
646 |
-
# thread will get each sentinel
|
647 |
-
for t in self._threads:
|
648 |
-
self._to_fetch.put(None) # sentinel
|
649 |
-
for t in self._threads:
|
650 |
-
t.join()
|
651 |
-
self._threads = []
|
652 |
-
|
653 |
-
def _get_project(self, name):
|
654 |
-
result = {'urls': {}, 'digests': {}}
|
655 |
-
with self._gplock:
|
656 |
-
self.result = result
|
657 |
-
self.project_name = name
|
658 |
-
url = urljoin(self.base_url, '%s/' % quote(name))
|
659 |
-
self._seen.clear()
|
660 |
-
self._page_cache.clear()
|
661 |
-
self._prepare_threads()
|
662 |
-
try:
|
663 |
-
logger.debug('Queueing %s', url)
|
664 |
-
self._to_fetch.put(url)
|
665 |
-
self._to_fetch.join()
|
666 |
-
finally:
|
667 |
-
self._wait_threads()
|
668 |
-
del self.result
|
669 |
-
return result
|
670 |
-
|
671 |
-
platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
|
672 |
-
r'win(32|_amd64)|macosx_?\d+)\b', re.I)
|
673 |
-
|
674 |
-
def _is_platform_dependent(self, url):
|
675 |
-
"""
|
676 |
-
Does an URL refer to a platform-specific download?
|
677 |
-
"""
|
678 |
-
return self.platform_dependent.search(url)
|
679 |
-
|
680 |
-
def _process_download(self, url):
|
681 |
-
"""
|
682 |
-
See if an URL is a suitable download for a project.
|
683 |
-
|
684 |
-
If it is, register information in the result dictionary (for
|
685 |
-
_get_project) about the specific version it's for.
|
686 |
-
|
687 |
-
Note that the return value isn't actually used other than as a boolean
|
688 |
-
value.
|
689 |
-
"""
|
690 |
-
if self.platform_check and self._is_platform_dependent(url):
|
691 |
-
info = None
|
692 |
-
else:
|
693 |
-
info = self.convert_url_to_download_info(url, self.project_name)
|
694 |
-
logger.debug('process_download: %s -> %s', url, info)
|
695 |
-
if info:
|
696 |
-
with self._lock: # needed because self.result is shared
|
697 |
-
self._update_version_data(self.result, info)
|
698 |
-
return info
|
699 |
-
|
700 |
-
def _should_queue(self, link, referrer, rel):
|
701 |
-
"""
|
702 |
-
Determine whether a link URL from a referring page and with a
|
703 |
-
particular "rel" attribute should be queued for scraping.
|
704 |
-
"""
|
705 |
-
scheme, netloc, path, _, _, _ = urlparse(link)
|
706 |
-
if path.endswith(self.source_extensions + self.binary_extensions +
|
707 |
-
self.excluded_extensions):
|
708 |
-
result = False
|
709 |
-
elif self.skip_externals and not link.startswith(self.base_url):
|
710 |
-
result = False
|
711 |
-
elif not referrer.startswith(self.base_url):
|
712 |
-
result = False
|
713 |
-
elif rel not in ('homepage', 'download'):
|
714 |
-
result = False
|
715 |
-
elif scheme not in ('http', 'https', 'ftp'):
|
716 |
-
result = False
|
717 |
-
elif self._is_platform_dependent(link):
|
718 |
-
result = False
|
719 |
-
else:
|
720 |
-
host = netloc.split(':', 1)[0]
|
721 |
-
if host.lower() == 'localhost':
|
722 |
-
result = False
|
723 |
-
else:
|
724 |
-
result = True
|
725 |
-
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
|
726 |
-
referrer, result)
|
727 |
-
return result
|
728 |
-
|
729 |
-
def _fetch(self):
|
730 |
-
"""
|
731 |
-
Get a URL to fetch from the work queue, get the HTML page, examine its
|
732 |
-
links for download candidates and candidates for further scraping.
|
733 |
-
|
734 |
-
This is a handy method to run in a thread.
|
735 |
-
"""
|
736 |
-
while True:
|
737 |
-
url = self._to_fetch.get()
|
738 |
-
try:
|
739 |
-
if url:
|
740 |
-
page = self.get_page(url)
|
741 |
-
if page is None: # e.g. after an error
|
742 |
-
continue
|
743 |
-
for link, rel in page.links:
|
744 |
-
if link not in self._seen:
|
745 |
-
try:
|
746 |
-
self._seen.add(link)
|
747 |
-
if (not self._process_download(link) and
|
748 |
-
self._should_queue(link, url, rel)):
|
749 |
-
logger.debug('Queueing %s from %s', link, url)
|
750 |
-
self._to_fetch.put(link)
|
751 |
-
except MetadataInvalidError: # e.g. invalid versions
|
752 |
-
pass
|
753 |
-
except Exception as e: # pragma: no cover
|
754 |
-
self.errors.put(text_type(e))
|
755 |
-
finally:
|
756 |
-
# always do this, to avoid hangs :-)
|
757 |
-
self._to_fetch.task_done()
|
758 |
-
if not url:
|
759 |
-
#logger.debug('Sentinel seen, quitting.')
|
760 |
-
break
|
761 |
-
|
762 |
-
def get_page(self, url):
|
763 |
-
"""
|
764 |
-
Get the HTML for an URL, possibly from an in-memory cache.
|
765 |
-
|
766 |
-
XXX TODO Note: this cache is never actually cleared. It's assumed that
|
767 |
-
the data won't get stale over the lifetime of a locator instance (not
|
768 |
-
necessarily true for the default_locator).
|
769 |
-
"""
|
770 |
-
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
|
771 |
-
scheme, netloc, path, _, _, _ = urlparse(url)
|
772 |
-
if scheme == 'file' and os.path.isdir(url2pathname(path)):
|
773 |
-
url = urljoin(ensure_slash(url), 'index.html')
|
774 |
-
|
775 |
-
if url in self._page_cache:
|
776 |
-
result = self._page_cache[url]
|
777 |
-
logger.debug('Returning %s from cache: %s', url, result)
|
778 |
-
else:
|
779 |
-
host = netloc.split(':', 1)[0]
|
780 |
-
result = None
|
781 |
-
if host in self._bad_hosts:
|
782 |
-
logger.debug('Skipping %s due to bad host %s', url, host)
|
783 |
-
else:
|
784 |
-
req = Request(url, headers={'Accept-encoding': 'identity'})
|
785 |
-
try:
|
786 |
-
logger.debug('Fetching %s', url)
|
787 |
-
resp = self.opener.open(req, timeout=self.timeout)
|
788 |
-
logger.debug('Fetched %s', url)
|
789 |
-
headers = resp.info()
|
790 |
-
content_type = headers.get('Content-Type', '')
|
791 |
-
if HTML_CONTENT_TYPE.match(content_type):
|
792 |
-
final_url = resp.geturl()
|
793 |
-
data = resp.read()
|
794 |
-
encoding = headers.get('Content-Encoding')
|
795 |
-
if encoding:
|
796 |
-
decoder = self.decoders[encoding] # fail if not found
|
797 |
-
data = decoder(data)
|
798 |
-
encoding = 'utf-8'
|
799 |
-
m = CHARSET.search(content_type)
|
800 |
-
if m:
|
801 |
-
encoding = m.group(1)
|
802 |
-
try:
|
803 |
-
data = data.decode(encoding)
|
804 |
-
except UnicodeError: # pragma: no cover
|
805 |
-
data = data.decode('latin-1') # fallback
|
806 |
-
result = Page(data, final_url)
|
807 |
-
self._page_cache[final_url] = result
|
808 |
-
except HTTPError as e:
|
809 |
-
if e.code != 404:
|
810 |
-
logger.exception('Fetch failed: %s: %s', url, e)
|
811 |
-
except URLError as e: # pragma: no cover
|
812 |
-
logger.exception('Fetch failed: %s: %s', url, e)
|
813 |
-
with self._lock:
|
814 |
-
self._bad_hosts.add(host)
|
815 |
-
except Exception as e: # pragma: no cover
|
816 |
-
logger.exception('Fetch failed: %s: %s', url, e)
|
817 |
-
finally:
|
818 |
-
self._page_cache[url] = result # even if None (failure)
|
819 |
-
return result
|
820 |
-
|
821 |
-
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
|
822 |
-
|
823 |
-
def get_distribution_names(self):
|
824 |
-
"""
|
825 |
-
Return all the distribution names known to this locator.
|
826 |
-
"""
|
827 |
-
result = set()
|
828 |
-
page = self.get_page(self.base_url)
|
829 |
-
if not page:
|
830 |
-
raise DistlibException('Unable to get %s' % self.base_url)
|
831 |
-
for match in self._distname_re.finditer(page.data):
|
832 |
-
result.add(match.group(1))
|
833 |
-
return result
|
834 |
-
|
835 |
-
class DirectoryLocator(Locator):
|
836 |
-
"""
|
837 |
-
This class locates distributions in a directory tree.
|
838 |
-
"""
|
839 |
-
|
840 |
-
def __init__(self, path, **kwargs):
|
841 |
-
"""
|
842 |
-
Initialise an instance.
|
843 |
-
:param path: The root of the directory tree to search.
|
844 |
-
:param kwargs: Passed to the superclass constructor,
|
845 |
-
except for:
|
846 |
-
* recursive - if True (the default), subdirectories are
|
847 |
-
recursed into. If False, only the top-level directory
|
848 |
-
is searched,
|
849 |
-
"""
|
850 |
-
self.recursive = kwargs.pop('recursive', True)
|
851 |
-
super(DirectoryLocator, self).__init__(**kwargs)
|
852 |
-
path = os.path.abspath(path)
|
853 |
-
if not os.path.isdir(path): # pragma: no cover
|
854 |
-
raise DistlibException('Not a directory: %r' % path)
|
855 |
-
self.base_dir = path
|
856 |
-
|
857 |
-
def should_include(self, filename, parent):
|
858 |
-
"""
|
859 |
-
Should a filename be considered as a candidate for a distribution
|
860 |
-
archive? As well as the filename, the directory which contains it
|
861 |
-
is provided, though not used by the current implementation.
|
862 |
-
"""
|
863 |
-
return filename.endswith(self.downloadable_extensions)
|
864 |
-
|
865 |
-
def _get_project(self, name):
|
866 |
-
result = {'urls': {}, 'digests': {}}
|
867 |
-
for root, dirs, files in os.walk(self.base_dir):
|
868 |
-
for fn in files:
|
869 |
-
if self.should_include(fn, root):
|
870 |
-
fn = os.path.join(root, fn)
|
871 |
-
url = urlunparse(('file', '',
|
872 |
-
pathname2url(os.path.abspath(fn)),
|
873 |
-
'', '', ''))
|
874 |
-
info = self.convert_url_to_download_info(url, name)
|
875 |
-
if info:
|
876 |
-
self._update_version_data(result, info)
|
877 |
-
if not self.recursive:
|
878 |
-
break
|
879 |
-
return result
|
880 |
-
|
881 |
-
def get_distribution_names(self):
|
882 |
-
"""
|
883 |
-
Return all the distribution names known to this locator.
|
884 |
-
"""
|
885 |
-
result = set()
|
886 |
-
for root, dirs, files in os.walk(self.base_dir):
|
887 |
-
for fn in files:
|
888 |
-
if self.should_include(fn, root):
|
889 |
-
fn = os.path.join(root, fn)
|
890 |
-
url = urlunparse(('file', '',
|
891 |
-
pathname2url(os.path.abspath(fn)),
|
892 |
-
'', '', ''))
|
893 |
-
info = self.convert_url_to_download_info(url, None)
|
894 |
-
if info:
|
895 |
-
result.add(info['name'])
|
896 |
-
if not self.recursive:
|
897 |
-
break
|
898 |
-
return result
|
899 |
-
|
900 |
-
class JSONLocator(Locator):
|
901 |
-
"""
|
902 |
-
This locator uses special extended metadata (not available on PyPI) and is
|
903 |
-
the basis of performant dependency resolution in distlib. Other locators
|
904 |
-
require archive downloads before dependencies can be determined! As you
|
905 |
-
might imagine, that can be slow.
|
906 |
-
"""
|
907 |
-
def get_distribution_names(self):
|
908 |
-
"""
|
909 |
-
Return all the distribution names known to this locator.
|
910 |
-
"""
|
911 |
-
raise NotImplementedError('Not available from this locator')
|
912 |
-
|
913 |
-
def _get_project(self, name):
|
914 |
-
result = {'urls': {}, 'digests': {}}
|
915 |
-
data = get_project_data(name)
|
916 |
-
if data:
|
917 |
-
for info in data.get('files', []):
|
918 |
-
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
|
919 |
-
continue
|
920 |
-
# We don't store summary in project metadata as it makes
|
921 |
-
# the data bigger for no benefit during dependency
|
922 |
-
# resolution
|
923 |
-
dist = make_dist(data['name'], info['version'],
|
924 |
-
summary=data.get('summary',
|
925 |
-
'Placeholder for summary'),
|
926 |
-
scheme=self.scheme)
|
927 |
-
md = dist.metadata
|
928 |
-
md.source_url = info['url']
|
929 |
-
# TODO SHA256 digest
|
930 |
-
if 'digest' in info and info['digest']:
|
931 |
-
dist.digest = ('md5', info['digest'])
|
932 |
-
md.dependencies = info.get('requirements', {})
|
933 |
-
dist.exports = info.get('exports', {})
|
934 |
-
result[dist.version] = dist
|
935 |
-
result['urls'].setdefault(dist.version, set()).add(info['url'])
|
936 |
-
return result
|
937 |
-
|
938 |
-
class DistPathLocator(Locator):
|
939 |
-
"""
|
940 |
-
This locator finds installed distributions in a path. It can be useful for
|
941 |
-
adding to an :class:`AggregatingLocator`.
|
942 |
-
"""
|
943 |
-
def __init__(self, distpath, **kwargs):
|
944 |
-
"""
|
945 |
-
Initialise an instance.
|
946 |
-
|
947 |
-
:param distpath: A :class:`DistributionPath` instance to search.
|
948 |
-
"""
|
949 |
-
super(DistPathLocator, self).__init__(**kwargs)
|
950 |
-
assert isinstance(distpath, DistributionPath)
|
951 |
-
self.distpath = distpath
|
952 |
-
|
953 |
-
def _get_project(self, name):
|
954 |
-
dist = self.distpath.get_distribution(name)
|
955 |
-
if dist is None:
|
956 |
-
result = {'urls': {}, 'digests': {}}
|
957 |
-
else:
|
958 |
-
result = {
|
959 |
-
dist.version: dist,
|
960 |
-
'urls': {dist.version: set([dist.source_url])},
|
961 |
-
'digests': {dist.version: set([None])}
|
962 |
-
}
|
963 |
-
return result
|
964 |
-
|
965 |
-
|
966 |
-
class AggregatingLocator(Locator):
|
967 |
-
"""
|
968 |
-
This class allows you to chain and/or merge a list of locators.
|
969 |
-
"""
|
970 |
-
def __init__(self, *locators, **kwargs):
|
971 |
-
"""
|
972 |
-
Initialise an instance.
|
973 |
-
|
974 |
-
:param locators: The list of locators to search.
|
975 |
-
:param kwargs: Passed to the superclass constructor,
|
976 |
-
except for:
|
977 |
-
* merge - if False (the default), the first successful
|
978 |
-
search from any of the locators is returned. If True,
|
979 |
-
the results from all locators are merged (this can be
|
980 |
-
slow).
|
981 |
-
"""
|
982 |
-
self.merge = kwargs.pop('merge', False)
|
983 |
-
self.locators = locators
|
984 |
-
super(AggregatingLocator, self).__init__(**kwargs)
|
985 |
-
|
986 |
-
def clear_cache(self):
|
987 |
-
super(AggregatingLocator, self).clear_cache()
|
988 |
-
for locator in self.locators:
|
989 |
-
locator.clear_cache()
|
990 |
-
|
991 |
-
def _set_scheme(self, value):
|
992 |
-
self._scheme = value
|
993 |
-
for locator in self.locators:
|
994 |
-
locator.scheme = value
|
995 |
-
|
996 |
-
scheme = property(Locator.scheme.fget, _set_scheme)
|
997 |
-
|
998 |
-
def _get_project(self, name):
|
999 |
-
result = {}
|
1000 |
-
for locator in self.locators:
|
1001 |
-
d = locator.get_project(name)
|
1002 |
-
if d:
|
1003 |
-
if self.merge:
|
1004 |
-
files = result.get('urls', {})
|
1005 |
-
digests = result.get('digests', {})
|
1006 |
-
# next line could overwrite result['urls'], result['digests']
|
1007 |
-
result.update(d)
|
1008 |
-
df = result.get('urls')
|
1009 |
-
if files and df:
|
1010 |
-
for k, v in files.items():
|
1011 |
-
if k in df:
|
1012 |
-
df[k] |= v
|
1013 |
-
else:
|
1014 |
-
df[k] = v
|
1015 |
-
dd = result.get('digests')
|
1016 |
-
if digests and dd:
|
1017 |
-
dd.update(digests)
|
1018 |
-
else:
|
1019 |
-
# See issue #18. If any dists are found and we're looking
|
1020 |
-
# for specific constraints, we only return something if
|
1021 |
-
# a match is found. For example, if a DirectoryLocator
|
1022 |
-
# returns just foo (1.0) while we're looking for
|
1023 |
-
# foo (>= 2.0), we'll pretend there was nothing there so
|
1024 |
-
# that subsequent locators can be queried. Otherwise we
|
1025 |
-
# would just return foo (1.0) which would then lead to a
|
1026 |
-
# failure to find foo (>= 2.0), because other locators
|
1027 |
-
# weren't searched. Note that this only matters when
|
1028 |
-
# merge=False.
|
1029 |
-
if self.matcher is None:
|
1030 |
-
found = True
|
1031 |
-
else:
|
1032 |
-
found = False
|
1033 |
-
for k in d:
|
1034 |
-
if self.matcher.match(k):
|
1035 |
-
found = True
|
1036 |
-
break
|
1037 |
-
if found:
|
1038 |
-
result = d
|
1039 |
-
break
|
1040 |
-
return result
|
1041 |
-
|
1042 |
-
def get_distribution_names(self):
|
1043 |
-
"""
|
1044 |
-
Return all the distribution names known to this locator.
|
1045 |
-
"""
|
1046 |
-
result = set()
|
1047 |
-
for locator in self.locators:
|
1048 |
-
try:
|
1049 |
-
result |= locator.get_distribution_names()
|
1050 |
-
except NotImplementedError:
|
1051 |
-
pass
|
1052 |
-
return result
|
1053 |
-
|
1054 |
-
|
1055 |
-
# We use a legacy scheme simply because most of the dists on PyPI use legacy
|
1056 |
-
# versions which don't conform to PEP 440.
|
1057 |
-
default_locator = AggregatingLocator(
|
1058 |
-
# JSONLocator(), # don't use as PEP 426 is withdrawn
|
1059 |
-
SimpleScrapingLocator('https://pypi.org/simple/',
|
1060 |
-
timeout=3.0),
|
1061 |
-
scheme='legacy')
|
1062 |
-
|
1063 |
-
locate = default_locator.locate
|
1064 |
-
|
1065 |
-
|
1066 |
-
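The module-level locate alias defined above is the usual entry point. A minimal usage sketch, assuming the standalone distlib package (from which this vendored copy derives) is installed:

# Minimal sketch of the module-level locate() entry point, assuming the
# standalone `distlib` package is installed.
from distlib.locators import locate

# Requirement syntax matches the locate() docstring: 'foo (>= 1.0, < 2.0)'.
dist = locate('requests (>= 2.0, < 3.0)')
if dist is None:
    print('no matching distribution found')
else:
    print(dist.name_and_version)
    print('download urls:', sorted(dist.download_urls))
    print('digests:', dist.digests)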
class DependencyFinder(object)
    Locates dependencies for distributions, using the given locator (default_locator if none is supplied) and its version scheme.
    - add_distribution() and remove_distribution() maintain the dists, dists_by_name and provided indexes (the last maps each provided name to the (version, dist) pairs that supply it).
    - get_matcher(reqt) builds a version matcher, falling back to a name-only match if the requirement's version cannot be parsed.
    - find_providers(reqt) returns the already-registered distributions that can fulfil a requirement.
    - try_to_replace(provider, other, problems) replaces other with provider only if provider meets every requirement that other fulfilled; otherwise a ('cantreplace', provider, other, unmatched requirements) entry is added to problems and False is returned.
    - find(requirement, meta_extras=None, prereleases=False) locates the requested distribution (a Distribution instance may also be passed directly, and DistlibException is raised if nothing can be located), then walks its run/meta requirements, build requirements and any requested :test:/:build:/:dev: meta extras (':*:' selects all three), locating providers as needed and retrying with prereleases=True when nothing else is available. It returns (dists, problems), where problems contains tuples such as ('unsatisfied', requirement), and each returned distribution has build_time_dependency set to True if it is only needed at build time.
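As the find() docstring above describes, the finder returns the located distributions together with any problems. A minimal sketch of that entry point, again assuming the standalone distlib package is installed:

# Minimal sketch of DependencyFinder.find(); names mirror the deleted module.
from distlib.locators import DependencyFinder, default_locator

finder = DependencyFinder(default_locator)
dists, problems = finder.find('requests (>= 2.0)', prereleases=False)

for dist in sorted(dists, key=lambda d: d.key):
    tag = ' (build-time only)' if dist.build_time_dependency else ''
    print(dist.name_and_version + tag)

for kind, *detail in problems:
    # e.g. ('unsatisfied', requirement) or ('cantreplace', provider, other, reqts)
    print('problem:', kind, detail)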
spaces/Audio-AGI/AudioSep/models/CLAP/open_clip/htsat.py
DELETED
@@ -1,1308 +0,0 @@
|
|
1 |
-
# Ke Chen
|
2 | |
3 |
-
# HTS-AT: A HIERARCHICAL TOKEN-SEMANTIC AUDIO TRANSFORMER FOR SOUND CLASSIFICATION AND DETECTION
|
4 |
-
# Some layers designed on the model
|
5 |
-
# below codes are based and referred from https://github.com/microsoft/Swin-Transformer
|
6 |
-
# Swin Transformer for Computer Vision: https://arxiv.org/pdf/2103.14030.pdf
|
7 |
-
|
8 |
-
import torch
|
9 |
-
import torch.nn as nn
|
10 |
-
import torch.nn.functional as F
|
11 |
-
from itertools import repeat
|
12 |
-
import collections.abc
|
13 |
-
import math
|
14 |
-
import warnings
|
15 |
-
|
16 |
-
from torch.nn.init import _calculate_fan_in_and_fan_out
|
17 |
-
import torch.utils.checkpoint as checkpoint
|
18 |
-
|
19 |
-
import random
|
20 |
-
|
21 |
-
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
|
22 |
-
from torchlibrosa.augmentation import SpecAugmentation
|
23 |
-
|
24 |
-
from itertools import repeat
|
25 |
-
from .utils import do_mixup, interpolate
|
26 |
-
|
27 |
-
from .feature_fusion import iAFF, AFF, DAF
|
28 |
-
|
29 |
-
# from PyTorch internals
|
30 |
-
def _ntuple(n):
|
31 |
-
def parse(x):
|
32 |
-
if isinstance(x, collections.abc.Iterable):
|
33 |
-
return x
|
34 |
-
return tuple(repeat(x, n))
|
35 |
-
|
36 |
-
return parse
|
37 |
-
|
38 |
-
|
39 |
-
to_1tuple = _ntuple(1)
|
40 |
-
to_2tuple = _ntuple(2)
|
41 |
-
to_3tuple = _ntuple(3)
|
42 |
-
to_4tuple = _ntuple(4)
|
43 |
-
to_ntuple = _ntuple
|
44 |
-
|
45 |
-
|
46 |
-
def drop_path(x, drop_prob: float = 0.0, training: bool = False):
|
47 |
-
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
|
48 |
-
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
|
49 |
-
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
|
50 |
-
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
|
51 |
-
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
|
52 |
-
'survival rate' as the argument.
|
53 |
-
"""
|
54 |
-
if drop_prob == 0.0 or not training:
|
55 |
-
return x
|
56 |
-
keep_prob = 1 - drop_prob
|
57 |
-
shape = (x.shape[0],) + (1,) * (
|
58 |
-
x.ndim - 1
|
59 |
-
) # work with diff dim tensors, not just 2D ConvNets
|
60 |
-
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
|
61 |
-
random_tensor.floor_() # binarize
|
62 |
-
output = x.div(keep_prob) * random_tensor
|
63 |
-
return output
|
64 |
-
|
65 |
-
|
66 |
-
class DropPath(nn.Module):
|
67 |
-
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
|
68 |
-
|
69 |
-
def __init__(self, drop_prob=None):
|
70 |
-
super(DropPath, self).__init__()
|
71 |
-
self.drop_prob = drop_prob
|
72 |
-
|
73 |
-
def forward(self, x):
|
74 |
-
return drop_path(x, self.drop_prob, self.training)
|
75 |
-
|
76 |
-
|
77 |
-
class PatchEmbed(nn.Module):
|
78 |
-
"""2D Image to Patch Embedding"""
|
79 |
-
|
80 |
-
def __init__(
|
81 |
-
self,
|
82 |
-
img_size=224,
|
83 |
-
patch_size=16,
|
84 |
-
in_chans=3,
|
85 |
-
embed_dim=768,
|
86 |
-
norm_layer=None,
|
87 |
-
flatten=True,
|
88 |
-
patch_stride=16,
|
89 |
-
enable_fusion=False,
|
90 |
-
fusion_type="None",
|
91 |
-
):
|
92 |
-
super().__init__()
|
93 |
-
img_size = to_2tuple(img_size)
|
94 |
-
patch_size = to_2tuple(patch_size)
|
95 |
-
patch_stride = to_2tuple(patch_stride)
|
96 |
-
self.img_size = img_size
|
97 |
-
self.patch_size = patch_size
|
98 |
-
self.patch_stride = patch_stride
|
99 |
-
self.grid_size = (
|
100 |
-
img_size[0] // patch_stride[0],
|
101 |
-
img_size[1] // patch_stride[1],
|
102 |
-
)
|
103 |
-
self.num_patches = self.grid_size[0] * self.grid_size[1]
|
104 |
-
self.flatten = flatten
|
105 |
-
self.in_chans = in_chans
|
106 |
-
self.embed_dim = embed_dim
|
107 |
-
|
108 |
-
self.enable_fusion = enable_fusion
|
109 |
-
self.fusion_type = fusion_type
|
110 |
-
|
111 |
-
padding = (
|
112 |
-
(patch_size[0] - patch_stride[0]) // 2,
|
113 |
-
(patch_size[1] - patch_stride[1]) // 2,
|
114 |
-
)
|
115 |
-
|
116 |
-
if (self.enable_fusion) and (self.fusion_type == "channel_map"):
|
117 |
-
self.proj = nn.Conv2d(
|
118 |
-
in_chans * 4,
|
119 |
-
embed_dim,
|
120 |
-
kernel_size=patch_size,
|
121 |
-
stride=patch_stride,
|
122 |
-
padding=padding,
|
123 |
-
)
|
124 |
-
else:
|
125 |
-
self.proj = nn.Conv2d(
|
126 |
-
in_chans,
|
127 |
-
embed_dim,
|
128 |
-
kernel_size=patch_size,
|
129 |
-
stride=patch_stride,
|
130 |
-
padding=padding,
|
131 |
-
)
|
132 |
-
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
|
133 |
-
|
134 |
-
if (self.enable_fusion) and (
|
135 |
-
self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d"]
|
136 |
-
):
|
137 |
-
self.mel_conv2d = nn.Conv2d(
|
138 |
-
in_chans,
|
139 |
-
embed_dim,
|
140 |
-
kernel_size=(patch_size[0], patch_size[1] * 3),
|
141 |
-
stride=(patch_stride[0], patch_stride[1] * 3),
|
142 |
-
padding=padding,
|
143 |
-
)
|
144 |
-
if self.fusion_type == "daf_2d":
|
145 |
-
self.fusion_model = DAF()
|
146 |
-
elif self.fusion_type == "aff_2d":
|
147 |
-
self.fusion_model = AFF(channels=embed_dim, type="2D")
|
148 |
-
elif self.fusion_type == "iaff_2d":
|
149 |
-
self.fusion_model = iAFF(channels=embed_dim, type="2D")
|
150 |
-
|
151 |
-
def forward(self, x, longer_idx=None):
|
152 |
-
if (self.enable_fusion) and (
|
153 |
-
self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d"]
|
154 |
-
):
|
155 |
-
global_x = x[:, 0:1, :, :]
|
156 |
-
|
157 |
-
# global processing
|
158 |
-
B, C, H, W = global_x.shape
|
159 |
-
assert (
|
160 |
-
H == self.img_size[0] and W == self.img_size[1]
|
161 |
-
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
|
162 |
-
global_x = self.proj(global_x)
|
163 |
-
TW = global_x.size(-1)
|
164 |
-
if len(longer_idx) > 0:
|
165 |
-
# local processing
|
166 |
-
local_x = x[longer_idx, 1:, :, :].contiguous()
|
167 |
-
B, C, H, W = local_x.shape
|
168 |
-
local_x = local_x.view(B * C, 1, H, W)
|
169 |
-
local_x = self.mel_conv2d(local_x)
|
170 |
-
local_x = local_x.view(
|
171 |
-
B, C, local_x.size(1), local_x.size(2), local_x.size(3)
|
172 |
-
)
|
173 |
-
local_x = local_x.permute((0, 2, 3, 1, 4)).contiguous().flatten(3)
|
174 |
-
TB, TC, TH, _ = local_x.size()
|
175 |
-
if local_x.size(-1) < TW:
|
176 |
-
local_x = torch.cat(
|
177 |
-
[
|
178 |
-
local_x,
|
179 |
-
torch.zeros(
|
180 |
-
(TB, TC, TH, TW - local_x.size(-1)),
|
181 |
-
device=global_x.device,
|
182 |
-
),
|
183 |
-
],
|
184 |
-
dim=-1,
|
185 |
-
)
|
186 |
-
else:
|
187 |
-
local_x = local_x[:, :, :, :TW]
|
188 |
-
|
189 |
-
global_x[longer_idx] = self.fusion_model(global_x[longer_idx], local_x)
|
190 |
-
x = global_x
|
191 |
-
else:
|
192 |
-
B, C, H, W = x.shape
|
193 |
-
assert (
|
194 |
-
H == self.img_size[0] and W == self.img_size[1]
|
195 |
-
), f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
|
196 |
-
x = self.proj(x)
|
197 |
-
|
198 |
-
if self.flatten:
|
199 |
-
x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
|
200 |
-
x = self.norm(x)
|
201 |
-
return x
|
202 |
-
|
203 |
-
|
204 |
-
class Mlp(nn.Module):
|
205 |
-
"""MLP as used in Vision Transformer, MLP-Mixer and related networks"""
|
206 |
-
|
207 |
-
def __init__(
|
208 |
-
self,
|
209 |
-
in_features,
|
210 |
-
hidden_features=None,
|
211 |
-
out_features=None,
|
212 |
-
act_layer=nn.GELU,
|
213 |
-
drop=0.0,
|
214 |
-
):
|
215 |
-
super().__init__()
|
216 |
-
out_features = out_features or in_features
|
217 |
-
hidden_features = hidden_features or in_features
|
218 |
-
self.fc1 = nn.Linear(in_features, hidden_features)
|
219 |
-
self.act = act_layer()
|
220 |
-
self.fc2 = nn.Linear(hidden_features, out_features)
|
221 |
-
self.drop = nn.Dropout(drop)
|
222 |
-
|
223 |
-
def forward(self, x):
|
224 |
-
x = self.fc1(x)
|
225 |
-
x = self.act(x)
|
226 |
-
x = self.drop(x)
|
227 |
-
x = self.fc2(x)
|
228 |
-
x = self.drop(x)
|
229 |
-
return x
|
230 |
-
|
231 |
-
|
232 |
-
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
|
233 |
-
# Cut & paste from PyTorch official master until it's in a few official releases - RW
|
234 |
-
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
|
235 |
-
def norm_cdf(x):
|
236 |
-
# Computes standard normal cumulative distribution function
|
237 |
-
return (1.0 + math.erf(x / math.sqrt(2.0))) / 2.0
|
238 |
-
|
239 |
-
if (mean < a - 2 * std) or (mean > b + 2 * std):
|
240 |
-
warnings.warn(
|
241 |
-
"mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
|
242 |
-
"The distribution of values may be incorrect.",
|
243 |
-
stacklevel=2,
|
244 |
-
)
|
245 |
-
|
246 |
-
with torch.no_grad():
|
247 |
-
# Values are generated by using a truncated uniform distribution and
|
248 |
-
# then using the inverse CDF for the normal distribution.
|
249 |
-
# Get upper and lower cdf values
|
250 |
-
l = norm_cdf((a - mean) / std)
|
251 |
-
u = norm_cdf((b - mean) / std)
|
252 |
-
|
253 |
-
# Uniformly fill tensor with values from [l, u], then translate to
|
254 |
-
# [2l-1, 2u-1].
|
255 |
-
tensor.uniform_(2 * l - 1, 2 * u - 1)
|
256 |
-
|
257 |
-
# Use inverse cdf transform for normal distribution to get truncated
|
258 |
-
# standard normal
|
259 |
-
tensor.erfinv_()
|
260 |
-
|
261 |
-
# Transform to proper mean, std
|
262 |
-
tensor.mul_(std * math.sqrt(2.0))
|
263 |
-
tensor.add_(mean)
|
264 |
-
|
265 |
-
# Clamp to ensure it's in the proper range
|
266 |
-
tensor.clamp_(min=a, max=b)
|
267 |
-
return tensor
|
268 |
-
|
269 |
-
|
270 |
-
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
|
271 |
-
# type: (Tensor, float, float, float, float) -> Tensor
|
272 |
-
r"""Fills the input Tensor with values drawn from a truncated
|
273 |
-
normal distribution. The values are effectively drawn from the
|
274 |
-
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
|
275 |
-
with values outside :math:`[a, b]` redrawn until they are within
|
276 |
-
the bounds. The method used for generating the random values works
|
277 |
-
best when :math:`a \leq \text{mean} \leq b`.
|
278 |
-
Args:
|
279 |
-
tensor: an n-dimensional `torch.Tensor`
|
280 |
-
mean: the mean of the normal distribution
|
281 |
-
std: the standard deviation of the normal distribution
|
282 |
-
a: the minimum cutoff value
|
283 |
-
b: the maximum cutoff value
|
284 |
-
Examples:
|
285 |
-
>>> w = torch.empty(3, 5)
|
286 |
-
>>> nn.init.trunc_normal_(w)
|
287 |
-
"""
|
288 |
-
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
|
289 |
-
|
290 |
-
|
291 |
-
def variance_scaling_(tensor, scale=1.0, mode="fan_in", distribution="normal"):
|
292 |
-
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
|
293 |
-
if mode == "fan_in":
|
294 |
-
denom = fan_in
|
295 |
-
elif mode == "fan_out":
|
296 |
-
denom = fan_out
|
297 |
-
elif mode == "fan_avg":
|
298 |
-
denom = (fan_in + fan_out) / 2
|
299 |
-
|
300 |
-
variance = scale / denom
|
301 |
-
|
302 |
-
if distribution == "truncated_normal":
|
303 |
-
# constant is stddev of standard normal truncated to (-2, 2)
|
304 |
-
trunc_normal_(tensor, std=math.sqrt(variance) / 0.87962566103423978)
|
305 |
-
elif distribution == "normal":
|
306 |
-
tensor.normal_(std=math.sqrt(variance))
|
307 |
-
elif distribution == "uniform":
|
308 |
-
bound = math.sqrt(3 * variance)
|
309 |
-
tensor.uniform_(-bound, bound)
|
310 |
-
else:
|
311 |
-
raise ValueError(f"invalid distribution {distribution}")
|
312 |
-
|
313 |
-
|
314 |
-
def lecun_normal_(tensor):
|
315 |
-
variance_scaling_(tensor, mode="fan_in", distribution="truncated_normal")
|
316 |
-
|
317 |
-
|
318 |
-
def window_partition(x, window_size):
|
319 |
-
"""
|
320 |
-
Args:
|
321 |
-
x: (B, H, W, C)
|
322 |
-
window_size (int): window size
|
323 |
-
Returns:
|
324 |
-
windows: (num_windows*B, window_size, window_size, C)
|
325 |
-
"""
|
326 |
-
B, H, W, C = x.shape
|
327 |
-
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
|
328 |
-
windows = (
|
329 |
-
x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
|
330 |
-
)
|
331 |
-
return windows
|
332 |
-
|
333 |
-
|
334 |
-
def window_reverse(windows, window_size, H, W):
|
335 |
-
"""
|
336 |
-
Args:
|
337 |
-
windows: (num_windows*B, window_size, window_size, C)
|
338 |
-
window_size (int): Window size
|
339 |
-
H (int): Height of image
|
340 |
-
W (int): Width of image
|
341 |
-
Returns:
|
342 |
-
x: (B, H, W, C)
|
343 |
-
"""
|
344 |
-
B = int(windows.shape[0] / (H * W / window_size / window_size))
|
345 |
-
x = windows.view(
|
346 |
-
B, H // window_size, W // window_size, window_size, window_size, -1
|
347 |
-
)
|
348 |
-
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
|
349 |
-
return x
|
350 |
-
|
351 |
-
|
352 |
-
class WindowAttention(nn.Module):
|
353 |
-
r"""Window based multi-head self attention (W-MSA) module with relative position bias.
|
354 |
-
It supports both of shifted and non-shifted window.
|
355 |
-
Args:
|
356 |
-
dim (int): Number of input channels.
|
357 |
-
window_size (tuple[int]): The height and width of the window.
|
358 |
-
num_heads (int): Number of attention heads.
|
359 |
-
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
360 |
-
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
|
361 |
-
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
|
362 |
-
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
|
363 |
-
"""
|
364 |
-
|
365 |
-
def __init__(
|
366 |
-
self,
|
367 |
-
dim,
|
368 |
-
window_size,
|
369 |
-
num_heads,
|
370 |
-
qkv_bias=True,
|
371 |
-
qk_scale=None,
|
372 |
-
attn_drop=0.0,
|
373 |
-
proj_drop=0.0,
|
374 |
-
):
|
375 |
-
|
376 |
-
super().__init__()
|
377 |
-
self.dim = dim
|
378 |
-
self.window_size = window_size # Wh, Ww
|
379 |
-
self.num_heads = num_heads
|
380 |
-
head_dim = dim // num_heads
|
381 |
-
self.scale = qk_scale or head_dim**-0.5
|
382 |
-
|
383 |
-
# define a parameter table of relative position bias
|
384 |
-
self.relative_position_bias_table = nn.Parameter(
|
385 |
-
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
|
386 |
-
) # 2*Wh-1 * 2*Ww-1, nH
|
387 |
-
|
388 |
-
# get pair-wise relative position index for each token inside the window
|
389 |
-
coords_h = torch.arange(self.window_size[0])
|
390 |
-
coords_w = torch.arange(self.window_size[1])
|
391 |
-
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
|
392 |
-
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
|
393 |
-
relative_coords = (
|
394 |
-
coords_flatten[:, :, None] - coords_flatten[:, None, :]
|
395 |
-
) # 2, Wh*Ww, Wh*Ww
|
396 |
-
relative_coords = relative_coords.permute(
|
397 |
-
1, 2, 0
|
398 |
-
).contiguous() # Wh*Ww, Wh*Ww, 2
|
399 |
-
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
|
400 |
-
relative_coords[:, :, 1] += self.window_size[1] - 1
|
401 |
-
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
|
402 |
-
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
|
403 |
-
self.register_buffer("relative_position_index", relative_position_index)
|
404 |
-
|
405 |
-
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
|
406 |
-
self.attn_drop = nn.Dropout(attn_drop)
|
407 |
-
self.proj = nn.Linear(dim, dim)
|
408 |
-
self.proj_drop = nn.Dropout(proj_drop)
|
409 |
-
|
410 |
-
trunc_normal_(self.relative_position_bias_table, std=0.02)
|
411 |
-
self.softmax = nn.Softmax(dim=-1)
|
412 |
-
|
413 |
-
def forward(self, x, mask=None):
|
414 |
-
"""
|
415 |
-
Args:
|
416 |
-
x: input features with shape of (num_windows*B, N, C)
|
417 |
-
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
|
418 |
-
"""
|
419 |
-
B_, N, C = x.shape
|
420 |
-
qkv = (
|
421 |
-
self.qkv(x)
|
422 |
-
.reshape(B_, N, 3, self.num_heads, C // self.num_heads)
|
423 |
-
.permute(2, 0, 3, 1, 4)
|
424 |
-
)
|
425 |
-
q, k, v = (
|
426 |
-
qkv[0],
|
427 |
-
qkv[1],
|
428 |
-
qkv[2],
|
429 |
-
) # make torchscript happy (cannot use tensor as tuple)
|
430 |
-
|
431 |
-
q = q * self.scale
|
432 |
-
attn = q @ k.transpose(-2, -1)
|
433 |
-
|
434 |
-
relative_position_bias = self.relative_position_bias_table[
|
435 |
-
self.relative_position_index.view(-1)
|
436 |
-
].view(
|
437 |
-
self.window_size[0] * self.window_size[1],
|
438 |
-
self.window_size[0] * self.window_size[1],
|
439 |
-
-1,
|
440 |
-
) # Wh*Ww,Wh*Ww,nH
|
441 |
-
relative_position_bias = relative_position_bias.permute(
|
442 |
-
2, 0, 1
|
443 |
-
).contiguous() # nH, Wh*Ww, Wh*Ww
|
444 |
-
attn = attn + relative_position_bias.unsqueeze(0)
|
445 |
-
|
446 |
-
if mask is not None:
|
447 |
-
nW = mask.shape[0]
|
448 |
-
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(
|
449 |
-
1
|
450 |
-
).unsqueeze(0)
|
451 |
-
attn = attn.view(-1, self.num_heads, N, N)
|
452 |
-
attn = self.softmax(attn)
|
453 |
-
else:
|
454 |
-
attn = self.softmax(attn)
|
455 |
-
|
456 |
-
attn = self.attn_drop(attn)
|
457 |
-
|
458 |
-
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
|
459 |
-
x = self.proj(x)
|
460 |
-
x = self.proj_drop(x)
|
461 |
-
return x, attn
|
462 |
-
|
463 |
-
def extra_repr(self):
|
464 |
-
return f"dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}"
|
465 |
-
|
466 |
-
|
467 |
-
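The relative-position bookkeeping above is easiest to see on a toy window. The following standalone sketch (not part of the original file) replays the same index construction for a 2x2 window and prints the resulting Wh*Ww x Wh*Ww table; each entry points into the (2*Wh-1)*(2*Ww-1) = 9-row bias table shared by all heads.

import torch

window_size = (2, 2)  # toy Wh, Ww

coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w]))       # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1)                         # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()   # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1                    # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = relative_coords.sum(-1)                 # Wh*Ww, Wh*Ww

print(relative_position_index)
# tensor([[4, 3, 1, 0],
#         [5, 4, 2, 1],
#         [7, 6, 4, 3],
#         [8, 7, 5, 4]])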
# We use a model based on the Swin Transformer block, therefore we can use the Swin Transformer pretrained weights
|
468 |
-
class SwinTransformerBlock(nn.Module):
|
469 |
-
r"""Swin Transformer Block.
|
470 |
-
Args:
|
471 |
-
dim (int): Number of input channels.
|
472 |
-
input_resolution (tuple[int]): Input resolution.
|
473 |
-
num_heads (int): Number of attention heads.
|
474 |
-
window_size (int): Window size.
|
475 |
-
shift_size (int): Shift size for SW-MSA.
|
476 |
-
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
477 |
-
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
478 |
-
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
|
479 |
-
drop (float, optional): Dropout rate. Default: 0.0
|
480 |
-
attn_drop (float, optional): Attention dropout rate. Default: 0.0
|
481 |
-
drop_path (float, optional): Stochastic depth rate. Default: 0.0
|
482 |
-
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
|
483 |
-
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
484 |
-
"""
|
485 |
-
|
486 |
-
def __init__(
|
487 |
-
self,
|
488 |
-
dim,
|
489 |
-
input_resolution,
|
490 |
-
num_heads,
|
491 |
-
window_size=7,
|
492 |
-
shift_size=0,
|
493 |
-
mlp_ratio=4.0,
|
494 |
-
qkv_bias=True,
|
495 |
-
qk_scale=None,
|
496 |
-
drop=0.0,
|
497 |
-
attn_drop=0.0,
|
498 |
-
drop_path=0.0,
|
499 |
-
act_layer=nn.GELU,
|
500 |
-
norm_layer=nn.LayerNorm,
|
501 |
-
norm_before_mlp="ln",
|
502 |
-
):
|
503 |
-
super().__init__()
|
504 |
-
self.dim = dim
|
505 |
-
self.input_resolution = input_resolution
|
506 |
-
self.num_heads = num_heads
|
507 |
-
self.window_size = window_size
|
508 |
-
self.shift_size = shift_size
|
509 |
-
self.mlp_ratio = mlp_ratio
|
510 |
-
self.norm_before_mlp = norm_before_mlp
|
511 |
-
if min(self.input_resolution) <= self.window_size:
|
512 |
-
# if window size is larger than input resolution, we don't partition windows
|
513 |
-
self.shift_size = 0
|
514 |
-
self.window_size = min(self.input_resolution)
|
515 |
-
assert (
|
516 |
-
0 <= self.shift_size < self.window_size
|
517 |
-
), "shift_size must in 0-window_size"
|
518 |
-
|
519 |
-
self.norm1 = norm_layer(dim)
|
520 |
-
self.attn = WindowAttention(
|
521 |
-
dim,
|
522 |
-
window_size=to_2tuple(self.window_size),
|
523 |
-
num_heads=num_heads,
|
524 |
-
qkv_bias=qkv_bias,
|
525 |
-
qk_scale=qk_scale,
|
526 |
-
attn_drop=attn_drop,
|
527 |
-
proj_drop=drop,
|
528 |
-
)
|
529 |
-
|
530 |
-
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
|
531 |
-
if self.norm_before_mlp == "ln":
|
532 |
-
self.norm2 = nn.LayerNorm(dim)
|
533 |
-
elif self.norm_before_mlp == "bn":
|
534 |
-
self.norm2 = lambda x: nn.BatchNorm1d(dim)(x.transpose(1, 2)).transpose(
|
535 |
-
1, 2
|
536 |
-
)
|
537 |
-
else:
|
538 |
-
raise NotImplementedError
|
539 |
-
mlp_hidden_dim = int(dim * mlp_ratio)
|
540 |
-
self.mlp = Mlp(
|
541 |
-
in_features=dim,
|
542 |
-
hidden_features=mlp_hidden_dim,
|
543 |
-
act_layer=act_layer,
|
544 |
-
drop=drop,
|
545 |
-
)
|
546 |
-
|
547 |
-
if self.shift_size > 0:
|
548 |
-
# calculate attention mask for SW-MSA
|
549 |
-
H, W = self.input_resolution
|
550 |
-
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
|
551 |
-
h_slices = (
|
552 |
-
slice(0, -self.window_size),
|
553 |
-
slice(-self.window_size, -self.shift_size),
|
554 |
-
slice(-self.shift_size, None),
|
555 |
-
)
|
556 |
-
w_slices = (
|
557 |
-
slice(0, -self.window_size),
|
558 |
-
slice(-self.window_size, -self.shift_size),
|
559 |
-
slice(-self.shift_size, None),
|
560 |
-
)
|
561 |
-
cnt = 0
|
562 |
-
for h in h_slices:
|
563 |
-
for w in w_slices:
|
564 |
-
img_mask[:, h, w, :] = cnt
|
565 |
-
cnt += 1
|
566 |
-
|
567 |
-
mask_windows = window_partition(
|
568 |
-
img_mask, self.window_size
|
569 |
-
) # nW, window_size, window_size, 1
|
570 |
-
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
|
571 |
-
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
|
572 |
-
attn_mask = attn_mask.masked_fill(
|
573 |
-
attn_mask != 0, float(-100.0)
|
574 |
-
).masked_fill(attn_mask == 0, float(0.0))
|
575 |
-
else:
|
576 |
-
attn_mask = None
|
577 |
-
|
578 |
-
self.register_buffer("attn_mask", attn_mask)
|
579 |
-
|
580 |
-
def forward(self, x):
|
581 |
-
# pdb.set_trace()
|
582 |
-
H, W = self.input_resolution
|
583 |
-
# print("H: ", H)
|
584 |
-
# print("W: ", W)
|
585 |
-
# pdb.set_trace()
|
586 |
-
B, L, C = x.shape
|
587 |
-
# assert L == H * W, "input feature has wrong size"
|
588 |
-
|
589 |
-
shortcut = x
|
590 |
-
x = self.norm1(x)
|
591 |
-
x = x.view(B, H, W, C)
|
592 |
-
|
593 |
-
# cyclic shift
|
594 |
-
if self.shift_size > 0:
|
595 |
-
shifted_x = torch.roll(
|
596 |
-
x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)
|
597 |
-
)
|
598 |
-
else:
|
599 |
-
shifted_x = x
|
600 |
-
|
601 |
-
# partition windows
|
602 |
-
x_windows = window_partition(
|
603 |
-
shifted_x, self.window_size
|
604 |
-
) # nW*B, window_size, window_size, C
|
605 |
-
x_windows = x_windows.view(
|
606 |
-
-1, self.window_size * self.window_size, C
|
607 |
-
) # nW*B, window_size*window_size, C
|
608 |
-
|
609 |
-
# W-MSA/SW-MSA
|
610 |
-
attn_windows, attn = self.attn(
|
611 |
-
x_windows, mask=self.attn_mask
|
612 |
-
) # nW*B, window_size*window_size, C
|
613 |
-
|
614 |
-
# merge windows
|
615 |
-
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
|
616 |
-
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
|
617 |
-
|
618 |
-
# reverse cyclic shift
|
619 |
-
if self.shift_size > 0:
|
620 |
-
x = torch.roll(
|
621 |
-
shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)
|
622 |
-
)
|
623 |
-
else:
|
624 |
-
x = shifted_x
|
625 |
-
x = x.view(B, H * W, C)
|
626 |
-
|
627 |
-
# FFN
|
628 |
-
x = shortcut + self.drop_path(x)
|
629 |
-
x = x + self.drop_path(self.mlp(self.norm2(x)))
|
630 |
-
|
631 |
-
return x, attn
|
632 |
-
|
633 |
-
def extra_repr(self):
|
634 |
-
return (
|
635 |
-
f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, "
|
636 |
-
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
|
637 |
-
)
|
638 |
-
|
639 |
-
|
640 |
-
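The shifted-window attention mask built in the constructor above is another piece that is clearer on a concrete example. The sketch below is self-contained: it re-implements a standard window_partition helper (assumed to match the one used elsewhere in this file but not shown in this excerpt) and builds the mask for an 8x8 feature map with window_size=4 and shift_size=2.

import torch


def window_partition(x, window_size):
    # Split a (B, H, W, C) map into (num_windows*B, window_size, window_size, C) tiles.
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    return x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)


H = W = 8
window_size, shift_size = 4, 2

# Label the nine regions created by the cyclic shift with distinct ids.
img_mask = torch.zeros((1, H, W, 1))
slices = (slice(0, -window_size), slice(-window_size, -shift_size), slice(-shift_size, None))
cnt = 0
for h in slices:
    for w in slices:
        img_mask[:, h, w, :] = cnt
        cnt += 1

mask_windows = window_partition(img_mask, window_size).view(-1, window_size * window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, 0.0)

print(attn_mask.shape)                              # torch.Size([4, 16, 16])
print((attn_mask == -100.0).flatten(1).any(dim=1))
# tensor([False,  True,  True,  True]) -> only the top-left window needs no masking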
class PatchMerging(nn.Module):
|
641 |
-
r"""Patch Merging Layer.
|
642 |
-
Args:
|
643 |
-
input_resolution (tuple[int]): Resolution of input feature.
|
644 |
-
dim (int): Number of input channels.
|
645 |
-
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
646 |
-
"""
|
647 |
-
|
648 |
-
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
|
649 |
-
super().__init__()
|
650 |
-
self.input_resolution = input_resolution
|
651 |
-
self.dim = dim
|
652 |
-
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
|
653 |
-
self.norm = norm_layer(4 * dim)
|
654 |
-
|
655 |
-
def forward(self, x):
|
656 |
-
"""
|
657 |
-
x: B, H*W, C
|
658 |
-
"""
|
659 |
-
H, W = self.input_resolution
|
660 |
-
B, L, C = x.shape
|
661 |
-
assert L == H * W, "input feature has wrong size"
|
662 |
-
assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."
|
663 |
-
|
664 |
-
x = x.view(B, H, W, C)
|
665 |
-
|
666 |
-
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
|
667 |
-
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
|
668 |
-
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
|
669 |
-
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
|
670 |
-
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
|
671 |
-
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
|
672 |
-
|
673 |
-
x = self.norm(x)
|
674 |
-
x = self.reduction(x)
|
675 |
-
|
676 |
-
return x
|
677 |
-
|
678 |
-
def extra_repr(self):
|
679 |
-
return f"input_resolution={self.input_resolution}, dim={self.dim}"
|
680 |
-
|
681 |
-
|
682 |
-
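As a quick sanity check of the merge above, the snippet below (standalone, with a toy 8x8 resolution and 96 channels) walks the same slice-and-concatenate steps: the token count drops by a factor of 4 while the channel width doubles after the linear reduction.

import torch
import torch.nn as nn

B, H, W, C = 2, 8, 8, 96
x = torch.randn(B, H * W, C).view(B, H, W, C)

# Gather the four pixels of every 2x2 block and stack them along the channel axis.
x0 = x[:, 0::2, 0::2, :]
x1 = x[:, 1::2, 0::2, :]
x2 = x[:, 0::2, 1::2, :]
x3 = x[:, 1::2, 1::2, :]
merged = torch.cat([x0, x1, x2, x3], -1).view(B, -1, 4 * C)   # B, (H/2)*(W/2), 4C

out = nn.Linear(4 * C, 2 * C, bias=False)(nn.LayerNorm(4 * C)(merged))

print(merged.shape, out.shape)  # torch.Size([2, 16, 384]) torch.Size([2, 16, 192])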
class BasicLayer(nn.Module):
|
683 |
-
"""A basic Swin Transformer layer for one stage.
|
684 |
-
Args:
|
685 |
-
dim (int): Number of input channels.
|
686 |
-
input_resolution (tuple[int]): Input resolution.
|
687 |
-
depth (int): Number of blocks.
|
688 |
-
num_heads (int): Number of attention heads.
|
689 |
-
window_size (int): Local window size.
|
690 |
-
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
|
691 |
-
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
|
692 |
-
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
|
693 |
-
drop (float, optional): Dropout rate. Default: 0.0
|
694 |
-
attn_drop (float, optional): Attention dropout rate. Default: 0.0
|
695 |
-
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
|
696 |
-
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
|
697 |
-
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
|
698 |
-
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
|
699 |
-
"""
|
700 |
-
|
701 |
-
def __init__(
|
702 |
-
self,
|
703 |
-
dim,
|
704 |
-
input_resolution,
|
705 |
-
depth,
|
706 |
-
num_heads,
|
707 |
-
window_size,
|
708 |
-
mlp_ratio=4.0,
|
709 |
-
qkv_bias=True,
|
710 |
-
qk_scale=None,
|
711 |
-
drop=0.0,
|
712 |
-
attn_drop=0.0,
|
713 |
-
drop_path=0.0,
|
714 |
-
norm_layer=nn.LayerNorm,
|
715 |
-
downsample=None,
|
716 |
-
use_checkpoint=False,
|
717 |
-
norm_before_mlp="ln",
|
718 |
-
):
|
719 |
-
|
720 |
-
super().__init__()
|
721 |
-
self.dim = dim
|
722 |
-
self.input_resolution = input_resolution
|
723 |
-
self.depth = depth
|
724 |
-
self.use_checkpoint = use_checkpoint
|
725 |
-
|
726 |
-
# build blocks
|
727 |
-
self.blocks = nn.ModuleList(
|
728 |
-
[
|
729 |
-
SwinTransformerBlock(
|
730 |
-
dim=dim,
|
731 |
-
input_resolution=input_resolution,
|
732 |
-
num_heads=num_heads,
|
733 |
-
window_size=window_size,
|
734 |
-
shift_size=0 if (i % 2 == 0) else window_size // 2,
|
735 |
-
mlp_ratio=mlp_ratio,
|
736 |
-
qkv_bias=qkv_bias,
|
737 |
-
qk_scale=qk_scale,
|
738 |
-
drop=drop,
|
739 |
-
attn_drop=attn_drop,
|
740 |
-
drop_path=drop_path[i]
|
741 |
-
if isinstance(drop_path, list)
|
742 |
-
else drop_path,
|
743 |
-
norm_layer=norm_layer,
|
744 |
-
norm_before_mlp=norm_before_mlp,
|
745 |
-
)
|
746 |
-
for i in range(depth)
|
747 |
-
]
|
748 |
-
)
|
749 |
-
|
750 |
-
# patch merging layer
|
751 |
-
if downsample is not None:
|
752 |
-
self.downsample = downsample(
|
753 |
-
input_resolution, dim=dim, norm_layer=norm_layer
|
754 |
-
)
|
755 |
-
else:
|
756 |
-
self.downsample = None
|
757 |
-
|
758 |
-
def forward(self, x):
|
759 |
-
attns = []
|
760 |
-
for blk in self.blocks:
|
761 |
-
if self.use_checkpoint:
|
762 |
-
x = checkpoint.checkpoint(blk, x)
|
763 |
-
else:
|
764 |
-
x, attn = blk(x)
|
765 |
-
if not self.training:
|
766 |
-
attns.append(attn.unsqueeze(0))
|
767 |
-
if self.downsample is not None:
|
768 |
-
x = self.downsample(x)
|
769 |
-
if not self.training:
|
770 |
-
attn = torch.cat(attns, dim=0)
|
771 |
-
attn = torch.mean(attn, dim=0)
|
772 |
-
return x, attn
|
773 |
-
|
774 |
-
def extra_repr(self):
|
775 |
-
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
|
776 |
-
|
777 |
-
|
778 |
-
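BasicLayer accepts drop_path either as a single float or as a per-block list; the HTSAT constructor below builds that list with a linear decay rule and hands each stage its own slice. A minimal sketch of that schedule, using the default depths and a 0.1 ceiling:

import torch

depths = [2, 2, 6, 2]
drop_path_rate = 0.1

# One rate per block, growing linearly from 0 to drop_path_rate over the whole network.
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]

for i_layer in range(len(depths)):
    start, stop = sum(depths[:i_layer]), sum(depths[: i_layer + 1])
    print(f"stage {i_layer}: {[round(r, 3) for r in dpr[start:stop]]}")
# stage 0: [0.0, 0.009]
# stage 1: [0.018, 0.027]
# stage 2: [0.036, 0.045, 0.055, 0.064, 0.073, 0.082]
# stage 3: [0.091, 0.1]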
# The Core of HTSAT
|
779 |
-
class HTSAT_Swin_Transformer(nn.Module):
|
780 |
-
r"""HTSAT based on the Swin Transformer
|
781 |
-
Args:
|
782 |
-
spec_size (int | tuple(int)): Input Spectrogram size. Default 256
|
783 |
-
patch_size (int | tuple(int)): Patch size. Default: 4
|
784 |
-
patch_stride (int | tuple(int)): Patch stride for the frequency and time axes. Default: 4
|
785 |
-
in_chans (int): Number of input image channels. Default: 1 (mono)
|
786 |
-
num_classes (int): Number of classes for classification head. Default: 527
|
787 |
-
embed_dim (int): Patch embedding dimension. Default: 96
|
788 |
-
depths (tuple(int)): Depth of each HTSAT-Swin Transformer layer.
|
789 |
-
num_heads (tuple(int)): Number of attention heads in different layers.
|
790 |
-
window_size (int): Window size. Default: 8
|
791 |
-
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
|
792 |
-
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
|
793 |
-
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
|
794 |
-
drop_rate (float): Dropout rate. Default: 0
|
795 |
-
attn_drop_rate (float): Attention dropout rate. Default: 0
|
796 |
-
drop_path_rate (float): Stochastic depth rate. Default: 0.1
|
797 |
-
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
|
798 |
-
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
|
799 |
-
patch_norm (bool): If True, add normalization after patch embedding. Default: True
|
800 |
-
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
|
801 |
-
config (module): The configuration Module from config.py
|
802 |
-
"""
|
803 |
-
|
804 |
-
def __init__(
|
805 |
-
self,
|
806 |
-
spec_size=256,
|
807 |
-
patch_size=4,
|
808 |
-
patch_stride=(4, 4),
|
809 |
-
in_chans=1,
|
810 |
-
num_classes=527,
|
811 |
-
embed_dim=96,
|
812 |
-
depths=[2, 2, 6, 2],
|
813 |
-
num_heads=[4, 8, 16, 32],
|
814 |
-
window_size=8,
|
815 |
-
mlp_ratio=4.0,
|
816 |
-
qkv_bias=True,
|
817 |
-
qk_scale=None,
|
818 |
-
drop_rate=0.0,
|
819 |
-
attn_drop_rate=0.0,
|
820 |
-
drop_path_rate=0.1,
|
821 |
-
norm_layer=nn.LayerNorm,
|
822 |
-
ape=False,
|
823 |
-
patch_norm=True,
|
824 |
-
use_checkpoint=False,
|
825 |
-
norm_before_mlp="ln",
|
826 |
-
config=None,
|
827 |
-
enable_fusion=False,
|
828 |
-
fusion_type="None",
|
829 |
-
**kwargs,
|
830 |
-
):
|
831 |
-
super(HTSAT_Swin_Transformer, self).__init__()
|
832 |
-
|
833 |
-
self.config = config
|
834 |
-
self.spec_size = spec_size
|
835 |
-
self.patch_stride = patch_stride
|
836 |
-
self.patch_size = patch_size
|
837 |
-
self.window_size = window_size
|
838 |
-
self.embed_dim = embed_dim
|
839 |
-
self.depths = depths
|
840 |
-
self.ape = ape
|
841 |
-
self.in_chans = in_chans
|
842 |
-
self.num_classes = num_classes
|
843 |
-
self.num_heads = num_heads
|
844 |
-
self.num_layers = len(self.depths)
|
845 |
-
self.num_features = int(self.embed_dim * 2 ** (self.num_layers - 1))
|
846 |
-
|
847 |
-
self.drop_rate = drop_rate
|
848 |
-
self.attn_drop_rate = attn_drop_rate
|
849 |
-
self.drop_path_rate = drop_path_rate
|
850 |
-
|
851 |
-
self.qkv_bias = qkv_bias
|
852 |
-
self.qk_scale = None
|
853 |
-
|
854 |
-
self.patch_norm = patch_norm
|
855 |
-
self.norm_layer = norm_layer if self.patch_norm else None
|
856 |
-
self.norm_before_mlp = norm_before_mlp
|
857 |
-
self.mlp_ratio = mlp_ratio
|
858 |
-
|
859 |
-
self.use_checkpoint = use_checkpoint
|
860 |
-
|
861 |
-
self.enable_fusion = enable_fusion
|
862 |
-
self.fusion_type = fusion_type
|
863 |
-
|
864 |
-
# process mel-spectrogram; used only once
|
865 |
-
self.freq_ratio = self.spec_size // self.config.mel_bins
|
866 |
-
window = "hann"
|
867 |
-
center = True
|
868 |
-
pad_mode = "reflect"
|
869 |
-
ref = 1.0
|
870 |
-
amin = 1e-10
|
871 |
-
top_db = None
|
872 |
-
self.interpolate_ratio = 32 # Downsampled ratio
|
873 |
-
# Spectrogram extractor
|
874 |
-
self.spectrogram_extractor = Spectrogram(
|
875 |
-
n_fft=config.window_size,
|
876 |
-
hop_length=config.hop_size,
|
877 |
-
win_length=config.window_size,
|
878 |
-
window=window,
|
879 |
-
center=center,
|
880 |
-
pad_mode=pad_mode,
|
881 |
-
freeze_parameters=True,
|
882 |
-
)
|
883 |
-
# Logmel feature extractor
|
884 |
-
self.logmel_extractor = LogmelFilterBank(
|
885 |
-
sr=config.sample_rate,
|
886 |
-
n_fft=config.window_size,
|
887 |
-
n_mels=config.mel_bins,
|
888 |
-
fmin=config.fmin,
|
889 |
-
fmax=config.fmax,
|
890 |
-
ref=ref,
|
891 |
-
amin=amin,
|
892 |
-
top_db=top_db,
|
893 |
-
freeze_parameters=True,
|
894 |
-
)
|
895 |
-
# Spec augmenter
|
896 |
-
self.spec_augmenter = SpecAugmentation(
|
897 |
-
time_drop_width=64,
|
898 |
-
time_stripes_num=2,
|
899 |
-
freq_drop_width=8,
|
900 |
-
freq_stripes_num=2,
|
901 |
-
) # 2 2
|
902 |
-
self.bn0 = nn.BatchNorm2d(self.config.mel_bins)
|
903 |
-
|
904 |
-
# split spectrogram into non-overlapping patches
|
905 |
-
self.patch_embed = PatchEmbed(
|
906 |
-
img_size=self.spec_size,
|
907 |
-
patch_size=self.patch_size,
|
908 |
-
in_chans=self.in_chans,
|
909 |
-
embed_dim=self.embed_dim,
|
910 |
-
norm_layer=self.norm_layer,
|
911 |
-
patch_stride=patch_stride,
|
912 |
-
enable_fusion=self.enable_fusion,
|
913 |
-
fusion_type=self.fusion_type,
|
914 |
-
)
|
915 |
-
|
916 |
-
num_patches = self.patch_embed.num_patches
|
917 |
-
patches_resolution = self.patch_embed.grid_size
|
918 |
-
self.patches_resolution = patches_resolution
|
919 |
-
|
920 |
-
# absolute position embedding
|
921 |
-
if self.ape:
|
922 |
-
self.absolute_pos_embed = nn.Parameter(
|
923 |
-
torch.zeros(1, num_patches, self.embed_dim)
|
924 |
-
)
|
925 |
-
trunc_normal_(self.absolute_pos_embed, std=0.02)
|
926 |
-
|
927 |
-
self.pos_drop = nn.Dropout(p=self.drop_rate)
|
928 |
-
|
929 |
-
# stochastic depth
|
930 |
-
dpr = [
|
931 |
-
x.item() for x in torch.linspace(0, self.drop_path_rate, sum(self.depths))
|
932 |
-
] # stochastic depth decay rule
|
933 |
-
|
934 |
-
# build layers
|
935 |
-
self.layers = nn.ModuleList()
|
936 |
-
for i_layer in range(self.num_layers):
|
937 |
-
layer = BasicLayer(
|
938 |
-
dim=int(self.embed_dim * 2**i_layer),
|
939 |
-
input_resolution=(
|
940 |
-
patches_resolution[0] // (2**i_layer),
|
941 |
-
patches_resolution[1] // (2**i_layer),
|
942 |
-
),
|
943 |
-
depth=self.depths[i_layer],
|
944 |
-
num_heads=self.num_heads[i_layer],
|
945 |
-
window_size=self.window_size,
|
946 |
-
mlp_ratio=self.mlp_ratio,
|
947 |
-
qkv_bias=self.qkv_bias,
|
948 |
-
qk_scale=self.qk_scale,
|
949 |
-
drop=self.drop_rate,
|
950 |
-
attn_drop=self.attn_drop_rate,
|
951 |
-
drop_path=dpr[
|
952 |
-
sum(self.depths[:i_layer]) : sum(self.depths[: i_layer + 1])
|
953 |
-
],
|
954 |
-
norm_layer=self.norm_layer,
|
955 |
-
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
|
956 |
-
use_checkpoint=use_checkpoint,
|
957 |
-
norm_before_mlp=self.norm_before_mlp,
|
958 |
-
)
|
959 |
-
self.layers.append(layer)
|
960 |
-
|
961 |
-
self.norm = self.norm_layer(self.num_features)
|
962 |
-
self.avgpool = nn.AdaptiveAvgPool1d(1)
|
963 |
-
self.maxpool = nn.AdaptiveMaxPool1d(1)
|
964 |
-
|
965 |
-
SF = (
|
966 |
-
self.spec_size
|
967 |
-
// (2 ** (len(self.depths) - 1))
|
968 |
-
// self.patch_stride[0]
|
969 |
-
// self.freq_ratio
|
970 |
-
)
|
971 |
-
self.tscam_conv = nn.Conv2d(
|
972 |
-
in_channels=self.num_features,
|
973 |
-
out_channels=self.num_classes,
|
974 |
-
kernel_size=(SF, 3),
|
975 |
-
padding=(0, 1),
|
976 |
-
)
|
977 |
-
self.head = nn.Linear(num_classes, num_classes)
|
978 |
-
|
979 |
-
if (self.enable_fusion) and (
|
980 |
-
self.fusion_type in ["daf_1d", "aff_1d", "iaff_1d"]
|
981 |
-
):
|
982 |
-
self.mel_conv1d = nn.Sequential(
|
983 |
-
nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2),
|
984 |
-
nn.BatchNorm1d(64),
|
985 |
-
)
|
986 |
-
if self.fusion_type == "daf_1d":
|
987 |
-
self.fusion_model = DAF()
|
988 |
-
elif self.fusion_type == "aff_1d":
|
989 |
-
self.fusion_model = AFF(channels=64, type="1D")
|
990 |
-
elif self.fusion_type == "iaff_1d":
|
991 |
-
self.fusion_model = iAFF(channels=64, type="1D")
|
992 |
-
|
993 |
-
self.apply(self._init_weights)
|
994 |
-
|
995 |
-
def _init_weights(self, m):
|
996 |
-
if isinstance(m, nn.Linear):
|
997 |
-
trunc_normal_(m.weight, std=0.02)
|
998 |
-
if isinstance(m, nn.Linear) and m.bias is not None:
|
999 |
-
nn.init.constant_(m.bias, 0)
|
1000 |
-
elif isinstance(m, nn.LayerNorm):
|
1001 |
-
nn.init.constant_(m.bias, 0)
|
1002 |
-
nn.init.constant_(m.weight, 1.0)
|
1003 |
-
|
1004 |
-
@torch.jit.ignore
|
1005 |
-
def no_weight_decay(self):
|
1006 |
-
return {"absolute_pos_embed"}
|
1007 |
-
|
1008 |
-
@torch.jit.ignore
|
1009 |
-
def no_weight_decay_keywords(self):
|
1010 |
-
return {"relative_position_bias_table"}
|
1011 |
-
|
1012 |
-
def forward_features(self, x, longer_idx=None):
|
1013 |
-
# A deprecated optimization for using a hierarchical output from different blocks
|
1014 |
-
|
1015 |
-
frames_num = x.shape[2]
|
1016 |
-
x = self.patch_embed(x, longer_idx=longer_idx)
|
1017 |
-
if self.ape:
|
1018 |
-
x = x + self.absolute_pos_embed
|
1019 |
-
x = self.pos_drop(x)
|
1020 |
-
for i, layer in enumerate(self.layers):
|
1021 |
-
x, attn = layer(x)
|
1022 |
-
# for x
|
1023 |
-
x = self.norm(x)
|
1024 |
-
B, N, C = x.shape
|
1025 |
-
SF = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0]
|
1026 |
-
ST = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1]
|
1027 |
-
x = x.permute(0, 2, 1).contiguous().reshape(B, C, SF, ST)
|
1028 |
-
B, C, F, T = x.shape
|
1029 |
-
# group 2D CNN
|
1030 |
-
c_freq_bin = F // self.freq_ratio
|
1031 |
-
x = x.reshape(B, C, F // c_freq_bin, c_freq_bin, T)
|
1032 |
-
x = x.permute(0, 1, 3, 2, 4).contiguous().reshape(B, C, c_freq_bin, -1)
|
1033 |
-
# get latent_output
|
1034 |
-
fine_grained_latent_output = torch.mean(x, dim=2)
|
1035 |
-
fine_grained_latent_output = interpolate(
|
1036 |
-
fine_grained_latent_output.permute(0, 2, 1).contiguous(),
|
1037 |
-
8 * self.patch_stride[1],
|
1038 |
-
)
|
1039 |
-
|
1040 |
-
latent_output = self.avgpool(torch.flatten(x, 2))
|
1041 |
-
latent_output = torch.flatten(latent_output, 1)
|
1042 |
-
|
1043 |
-
# display the attention map, if needed
|
1044 |
-
|
1045 |
-
x = self.tscam_conv(x)
|
1046 |
-
x = torch.flatten(x, 2) # B, C, T
|
1047 |
-
|
1048 |
-
fpx = interpolate(
|
1049 |
-
torch.sigmoid(x).permute(0, 2, 1).contiguous(), 8 * self.patch_stride[1]
|
1050 |
-
)
|
1051 |
-
|
1052 |
-
x = self.avgpool(x)
|
1053 |
-
x = torch.flatten(x, 1)
|
1054 |
-
|
1055 |
-
output_dict = {
|
1056 |
-
"framewise_output": fpx, # already sigmoided
|
1057 |
-
"clipwise_output": torch.sigmoid(x),
|
1058 |
-
"fine_grained_embedding": fine_grained_latent_output,
|
1059 |
-
"embedding": latent_output,
|
1060 |
-
}
|
1061 |
-
|
1062 |
-
return output_dict
|
1063 |
-
|
1064 |
-
def crop_wav(self, x, crop_size, spe_pos=None):
|
1065 |
-
time_steps = x.shape[2]
|
1066 |
-
tx = torch.zeros(x.shape[0], x.shape[1], crop_size, x.shape[3]).to(x.device)
|
1067 |
-
for i in range(len(x)):
|
1068 |
-
if spe_pos is None:
|
1069 |
-
crop_pos = random.randint(0, time_steps - crop_size - 1)
|
1070 |
-
else:
|
1071 |
-
crop_pos = spe_pos
|
1072 |
-
tx[i][0] = x[i, 0, crop_pos : crop_pos + crop_size, :]
|
1073 |
-
return tx
|
1074 |
-
|
1075 |
-
# Reshape the waveform spectrogram to an image-sized square, if you want to use the pretrained Swin Transformer model
|
1076 |
-
def reshape_wav2img(self, x):
|
1077 |
-
B, C, T, F = x.shape
|
1078 |
-
target_T = int(self.spec_size * self.freq_ratio)
|
1079 |
-
target_F = self.spec_size // self.freq_ratio
|
1080 |
-
assert (
|
1081 |
-
T <= target_T and F <= target_F
|
1082 |
-
), "the wav size should less than or equal to the swin input size"
|
1083 |
-
# to avoid bicubic zero error
|
1084 |
-
if T < target_T:
|
1085 |
-
x = nn.functional.interpolate(
|
1086 |
-
x, (target_T, x.shape[3]), mode="bicubic", align_corners=True
|
1087 |
-
)
|
1088 |
-
if F < target_F:
|
1089 |
-
x = nn.functional.interpolate(
|
1090 |
-
x, (x.shape[2], target_F), mode="bicubic", align_corners=True
|
1091 |
-
)
|
1092 |
-
x = x.permute(0, 1, 3, 2).contiguous()
|
1093 |
-
x = x.reshape(
|
1094 |
-
x.shape[0],
|
1095 |
-
x.shape[1],
|
1096 |
-
x.shape[2],
|
1097 |
-
self.freq_ratio,
|
1098 |
-
x.shape[3] // self.freq_ratio,
|
1099 |
-
)
|
1100 |
-
# print(x.shape)
|
1101 |
-
x = x.permute(0, 1, 3, 2, 4).contiguous()
|
1102 |
-
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3], x.shape[4])
|
1103 |
-
return x
|
1104 |
-
|
1105 |
-
# Repeat the waveform spectrogram to an image-sized square, if you want to use the pretrained Swin Transformer model
|
1106 |
-
def repeat_wat2img(self, x, cur_pos):
|
1107 |
-
B, C, T, F = x.shape
|
1108 |
-
target_T = int(self.spec_size * self.freq_ratio)
|
1109 |
-
target_F = self.spec_size // self.freq_ratio
|
1110 |
-
assert (
|
1111 |
-
T <= target_T and F <= target_F
|
1112 |
-
), "the wav size should less than or equal to the swin input size"
|
1113 |
-
# to avoid bicubic zero error
|
1114 |
-
if T < target_T:
|
1115 |
-
x = nn.functional.interpolate(
|
1116 |
-
x, (target_T, x.shape[3]), mode="bicubic", align_corners=True
|
1117 |
-
)
|
1118 |
-
if F < target_F:
|
1119 |
-
x = nn.functional.interpolate(
|
1120 |
-
x, (x.shape[2], target_F), mode="bicubic", align_corners=True
|
1121 |
-
)
|
1122 |
-
x = x.permute(0, 1, 3, 2).contiguous() # B C F T
|
1123 |
-
x = x[:, :, :, cur_pos : cur_pos + self.spec_size]
|
1124 |
-
x = x.repeat(repeats=(1, 1, 4, 1))
|
1125 |
-
return x
|
1126 |
-
|
1127 |
-
def forward(
|
1128 |
-
self, x: torch.Tensor, mixup_lambda=None, infer_mode=False, device=None
|
1129 |
-
): # out_feat_keys: List[str] = None):
|
1130 |
-
|
1131 |
-
if self.enable_fusion and x["longer"].sum() == 0:
|
1132 |
-
# if no audio is longer than 10s, then randomly select one audio to be longer
|
1133 |
-
x["longer"][torch.randint(0, x["longer"].shape[0], (1,))] = True
|
1134 |
-
|
1135 |
-
if not self.enable_fusion:
|
1136 |
-
x = x["waveform"].to(device=device, non_blocking=True)
|
1137 |
-
x = self.spectrogram_extractor(x) # (batch_size, 1, time_steps, freq_bins)
|
1138 |
-
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
|
1139 |
-
x = x.transpose(1, 3)
|
1140 |
-
x = self.bn0(x)
|
1141 |
-
x = x.transpose(1, 3)
|
1142 |
-
if self.training:
|
1143 |
-
x = self.spec_augmenter(x)
|
1144 |
-
|
1145 |
-
if self.training and mixup_lambda is not None:
|
1146 |
-
x = do_mixup(x, mixup_lambda)
|
1147 |
-
|
1148 |
-
x = self.reshape_wav2img(x)
|
1149 |
-
output_dict = self.forward_features(x)
|
1150 |
-
else:
|
1151 |
-
longer_list = x["longer"].to(device=device, non_blocking=True)
|
1152 |
-
x = x["mel_fusion"].to(device=device, non_blocking=True)
|
1153 |
-
x = x.transpose(1, 3)
|
1154 |
-
x = self.bn0(x)
|
1155 |
-
x = x.transpose(1, 3)
|
1156 |
-
longer_list_idx = torch.where(longer_list)[0]
|
1157 |
-
if self.fusion_type in ["daf_1d", "aff_1d", "iaff_1d"]:
|
1158 |
-
new_x = x[:, 0:1, :, :].clone().contiguous()
|
1159 |
-
if len(longer_list_idx) > 0:
|
1160 |
-
# local processing
|
1161 |
-
fusion_x_local = x[longer_list_idx, 1:, :, :].clone().contiguous()
|
1162 |
-
FB, FC, FT, FF = fusion_x_local.size()
|
1163 |
-
fusion_x_local = fusion_x_local.view(FB * FC, FT, FF)
|
1164 |
-
fusion_x_local = torch.permute(
|
1165 |
-
fusion_x_local, (0, 2, 1)
|
1166 |
-
).contiguous()
|
1167 |
-
fusion_x_local = self.mel_conv1d(fusion_x_local)
|
1168 |
-
fusion_x_local = fusion_x_local.view(
|
1169 |
-
FB, FC, FF, fusion_x_local.size(-1)
|
1170 |
-
)
|
1171 |
-
fusion_x_local = (
|
1172 |
-
torch.permute(fusion_x_local, (0, 2, 1, 3))
|
1173 |
-
.contiguous()
|
1174 |
-
.flatten(2)
|
1175 |
-
)
|
1176 |
-
if fusion_x_local.size(-1) < FT:
|
1177 |
-
fusion_x_local = torch.cat(
|
1178 |
-
[
|
1179 |
-
fusion_x_local,
|
1180 |
-
torch.zeros(
|
1181 |
-
(FB, FF, FT - fusion_x_local.size(-1)),
|
1182 |
-
device=device,
|
1183 |
-
),
|
1184 |
-
],
|
1185 |
-
dim=-1,
|
1186 |
-
)
|
1187 |
-
else:
|
1188 |
-
fusion_x_local = fusion_x_local[:, :, :FT]
|
1189 |
-
# 1D fusion
|
1190 |
-
new_x = new_x.squeeze(1).permute((0, 2, 1)).contiguous()
|
1191 |
-
new_x[longer_list_idx] = self.fusion_model(
|
1192 |
-
new_x[longer_list_idx], fusion_x_local
|
1193 |
-
)
|
1194 |
-
x = new_x.permute((0, 2, 1)).contiguous()[:, None, :, :]
|
1195 |
-
else:
|
1196 |
-
x = new_x
|
1197 |
-
|
1198 |
-
elif self.fusion_type in ["daf_2d", "aff_2d", "iaff_2d", "channel_map"]:
|
1199 |
-
x = x # no change
|
1200 |
-
|
1201 |
-
if self.training:
|
1202 |
-
x = self.spec_augmenter(x)
|
1203 |
-
if self.training and mixup_lambda is not None:
|
1204 |
-
x = do_mixup(x, mixup_lambda)
|
1205 |
-
|
1206 |
-
x = self.reshape_wav2img(x)
|
1207 |
-
output_dict = self.forward_features(x, longer_idx=longer_list_idx)
|
1208 |
-
|
1209 |
-
# if infer_mode:
|
1210 |
-
# # in infer mode. we need to handle different length audio input
|
1211 |
-
# frame_num = x.shape[2]
|
1212 |
-
# target_T = int(self.spec_size * self.freq_ratio)
|
1213 |
-
# repeat_ratio = math.floor(target_T / frame_num)
|
1214 |
-
# x = x.repeat(repeats=(1,1,repeat_ratio,1))
|
1215 |
-
# x = self.reshape_wav2img(x)
|
1216 |
-
# output_dict = self.forward_features(x)
|
1217 |
-
# else:
|
1218 |
-
# if x.shape[2] > self.freq_ratio * self.spec_size:
|
1219 |
-
# if self.training:
|
1220 |
-
# x = self.crop_wav(x, crop_size=self.freq_ratio * self.spec_size)
|
1221 |
-
# x = self.reshape_wav2img(x)
|
1222 |
-
# output_dict = self.forward_features(x)
|
1223 |
-
# else:
|
1224 |
-
# # Change: Hard code here
|
1225 |
-
# overlap_size = (x.shape[2] - 1) // 4
|
1226 |
-
# output_dicts = []
|
1227 |
-
# crop_size = (x.shape[2] - 1) // 2
|
1228 |
-
# for cur_pos in range(0, x.shape[2] - crop_size - 1, overlap_size):
|
1229 |
-
# tx = self.crop_wav(x, crop_size = crop_size, spe_pos = cur_pos)
|
1230 |
-
# tx = self.reshape_wav2img(tx)
|
1231 |
-
# output_dicts.append(self.forward_features(tx))
|
1232 |
-
# clipwise_output = torch.zeros_like(output_dicts[0]["clipwise_output"]).float().to(x.device)
|
1233 |
-
# framewise_output = torch.zeros_like(output_dicts[0]["framewise_output"]).float().to(x.device)
|
1234 |
-
# for d in output_dicts:
|
1235 |
-
# clipwise_output += d["clipwise_output"]
|
1236 |
-
# framewise_output += d["framewise_output"]
|
1237 |
-
# clipwise_output = clipwise_output / len(output_dicts)
|
1238 |
-
# framewise_output = framewise_output / len(output_dicts)
|
1239 |
-
# output_dict = {
|
1240 |
-
# 'framewise_output': framewise_output,
|
1241 |
-
# 'clipwise_output': clipwise_output
|
1242 |
-
# }
|
1243 |
-
# else: # this part is typically used, and most easy one
|
1244 |
-
# x = self.reshape_wav2img(x)
|
1245 |
-
# output_dict = self.forward_features(x)
|
1246 |
-
# x = self.head(x)
|
1247 |
-
|
1248 |
-
# We process the data in the dataloader part, so here we only consider the case where input_T < fixed_T
|
1249 |
-
|
1250 |
-
return output_dict
|
1251 |
-
|
1252 |
-
|
1253 |
-
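The reshape_wav2img trick above folds a long mel-spectrogram into a square "image" so the Swin backbone (and its pretrained weights) can be reused. The shapes are easier to follow with numbers; assuming spec_size=256 and mel_bins=64 (so freq_ratio=4, both values taken from a typical HTSAT configuration rather than from this excerpt), a (B, 1, 1024, 64) input is cut into four time chunks that are stacked along the frequency axis, giving (B, 1, 256, 256). The snippet below replays just the permute/reshape part on a dummy tensor.

import torch

spec_size, mel_bins = 256, 64          # assumed config values
freq_ratio = spec_size // mel_bins     # 4
B, C, T, F = 2, 1, spec_size * freq_ratio, mel_bins

x = torch.randn(B, C, T, F)                                   # (2, 1, 1024, 64)
x = x.permute(0, 1, 3, 2).contiguous()                        # (2, 1, 64, 1024)
x = x.reshape(B, C, F, freq_ratio, T // freq_ratio)           # (2, 1, 64, 4, 256)
x = x.permute(0, 1, 3, 2, 4).contiguous()                     # (2, 1, 4, 64, 256)
x = x.reshape(B, C, freq_ratio * F, T // freq_ratio)          # (2, 1, 256, 256)

print(x.shape)  # torch.Size([2, 1, 256, 256])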
def create_htsat_model(audio_cfg, enable_fusion=False, fusion_type="None"):
|
1254 |
-
try:
|
1255 |
-
|
1256 |
-
assert audio_cfg.model_name in [
|
1257 |
-
"tiny",
|
1258 |
-
"base",
|
1259 |
-
"large",
|
1260 |
-
], "model name for HTS-AT is wrong!"
|
1261 |
-
if audio_cfg.model_name == "tiny":
|
1262 |
-
model = HTSAT_Swin_Transformer(
|
1263 |
-
spec_size=256,
|
1264 |
-
patch_size=4,
|
1265 |
-
patch_stride=(4, 4),
|
1266 |
-
num_classes=audio_cfg.class_num,
|
1267 |
-
embed_dim=96,
|
1268 |
-
depths=[2, 2, 6, 2],
|
1269 |
-
num_heads=[4, 8, 16, 32],
|
1270 |
-
window_size=8,
|
1271 |
-
config=audio_cfg,
|
1272 |
-
enable_fusion=enable_fusion,
|
1273 |
-
fusion_type=fusion_type,
|
1274 |
-
)
|
1275 |
-
elif audio_cfg.model_name == "base":
|
1276 |
-
model = HTSAT_Swin_Transformer(
|
1277 |
-
spec_size=256,
|
1278 |
-
patch_size=4,
|
1279 |
-
patch_stride=(4, 4),
|
1280 |
-
num_classes=audio_cfg.class_num,
|
1281 |
-
embed_dim=128,
|
1282 |
-
depths=[2, 2, 12, 2],
|
1283 |
-
num_heads=[4, 8, 16, 32],
|
1284 |
-
window_size=8,
|
1285 |
-
config=audio_cfg,
|
1286 |
-
enable_fusion=enable_fusion,
|
1287 |
-
fusion_type=fusion_type,
|
1288 |
-
)
|
1289 |
-
elif audio_cfg.model_name == "large":
|
1290 |
-
model = HTSAT_Swin_Transformer(
|
1291 |
-
spec_size=256,
|
1292 |
-
patch_size=4,
|
1293 |
-
patch_stride=(4, 4),
|
1294 |
-
num_classes=audio_cfg.class_num,
|
1295 |
-
embed_dim=256,
|
1296 |
-
depths=[2, 2, 12, 2],
|
1297 |
-
num_heads=[4, 8, 16, 32],
|
1298 |
-
window_size=8,
|
1299 |
-
config=audio_cfg,
|
1300 |
-
enable_fusion=enable_fusion,
|
1301 |
-
fusion_type=fusion_type,
|
1302 |
-
)
|
1303 |
-
|
1304 |
-
return model
|
1305 |
-
except:
|
1306 |
-
raise RuntimeError(
|
1307 |
-
f"Import Model for {audio_cfg.model_name} not found, or the audio cfg parameters are not enough."
|
1308 |
-
)
|
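For reference, this is roughly how the factory above is used to build the CLAP audio tower. The config object and the import path below are illustrative assumptions (the real project assembles audio_cfg from its model configs and ships the spectrogram dependencies such as torchlibrosa), so treat this as a sketch rather than the project's actual entry point.

from types import SimpleNamespace

# Hypothetical config carrying the attributes HTSAT_Swin_Transformer reads from `config`.
audio_cfg = SimpleNamespace(
    model_name="tiny",
    class_num=527,
    sample_rate=32000,
    window_size=1024,   # STFT n_fft / win_length
    hop_size=320,
    mel_bins=64,
    fmin=50,
    fmax=14000,
)

from htsat import create_htsat_model  # assumes this file is importable as `htsat`

model = create_htsat_model(audio_cfg, enable_fusion=False, fusion_type="None")
print(sum(p.numel() for p in model.parameters()) / 1e6, "M parameters")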
spaces/Audio-AGI/WavJourney/scripts/EnvsSetup.sh
DELETED
@@ -1,7 +0,0 @@
-conda env create -f Envs/WavJourney.yml && \
-conda env update -f Envs/Bark.yml && \
-conda env update -f Envs/AudioCraft.yml && \
-conda run --live-stream -n WavJourney pip install -U git+https://[email protected]/facebookresearch/audiocraft@c5157b5bf14bf83449c17ea1eeb66c19fb4bc7f0#egg=audiocraft && \
-conda run --live-stream -n WavJourney pip install -U --no-deps voicefixer==0.1.2 && \
-conda run --live-stream -n WavJourney pip install -U --no-deps numpy==1.21 && \
-conda run --live-stream -n WavJourney pip install -U --no-deps librosa==0.8.1
spaces/Awesimo/jojogan/e4e/models/stylegan2/model.py
DELETED
@@ -1,678 +0,0 @@
|
|
1 |
-
import math
|
2 |
-
import random
|
3 |
-
import torch
|
4 |
-
from torch import nn
|
5 |
-
from torch.nn import functional as F
|
6 |
-
|
7 |
-
if torch.cuda.is_available():
|
8 |
-
from op.fused_act import FusedLeakyReLU, fused_leaky_relu
|
9 |
-
from op.upfirdn2d import upfirdn2d
|
10 |
-
else:
|
11 |
-
from op.fused_act_cpu import FusedLeakyReLU, fused_leaky_relu
|
12 |
-
from op.upfirdn2d_cpu import upfirdn2d
|
13 |
-
|
14 |
-
|
15 |
-
class PixelNorm(nn.Module):
|
16 |
-
def __init__(self):
|
17 |
-
super().__init__()
|
18 |
-
|
19 |
-
def forward(self, input):
|
20 |
-
return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8)
|
21 |
-
|
22 |
-
|
23 |
-
def make_kernel(k):
|
24 |
-
k = torch.tensor(k, dtype=torch.float32)
|
25 |
-
|
26 |
-
if k.ndim == 1:
|
27 |
-
k = k[None, :] * k[:, None]
|
28 |
-
|
29 |
-
k /= k.sum()
|
30 |
-
|
31 |
-
return k
|
32 |
-
|
33 |
-
|
34 |
-
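make_kernel turns a 1-D tap list into a normalized 2-D FIR kernel that the Upsample, Downsample and Blur modules below hand to upfirdn2d. A standalone sketch of what it produces for the default [1, 3, 3, 1] taps:

import torch

k = torch.tensor([1, 3, 3, 1], dtype=torch.float32)
k = k[None, :] * k[:, None]   # outer product -> separable 4x4 blur kernel
k /= k.sum()                  # normalize so the filter preserves overall magnitude

print(k)
# tensor([[0.0156, 0.0469, 0.0469, 0.0156],
#         [0.0469, 0.1406, 0.1406, 0.0469],
#         [0.0469, 0.1406, 0.1406, 0.0469],
#         [0.0156, 0.0469, 0.0469, 0.0156]])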
class Upsample(nn.Module):
|
35 |
-
def __init__(self, kernel, factor=2):
|
36 |
-
super().__init__()
|
37 |
-
|
38 |
-
self.factor = factor
|
39 |
-
kernel = make_kernel(kernel) * (factor ** 2)
|
40 |
-
self.register_buffer('kernel', kernel)
|
41 |
-
|
42 |
-
p = kernel.shape[0] - factor
|
43 |
-
|
44 |
-
pad0 = (p + 1) // 2 + factor - 1
|
45 |
-
pad1 = p // 2
|
46 |
-
|
47 |
-
self.pad = (pad0, pad1)
|
48 |
-
|
49 |
-
def forward(self, input):
|
50 |
-
out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
|
51 |
-
|
52 |
-
return out
|
53 |
-
|
54 |
-
|
55 |
-
class Downsample(nn.Module):
|
56 |
-
def __init__(self, kernel, factor=2):
|
57 |
-
super().__init__()
|
58 |
-
|
59 |
-
self.factor = factor
|
60 |
-
kernel = make_kernel(kernel)
|
61 |
-
self.register_buffer('kernel', kernel)
|
62 |
-
|
63 |
-
p = kernel.shape[0] - factor
|
64 |
-
|
65 |
-
pad0 = (p + 1) // 2
|
66 |
-
pad1 = p // 2
|
67 |
-
|
68 |
-
self.pad = (pad0, pad1)
|
69 |
-
|
70 |
-
def forward(self, input):
|
71 |
-
out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
|
72 |
-
|
73 |
-
return out
|
74 |
-
|
75 |
-
|
76 |
-
class Blur(nn.Module):
|
77 |
-
def __init__(self, kernel, pad, upsample_factor=1):
|
78 |
-
super().__init__()
|
79 |
-
|
80 |
-
kernel = make_kernel(kernel)
|
81 |
-
|
82 |
-
if upsample_factor > 1:
|
83 |
-
kernel = kernel * (upsample_factor ** 2)
|
84 |
-
|
85 |
-
self.register_buffer('kernel', kernel)
|
86 |
-
|
87 |
-
self.pad = pad
|
88 |
-
|
89 |
-
def forward(self, input):
|
90 |
-
out = upfirdn2d(input, self.kernel, pad=self.pad)
|
91 |
-
|
92 |
-
return out
|
93 |
-
|
94 |
-
|
95 |
-
class EqualConv2d(nn.Module):
|
96 |
-
def __init__(
|
97 |
-
self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
|
98 |
-
):
|
99 |
-
super().__init__()
|
100 |
-
|
101 |
-
self.weight = nn.Parameter(
|
102 |
-
torch.randn(out_channel, in_channel, kernel_size, kernel_size)
|
103 |
-
)
|
104 |
-
self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
|
105 |
-
|
106 |
-
self.stride = stride
|
107 |
-
self.padding = padding
|
108 |
-
|
109 |
-
if bias:
|
110 |
-
self.bias = nn.Parameter(torch.zeros(out_channel))
|
111 |
-
|
112 |
-
else:
|
113 |
-
self.bias = None
|
114 |
-
|
115 |
-
def forward(self, input):
|
116 |
-
out = F.conv2d(
|
117 |
-
input,
|
118 |
-
self.weight * self.scale,
|
119 |
-
bias=self.bias,
|
120 |
-
stride=self.stride,
|
121 |
-
padding=self.padding,
|
122 |
-
)
|
123 |
-
|
124 |
-
return out
|
125 |
-
|
126 |
-
def __repr__(self):
|
127 |
-
return (
|
128 |
-
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},'
|
129 |
-
f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})'
|
130 |
-
)
|
131 |
-
|
132 |
-
|
133 |
-
class EqualLinear(nn.Module):
|
134 |
-
def __init__(
|
135 |
-
self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
|
136 |
-
):
|
137 |
-
super().__init__()
|
138 |
-
|
139 |
-
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
|
140 |
-
|
141 |
-
if bias:
|
142 |
-
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
|
143 |
-
|
144 |
-
else:
|
145 |
-
self.bias = None
|
146 |
-
|
147 |
-
self.activation = activation
|
148 |
-
|
149 |
-
self.scale = (1 / math.sqrt(in_dim)) * lr_mul
|
150 |
-
self.lr_mul = lr_mul
|
151 |
-
|
152 |
-
def forward(self, input):
|
153 |
-
if self.activation:
|
154 |
-
out = F.linear(input, self.weight * self.scale)
|
155 |
-
out = fused_leaky_relu(out, self.bias * self.lr_mul)
|
156 |
-
|
157 |
-
else:
|
158 |
-
out = F.linear(
|
159 |
-
input, self.weight * self.scale, bias=self.bias * self.lr_mul
|
160 |
-
)
|
161 |
-
|
162 |
-
return out
|
163 |
-
|
164 |
-
def __repr__(self):
|
165 |
-
return (
|
166 |
-
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
|
167 |
-
)
|
168 |
-
|
169 |
-
|
170 |
-
class ScaledLeakyReLU(nn.Module):
|
171 |
-
def __init__(self, negative_slope=0.2):
|
172 |
-
super().__init__()
|
173 |
-
|
174 |
-
self.negative_slope = negative_slope
|
175 |
-
|
176 |
-
def forward(self, input):
|
177 |
-
out = F.leaky_relu(input, negative_slope=self.negative_slope)
|
178 |
-
|
179 |
-
return out * math.sqrt(2)
|
180 |
-
|
181 |
-
|
182 |
-
class ModulatedConv2d(nn.Module):
|
183 |
-
def __init__(
|
184 |
-
self,
|
185 |
-
in_channel,
|
186 |
-
out_channel,
|
187 |
-
kernel_size,
|
188 |
-
style_dim,
|
189 |
-
demodulate=True,
|
190 |
-
upsample=False,
|
191 |
-
downsample=False,
|
192 |
-
blur_kernel=[1, 3, 3, 1],
|
193 |
-
):
|
194 |
-
super().__init__()
|
195 |
-
|
196 |
-
self.eps = 1e-8
|
197 |
-
self.kernel_size = kernel_size
|
198 |
-
self.in_channel = in_channel
|
199 |
-
self.out_channel = out_channel
|
200 |
-
self.upsample = upsample
|
201 |
-
self.downsample = downsample
|
202 |
-
|
203 |
-
if upsample:
|
204 |
-
factor = 2
|
205 |
-
p = (len(blur_kernel) - factor) - (kernel_size - 1)
|
206 |
-
pad0 = (p + 1) // 2 + factor - 1
|
207 |
-
pad1 = p // 2 + 1
|
208 |
-
|
209 |
-
self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
|
210 |
-
|
211 |
-
if downsample:
|
212 |
-
factor = 2
|
213 |
-
p = (len(blur_kernel) - factor) + (kernel_size - 1)
|
214 |
-
pad0 = (p + 1) // 2
|
215 |
-
pad1 = p // 2
|
216 |
-
|
217 |
-
self.blur = Blur(blur_kernel, pad=(pad0, pad1))
|
218 |
-
|
219 |
-
fan_in = in_channel * kernel_size ** 2
|
220 |
-
self.scale = 1 / math.sqrt(fan_in)
|
221 |
-
self.padding = kernel_size // 2
|
222 |
-
|
223 |
-
self.weight = nn.Parameter(
|
224 |
-
torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
|
225 |
-
)
|
226 |
-
|
227 |
-
self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
|
228 |
-
|
229 |
-
self.demodulate = demodulate
|
230 |
-
|
231 |
-
def __repr__(self):
|
232 |
-
return (
|
233 |
-
f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, '
|
234 |
-
f'upsample={self.upsample}, downsample={self.downsample})'
|
235 |
-
)
|
236 |
-
|
237 |
-
def forward(self, input, style):
|
238 |
-
batch, in_channel, height, width = input.shape
|
239 |
-
|
240 |
-
style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
|
241 |
-
weight = self.scale * self.weight * style
|
242 |
-
|
243 |
-
if self.demodulate:
|
244 |
-
demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
|
245 |
-
weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
|
246 |
-
|
247 |
-
weight = weight.view(
|
248 |
-
batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
|
249 |
-
)
|
250 |
-
|
251 |
-
if self.upsample:
|
252 |
-
input = input.view(1, batch * in_channel, height, width)
|
253 |
-
weight = weight.view(
|
254 |
-
batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
|
255 |
-
)
|
256 |
-
weight = weight.transpose(1, 2).reshape(
|
257 |
-
batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
|
258 |
-
)
|
259 |
-
out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
|
260 |
-
_, _, height, width = out.shape
|
261 |
-
out = out.view(batch, self.out_channel, height, width)
|
262 |
-
out = self.blur(out)
|
263 |
-
|
264 |
-
elif self.downsample:
|
265 |
-
input = self.blur(input)
|
266 |
-
_, _, height, width = input.shape
|
267 |
-
input = input.view(1, batch * in_channel, height, width)
|
268 |
-
out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
|
269 |
-
_, _, height, width = out.shape
|
270 |
-
out = out.view(batch, self.out_channel, height, width)
|
271 |
-
|
272 |
-
else:
|
273 |
-
input = input.view(1, batch * in_channel, height, width)
|
274 |
-
out = F.conv2d(input, weight, padding=self.padding, groups=batch)
|
275 |
-
_, _, height, width = out.shape
|
276 |
-
out = out.view(batch, self.out_channel, height, width)
|
277 |
-
|
278 |
-
return out
|
279 |
-
|
280 |
-
|
281 |
-
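The modulation/demodulation step in ModulatedConv2d can be checked numerically: after scaling the shared weight by a per-sample style and dividing by the demodulation factor, each output filter ends up with (approximately) unit L2 norm for every sample. A small standalone check with toy sizes:

import torch

batch, in_ch, out_ch, k = 2, 8, 16, 3
weight = torch.randn(1, out_ch, in_ch, k, k)
style = torch.randn(batch, 1, in_ch, 1, 1)     # per-input-channel gains from the style MLP
scale = 1 / (in_ch * k * k) ** 0.5             # equalized-lr scale, as in the constructor

w = scale * weight * style                     # batch, out_ch, in_ch, k, k
demod = torch.rsqrt(w.pow(2).sum([2, 3, 4]) + 1e-8)
w = w * demod.view(batch, out_ch, 1, 1, 1)

print(w.pow(2).sum([2, 3, 4]).sqrt())          # ~1.0 for every (sample, output filter) pair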
class NoiseInjection(nn.Module):
|
282 |
-
def __init__(self):
|
283 |
-
super().__init__()
|
284 |
-
|
285 |
-
self.weight = nn.Parameter(torch.zeros(1))
|
286 |
-
|
287 |
-
def forward(self, image, noise=None):
|
288 |
-
if noise is None:
|
289 |
-
batch, _, height, width = image.shape
|
290 |
-
noise = image.new_empty(batch, 1, height, width).normal_()
|
291 |
-
|
292 |
-
return image + self.weight * noise
|
293 |
-
|
294 |
-
|
295 |
-
class ConstantInput(nn.Module):
|
296 |
-
def __init__(self, channel, size=4):
|
297 |
-
super().__init__()
|
298 |
-
|
299 |
-
self.input = nn.Parameter(torch.randn(1, channel, size, size))
|
300 |
-
|
301 |
-
def forward(self, input):
|
302 |
-
batch = input.shape[0]
|
303 |
-
out = self.input.repeat(batch, 1, 1, 1)
|
304 |
-
|
305 |
-
return out
|
306 |
-
|
307 |
-
|
308 |
-
class StyledConv(nn.Module):
|
309 |
-
def __init__(
|
310 |
-
self,
|
311 |
-
in_channel,
|
312 |
-
out_channel,
|
313 |
-
kernel_size,
|
314 |
-
style_dim,
|
315 |
-
upsample=False,
|
316 |
-
blur_kernel=[1, 3, 3, 1],
|
317 |
-
demodulate=True,
|
318 |
-
):
|
319 |
-
super().__init__()
|
320 |
-
|
321 |
-
self.conv = ModulatedConv2d(
|
322 |
-
in_channel,
|
323 |
-
out_channel,
|
324 |
-
kernel_size,
|
325 |
-
style_dim,
|
326 |
-
upsample=upsample,
|
327 |
-
blur_kernel=blur_kernel,
|
328 |
-
demodulate=demodulate,
|
329 |
-
)
|
330 |
-
|
331 |
-
self.noise = NoiseInjection()
|
332 |
-
# self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
|
333 |
-
# self.activate = ScaledLeakyReLU(0.2)
|
334 |
-
self.activate = FusedLeakyReLU(out_channel)
|
335 |
-
|
336 |
-
def forward(self, input, style, noise=None):
|
337 |
-
out = self.conv(input, style)
|
338 |
-
out = self.noise(out, noise=noise)
|
339 |
-
# out = out + self.bias
|
340 |
-
out = self.activate(out)
|
341 |
-
|
342 |
-
return out
|
343 |
-
|
344 |
-
|
345 |
-
class ToRGB(nn.Module):
|
346 |
-
def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
|
347 |
-
super().__init__()
|
348 |
-
|
349 |
-
if upsample:
|
350 |
-
self.upsample = Upsample(blur_kernel)
|
351 |
-
|
352 |
-
self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
|
353 |
-
self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
|
354 |
-
|
355 |
-
def forward(self, input, style, skip=None):
|
356 |
-
out = self.conv(input, style)
|
357 |
-
out = out + self.bias
|
358 |
-
|
359 |
-
if skip is not None:
|
360 |
-
skip = self.upsample(skip)
|
361 |
-
|
362 |
-
out = out + skip
|
363 |
-
|
364 |
-
return out
|
365 |
-
|
366 |
-
|
367 |
-
class Generator(nn.Module):
|
368 |
-
def __init__(
|
369 |
-
self,
|
370 |
-
size,
|
371 |
-
style_dim,
|
372 |
-
n_mlp,
|
373 |
-
channel_multiplier=2,
|
374 |
-
blur_kernel=[1, 3, 3, 1],
|
375 |
-
lr_mlp=0.01,
|
376 |
-
):
|
377 |
-
super().__init__()
|
378 |
-
|
379 |
-
self.size = size
|
380 |
-
|
381 |
-
self.style_dim = style_dim
|
382 |
-
|
383 |
-
layers = [PixelNorm()]
|
384 |
-
|
385 |
-
for i in range(n_mlp):
|
386 |
-
layers.append(
|
387 |
-
EqualLinear(
|
388 |
-
style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
|
389 |
-
)
|
390 |
-
)
|
391 |
-
|
392 |
-
self.style = nn.Sequential(*layers)
|
393 |
-
|
394 |
-
self.channels = {
|
395 |
-
4: 512,
|
396 |
-
8: 512,
|
397 |
-
16: 512,
|
398 |
-
32: 512,
|
399 |
-
64: 256 * channel_multiplier,
|
400 |
-
128: 128 * channel_multiplier,
|
401 |
-
256: 64 * channel_multiplier,
|
402 |
-
512: 32 * channel_multiplier,
|
403 |
-
1024: 16 * channel_multiplier,
|
404 |
-
}
|
405 |
-
|
406 |
-
self.input = ConstantInput(self.channels[4])
|
407 |
-
self.conv1 = StyledConv(
|
408 |
-
self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
|
409 |
-
)
|
410 |
-
self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
|
411 |
-
|
412 |
-
self.log_size = int(math.log(size, 2))
|
413 |
-
self.num_layers = (self.log_size - 2) * 2 + 1
|
414 |
-
|
415 |
-
self.convs = nn.ModuleList()
|
416 |
-
self.upsamples = nn.ModuleList()
|
417 |
-
self.to_rgbs = nn.ModuleList()
|
418 |
-
self.noises = nn.Module()
|
419 |
-
|
420 |
-
in_channel = self.channels[4]
|
421 |
-
|
422 |
-
for layer_idx in range(self.num_layers):
|
423 |
-
res = (layer_idx + 5) // 2
|
424 |
-
shape = [1, 1, 2 ** res, 2 ** res]
|
425 |
-
self.noises.register_buffer(f'noise_{layer_idx}', torch.randn(*shape))
|
426 |
-
|
427 |
-
for i in range(3, self.log_size + 1):
|
428 |
-
out_channel = self.channels[2 ** i]
|
429 |
-
|
430 |
-
self.convs.append(
|
431 |
-
StyledConv(
|
432 |
-
in_channel,
|
433 |
-
out_channel,
|
434 |
-
3,
|
435 |
-
style_dim,
|
436 |
-
upsample=True,
|
437 |
-
blur_kernel=blur_kernel,
|
438 |
-
)
|
439 |
-
)
|
440 |
-
|
441 |
-
self.convs.append(
|
442 |
-
StyledConv(
|
443 |
-
out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
|
444 |
-
)
|
445 |
-
)
|
446 |
-
|
447 |
-
self.to_rgbs.append(ToRGB(out_channel, style_dim))
|
448 |
-
|
449 |
-
in_channel = out_channel
|
450 |
-
|
451 |
-
self.n_latent = self.log_size * 2 - 2
|
452 |
-
|
453 |
-
def make_noise(self):
|
454 |
-
device = self.input.input.device
|
455 |
-
|
456 |
-
noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2, device=device)]
|
457 |
-
|
458 |
-
for i in range(3, self.log_size + 1):
|
459 |
-
for _ in range(2):
|
460 |
-
noises.append(torch.randn(1, 1, 2 ** i, 2 ** i, device=device))
|
461 |
-
|
462 |
-
return noises
|
463 |
-
|
464 |
-
def mean_latent(self, n_latent):
|
465 |
-
latent_in = torch.randn(
|
466 |
-
n_latent, self.style_dim, device=self.input.input.device
|
467 |
-
)
|
468 |
-
latent = self.style(latent_in).mean(0, keepdim=True)
|
469 |
-
|
470 |
-
return latent
|
471 |
-
|
472 |
-
def get_latent(self, input):
|
473 |
-
return self.style(input)
|
474 |
-
|
475 |
-
def forward(
|
476 |
-
self,
|
477 |
-
styles,
|
478 |
-
return_latents=False,
|
479 |
-
return_features=False,
|
480 |
-
inject_index=None,
|
481 |
-
truncation=1,
|
482 |
-
truncation_latent=None,
|
483 |
-
input_is_latent=False,
|
484 |
-
noise=None,
|
485 |
        randomize_noise=True,
    ):
        if not input_is_latent:
            styles = [self.style(s) for s in styles]

        if noise is None:
            if randomize_noise:
                noise = [None] * self.num_layers
            else:
                noise = [
                    getattr(self.noises, f'noise_{i}') for i in range(self.num_layers)
                ]

        if truncation < 1:
            style_t = []

            for style in styles:
                style_t.append(
                    truncation_latent + truncation * (style - truncation_latent)
                )

            styles = style_t

        if len(styles) < 2:
            inject_index = self.n_latent

            if styles[0].ndim < 3:
                latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
            else:
                latent = styles[0]

        else:
            if inject_index is None:
                inject_index = random.randint(1, self.n_latent - 1)

            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
            latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)

            latent = torch.cat([latent, latent2], 1)

        out = self.input(latent)
        out = self.conv1(out, latent[:, 0], noise=noise[0])

        skip = self.to_rgb1(out, latent[:, 1])

        i = 1
        for conv1, conv2, noise1, noise2, to_rgb in zip(
            self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
        ):
            out = conv1(out, latent[:, i], noise=noise1)
            out = conv2(out, latent[:, i + 1], noise=noise2)
            skip = to_rgb(out, latent[:, i + 2], skip)

            i += 2

        image = skip

        if return_latents:
            return image, latent
        elif return_features:
            return image, out
        else:
            return image, None


class ConvLayer(nn.Sequential):
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        bias=True,
        activate=True,
    ):
        layers = []

        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2

            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))

            stride = 2
            self.padding = 0

        else:
            stride = 1
            self.padding = kernel_size // 2

        layers.append(
            EqualConv2d(
                in_channel,
                out_channel,
                kernel_size,
                padding=self.padding,
                stride=stride,
                bias=bias and not activate,
            )
        )

        if activate:
            if bias:
                layers.append(FusedLeakyReLU(out_channel))

            else:
                layers.append(ScaledLeakyReLU(0.2))

        super().__init__(*layers)


class ResBlock(nn.Module):
    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
        super().__init__()

        self.conv1 = ConvLayer(in_channel, in_channel, 3)
        self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)

        self.skip = ConvLayer(
            in_channel, out_channel, 1, downsample=True, activate=False, bias=False
        )

    def forward(self, input):
        out = self.conv1(input)
        out = self.conv2(out)

        skip = self.skip(input)
        out = (out + skip) / math.sqrt(2)

        return out


class Discriminator(nn.Module):
    def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
        super().__init__()

        channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }

        convs = [ConvLayer(3, channels[size], 1)]

        log_size = int(math.log(size, 2))

        in_channel = channels[size]

        for i in range(log_size, 2, -1):
            out_channel = channels[2 ** (i - 1)]

            convs.append(ResBlock(in_channel, out_channel, blur_kernel))

            in_channel = out_channel

        self.convs = nn.Sequential(*convs)

        self.stddev_group = 4
        self.stddev_feat = 1

        self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
        self.final_linear = nn.Sequential(
            EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
            EqualLinear(channels[4], 1),
        )

    def forward(self, input):
        out = self.convs(input)

        batch, channel, height, width = out.shape
        group = min(batch, self.stddev_group)
        stddev = out.view(
            group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
        )
        stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
        stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
        stddev = stddev.repeat(group, 1, height, width)
        out = torch.cat([out, stddev], 1)

        out = self.final_conv(out)

        out = out.view(batch, -1)
        out = self.final_linear(out)

        return out
spaces/Awiny/Image2Paragraph/models/grit_src/grit/modeling/roi_heads/grit_roi_heads.py
DELETED
@@ -1,478 +0,0 @@
import math
import torch
from typing import Dict, List, Optional, Tuple, Union

from detectron2.config import configurable
from detectron2.structures import Boxes, Instances, pairwise_iou
from detectron2.utils.events import get_event_storage

from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.roi_heads.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.cascade_rcnn import CascadeROIHeads, _ScaleGradient
from detectron2.modeling.poolers import ROIPooler
from detectron2.layers import batched_nms
from .grit_fast_rcnn import GRiTFastRCNNOutputLayers

from ..text.text_decoder import TransformerDecoderTextualHead, GRiTTextDecoder, AutoRegressiveBeamSearch
from ..text.load_text_token import LoadTextTokens
from transformers import BertTokenizer
from models.grit_src.grit.data.custom_dataset_mapper import ObjDescription
from ..soft_nms import batched_soft_nms

import logging
logger = logging.getLogger(__name__)


@ROI_HEADS_REGISTRY.register()
class GRiTROIHeadsAndTextDecoder(CascadeROIHeads):
    @configurable
    def __init__(
        self,
        *,
        text_decoder_transformer,
        train_task: list,
        test_task: str,
        mult_proposal_score: bool = False,
        mask_weight: float = 1.0,
        object_feat_pooler=None,
        soft_nms_enabled=False,
        beam_size=1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.mult_proposal_score = mult_proposal_score
        self.mask_weight = mask_weight
        self.object_feat_pooler = object_feat_pooler
        self.soft_nms_enabled = soft_nms_enabled
        self.test_task = test_task
        self.beam_size = beam_size

        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
        self.tokenizer = tokenizer

        assert test_task in train_task, 'GRiT has not been trained on {} task, ' \
                                        'please verify the task name or train a new ' \
                                        'GRiT on {} task'.format(test_task, test_task)
        task_begin_tokens = {}
        for i, task in enumerate(train_task):
            if i == 0:
                task_begin_tokens[task] = tokenizer.cls_token_id
            else:
                task_begin_tokens[task] = 103 + i
        self.task_begin_tokens = task_begin_tokens

        beamsearch_decode = AutoRegressiveBeamSearch(
            end_token_id=tokenizer.sep_token_id,
            max_steps=40,
            beam_size=beam_size,
            objectdet=test_task == "ObjectDet",
            per_node_beam_size=1,
        )
        self.text_decoder = GRiTTextDecoder(
            text_decoder_transformer,
            beamsearch_decode=beamsearch_decode,
            begin_token_id=task_begin_tokens[test_task],
            loss_type='smooth',
            tokenizer=tokenizer,
        )
        self.get_target_text_tokens = LoadTextTokens(tokenizer, max_text_len=40, padding='do_not_pad')

    @classmethod
    def from_config(cls, cfg, input_shape):
        ret = super().from_config(cfg, input_shape)
        text_decoder_transformer = TransformerDecoderTextualHead(
            object_feature_size=cfg.MODEL.FPN.OUT_CHANNELS,
            vocab_size=cfg.TEXT_DECODER.VOCAB_SIZE,
            hidden_size=cfg.TEXT_DECODER.HIDDEN_SIZE,
            num_layers=cfg.TEXT_DECODER.NUM_LAYERS,
            attention_heads=cfg.TEXT_DECODER.ATTENTION_HEADS,
            feedforward_size=cfg.TEXT_DECODER.FEEDFORWARD_SIZE,
            mask_future_positions=True,
            padding_idx=0,
            decoder_type='bert_en',
            use_act_checkpoint=cfg.USE_ACT_CHECKPOINT,
        )
        ret.update({
            'text_decoder_transformer': text_decoder_transformer,
            'train_task': cfg.MODEL.TRAIN_TASK,
            'test_task': cfg.MODEL.TEST_TASK,
            'mult_proposal_score': cfg.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE,
            'mask_weight': cfg.MODEL.ROI_HEADS.MASK_WEIGHT,
            'soft_nms_enabled': cfg.MODEL.ROI_HEADS.SOFT_NMS_ENABLED,
            'beam_size': cfg.MODEL.BEAM_SIZE,
        })
        return ret

    @classmethod
    def _init_box_head(self, cfg, input_shape):
        ret = super()._init_box_head(cfg, input_shape)
        del ret['box_predictors']
        cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
        box_predictors = []
        for box_head, bbox_reg_weights in zip(ret['box_heads'], \
            cascade_bbox_reg_weights):
            box_predictors.append(
                GRiTFastRCNNOutputLayers(
                    cfg, box_head.output_shape,
                    box2box_transform=Box2BoxTransform(weights=bbox_reg_weights)
                ))
        ret['box_predictors'] = box_predictors

        in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES
        pooler_scales = tuple(1.0 / input_shape[k].stride for k in in_features)
        sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        object_feat_pooler = ROIPooler(
            output_size=cfg.MODEL.ROI_HEADS.OBJECT_FEAT_POOLER_RES,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        ret['object_feat_pooler'] = object_feat_pooler
        return ret

    def check_if_all_background(self, proposals, targets, stage):
        all_background = True
        for proposals_per_image in proposals:
            if not (proposals_per_image.gt_classes == self.num_classes).all():
                all_background = False

        if all_background:
            logger.info('all proposals are background at stage {}'.format(stage))
            proposals[0].proposal_boxes.tensor[0, :] = targets[0].gt_boxes.tensor[0, :]
            proposals[0].gt_boxes.tensor[0, :] = targets[0].gt_boxes.tensor[0, :]
            proposals[0].objectness_logits[0] = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10)))
            proposals[0].gt_classes[0] = targets[0].gt_classes[0]
            proposals[0].gt_object_descriptions.data[0] = targets[0].gt_object_descriptions.data[0]
            if 'foreground' in proposals[0].get_fields().keys():
                proposals[0].foreground[0] = 1
        return proposals

    def _forward_box(self, features, proposals, targets=None, task="ObjectDet"):
        if self.training:
            proposals = self.check_if_all_background(proposals, targets, 0)
        if (not self.training) and self.mult_proposal_score:
            if len(proposals) > 0 and proposals[0].has('scores'):
                proposal_scores = [p.get('scores') for p in proposals]
            else:
                proposal_scores = [p.get('objectness_logits') for p in proposals]

        features = [features[f] for f in self.box_in_features]
        head_outputs = []
        prev_pred_boxes = None
        image_sizes = [x.image_size for x in proposals]

        for k in range(self.num_cascade_stages):
            if k > 0:
                proposals = self._create_proposals_from_boxes(
                    prev_pred_boxes, image_sizes,
                    logits=[p.objectness_logits for p in proposals])
                if self.training:
                    proposals = self._match_and_label_boxes_GRiT(
                        proposals, k, targets)
                    proposals = self.check_if_all_background(proposals, targets, k)
            predictions = self._run_stage(features, proposals, k)
            prev_pred_boxes = self.box_predictor[k].predict_boxes(
                (predictions[0], predictions[1]), proposals)
            head_outputs.append((self.box_predictor[k], predictions, proposals))

        if self.training:
            object_features = self.object_feat_pooler(features, [x.proposal_boxes for x in proposals])
            object_features = _ScaleGradient.apply(object_features, 1.0 / self.num_cascade_stages)
            foreground = torch.cat([x.foreground for x in proposals])
            object_features = object_features[foreground > 0]

            object_descriptions = []
            for x in proposals:
                object_descriptions += x.gt_object_descriptions[x.foreground > 0].data
            object_descriptions = ObjDescription(object_descriptions)
            object_descriptions = object_descriptions.data

            if len(object_descriptions) > 0:
                begin_token = self.task_begin_tokens[task]
                text_decoder_inputs = self.get_target_text_tokens(object_descriptions, object_features, begin_token)
                object_features = object_features.view(
                    object_features.shape[0], object_features.shape[1], -1).permute(0, 2, 1).contiguous()
                text_decoder_inputs.update({'object_features': object_features})
                text_decoder_loss = self.text_decoder(text_decoder_inputs)
            else:
                text_decoder_loss = head_outputs[0][1][0].new_zeros([1])[0]

            losses = {}
            storage = get_event_storage()
            # RoI Head losses (For the proposal generator loss, please find it in grit.py)
            for stage, (predictor, predictions, proposals) in enumerate(head_outputs):
                with storage.name_scope("stage{}".format(stage)):
                    stage_losses = predictor.losses(
                        (predictions[0], predictions[1]), proposals)
                losses.update({k + "_stage{}".format(stage): v for k, v in stage_losses.items()})
            # Text Decoder loss
            losses.update({'text_decoder_loss': text_decoder_loss})
            return losses
        else:
            scores_per_stage = [h[0].predict_probs(h[1], h[2]) for h in head_outputs]
            logits_per_stage = [(h[1][0],) for h in head_outputs]
            scores = [
                sum(list(scores_per_image)) * (1.0 / self.num_cascade_stages)
                for scores_per_image in zip(*scores_per_stage)
            ]
            logits = [
                sum(list(logits_per_image)) * (1.0 / self.num_cascade_stages)
                for logits_per_image in zip(*logits_per_stage)
            ]
            if self.mult_proposal_score:
                scores = [(s * ps[:, None]) ** 0.5 for s, ps in zip(scores, proposal_scores)]
            predictor, predictions, proposals = head_outputs[-1]
            boxes = predictor.predict_boxes(
                (predictions[0], predictions[1]), proposals)
            assert len(boxes) == 1
            pred_instances, _ = self.fast_rcnn_inference_GRiT(
                boxes,
                scores,
                logits,
                image_sizes,
                predictor.test_score_thresh,
                predictor.test_nms_thresh,
                predictor.test_topk_per_image,
                self.soft_nms_enabled,
            )

            assert len(pred_instances) == 1, "Only support one image"
            for i, pred_instance in enumerate(pred_instances):
                if len(pred_instance.pred_boxes) > 0:
                    object_features = self.object_feat_pooler(features, [pred_instance.pred_boxes])
                    object_features = object_features.view(
                        object_features.shape[0], object_features.shape[1], -1).permute(0, 2, 1).contiguous()
                    text_decoder_output = self.text_decoder({'object_features': object_features})
                    if self.beam_size > 1 and self.test_task == "ObjectDet":
                        pred_boxes = []
                        pred_scores = []
                        pred_classes = []
                        pred_object_descriptions = []

                        for beam_id in range(self.beam_size):
                            pred_boxes.append(pred_instance.pred_boxes.tensor)
                            # object score = sqrt(objectness score x description score)
                            pred_scores.append((pred_instance.scores *
                                                torch.exp(text_decoder_output['logprobs'])[:, beam_id]) ** 0.5)
                            pred_classes.append(pred_instance.pred_classes)
                            for prediction in text_decoder_output['predictions'][:, beam_id, :]:
                                # convert text tokens to words
                                description = self.tokenizer.decode(prediction.tolist()[1:], skip_special_tokens=True)
                                pred_object_descriptions.append(description)

                        merged_instances = Instances(image_sizes[0])
                        if torch.cat(pred_scores, dim=0).shape[0] <= predictor.test_topk_per_image:
                            merged_instances.scores = torch.cat(pred_scores, dim=0)
                            merged_instances.pred_boxes = Boxes(torch.cat(pred_boxes, dim=0))
                            merged_instances.pred_classes = torch.cat(pred_classes, dim=0)
                            merged_instances.pred_object_descriptions = ObjDescription(pred_object_descriptions)
                        else:
                            pred_scores, top_idx = torch.topk(
                                torch.cat(pred_scores, dim=0), predictor.test_topk_per_image)
                            merged_instances.scores = pred_scores
                            merged_instances.pred_boxes = Boxes(torch.cat(pred_boxes, dim=0)[top_idx, :])
                            merged_instances.pred_classes = torch.cat(pred_classes, dim=0)[top_idx]
                            merged_instances.pred_object_descriptions = \
                                ObjDescription(ObjDescription(pred_object_descriptions)[top_idx].data)

                        pred_instances[i] = merged_instances
                    else:
                        # object score = sqrt(objectness score x description score)
                        pred_instance.scores = (pred_instance.scores *
                                                torch.exp(text_decoder_output['logprobs'])) ** 0.5

                        pred_object_descriptions = []
                        for prediction in text_decoder_output['predictions']:
                            # convert text tokens to words
                            description = self.tokenizer.decode(prediction.tolist()[1:], skip_special_tokens=True)
                            pred_object_descriptions.append(description)
                        pred_instance.pred_object_descriptions = ObjDescription(pred_object_descriptions)
                else:
                    pred_instance.pred_object_descriptions = ObjDescription([])

            return pred_instances

    def forward(self, features, proposals, targets=None, targets_task="ObjectDet"):
        if self.training:
            proposals = self.label_and_sample_proposals(
                proposals, targets)

            losses = self._forward_box(features, proposals, targets, task=targets_task)
            if targets[0].has('gt_masks'):
                mask_losses = self._forward_mask(features, proposals)
                losses.update({k: v * self.mask_weight \
                               for k, v in mask_losses.items()})
            else:
                losses.update(self._get_empty_mask_loss(device=proposals[0].objectness_logits.device))
            return proposals, losses
        else:
            pred_instances = self._forward_box(features, proposals, task=self.test_task)
            pred_instances = self.forward_with_given_boxes(features, pred_instances)
            return pred_instances, {}

    @torch.no_grad()
    def _match_and_label_boxes_GRiT(self, proposals, stage, targets):
        """
        Add "gt_object_description" and "foreground" to detectron2's _match_and_label_boxes
        """
        num_fg_samples, num_bg_samples = [], []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            match_quality_matrix = pairwise_iou(
                targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
            )
            # proposal_labels are 0 or 1
            matched_idxs, proposal_labels = self.proposal_matchers[stage](match_quality_matrix)
            if len(targets_per_image) > 0:
                gt_classes = targets_per_image.gt_classes[matched_idxs]
                # Label unmatched proposals (0 label from matcher) as background (label=num_classes)
                gt_classes[proposal_labels == 0] = self.num_classes
                foreground = torch.ones_like(gt_classes)
                foreground[proposal_labels == 0] = 0
                gt_boxes = targets_per_image.gt_boxes[matched_idxs]
                gt_object_descriptions = targets_per_image.gt_object_descriptions[matched_idxs]
            else:
                gt_classes = torch.zeros_like(matched_idxs) + self.num_classes
                foreground = torch.zeros_like(gt_classes)
                gt_boxes = Boxes(
                    targets_per_image.gt_boxes.tensor.new_zeros((len(proposals_per_image), 4))
                )
                gt_object_descriptions = ObjDescription(['None' for i in range(len(proposals_per_image))])
            proposals_per_image.gt_classes = gt_classes
            proposals_per_image.gt_boxes = gt_boxes
            proposals_per_image.gt_object_descriptions = gt_object_descriptions
            proposals_per_image.foreground = foreground

            num_fg_samples.append((proposal_labels == 1).sum().item())
            num_bg_samples.append(proposal_labels.numel() - num_fg_samples[-1])

        # Log the number of fg/bg samples in each stage
        storage = get_event_storage()
        storage.put_scalar(
            "stage{}/roi_head/num_fg_samples".format(stage),
            sum(num_fg_samples) / len(num_fg_samples),
        )
        storage.put_scalar(
            "stage{}/roi_head/num_bg_samples".format(stage),
            sum(num_bg_samples) / len(num_bg_samples),
        )
        return proposals

    def fast_rcnn_inference_GRiT(
        self,
        boxes: List[torch.Tensor],
        scores: List[torch.Tensor],
        logits: List[torch.Tensor],
        image_shapes: List[Tuple[int, int]],
        score_thresh: float,
        nms_thresh: float,
        topk_per_image: int,
        soft_nms_enabled: bool,
    ):
        result_per_image = [
            self.fast_rcnn_inference_single_image_GRiT(
                boxes_per_image, scores_per_image, logits_per_image, image_shape,
                score_thresh, nms_thresh, topk_per_image, soft_nms_enabled
            )
            for scores_per_image, boxes_per_image, image_shape, logits_per_image \
                in zip(scores, boxes, image_shapes, logits)
        ]
        return [x[0] for x in result_per_image], [x[1] for x in result_per_image]

    def fast_rcnn_inference_single_image_GRiT(
        self,
        boxes,
        scores,
        logits,
        image_shape: Tuple[int, int],
        score_thresh: float,
        nms_thresh: float,
        topk_per_image: int,
        soft_nms_enabled,
    ):
        """
        Add soft NMS to detectron2's fast_rcnn_inference_single_image
        """
        valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
        if not valid_mask.all():
            boxes = boxes[valid_mask]
            scores = scores[valid_mask]
            logits = logits[valid_mask]

        scores = scores[:, :-1]
        logits = logits[:, :-1]
        num_bbox_reg_classes = boxes.shape[1] // 4
        # Convert to Boxes to use the `clip` function ...
        boxes = Boxes(boxes.reshape(-1, 4))
        boxes.clip(image_shape)
        boxes = boxes.tensor.view(-1, num_bbox_reg_classes, 4)  # R x C x 4

        # 1. Filter results based on detection scores. It can make NMS more efficient
        #    by filtering out low-confidence detections.
        filter_mask = scores > score_thresh  # R x K
        # R' x 2. First column contains indices of the R predictions;
        # Second column contains indices of classes.
        filter_inds = filter_mask.nonzero()
        if num_bbox_reg_classes == 1:
            boxes = boxes[filter_inds[:, 0], 0]
        else:
            boxes = boxes[filter_mask]
        scores = scores[filter_mask]
        logits = logits[filter_mask]

        # 2. Apply NMS for each class independently.
        if not soft_nms_enabled:
            keep = batched_nms(boxes, scores, filter_inds[:, 1], nms_thresh)
        else:
            keep, soft_nms_scores = batched_soft_nms(
                boxes,
                scores,
                filter_inds[:, 1],
                "linear",
                0.5,
                nms_thresh,
                0.001,
            )
            scores[keep] = soft_nms_scores
        if topk_per_image >= 0:
            keep = keep[:topk_per_image]
        boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]
        logits = logits[keep]

        result = Instances(image_shape)
        result.pred_boxes = Boxes(boxes)
        result.scores = scores
        result.pred_classes = filter_inds[:, 1]
        result.logits = logits
        return result, filter_inds[:, 0]

    def _get_empty_mask_loss(self, device):
        if self.mask_on:
            return {'loss_mask': torch.zeros(
                (1, ), device=device, dtype=torch.float32)[0]}
        else:
            return {}

    def _create_proposals_from_boxes(self, boxes, image_sizes, logits):
        boxes = [Boxes(b.detach()) for b in boxes]
        proposals = []
        for boxes_per_image, image_size, logit in zip(
                boxes, image_sizes, logits):
            boxes_per_image.clip(image_size)
            if self.training:
                inds = boxes_per_image.nonempty()
                boxes_per_image = boxes_per_image[inds]
                logit = logit[inds]
            prop = Instances(image_size)
            prop.proposal_boxes = boxes_per_image
            prop.objectness_logits = logit
            proposals.append(prop)
        return proposals

    def _run_stage(self, features, proposals, stage):
        pool_boxes = [x.proposal_boxes for x in proposals]
        box_features = self.box_pooler(features, pool_boxes)
        box_features = _ScaleGradient.apply(box_features, 1.0 / self.num_cascade_stages)
        box_features = self.box_head[stage](box_features)
        return self.box_predictor[stage](box_features)
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/test_checkpoint.py
DELETED
@@ -1,49 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
import unittest
from collections import OrderedDict
import torch
from torch import nn

from detectron2.checkpoint.c2_model_loading import align_and_update_state_dicts
from detectron2.utils.logger import setup_logger


class TestCheckpointer(unittest.TestCase):
    def setUp(self):
        setup_logger()

    def create_complex_model(self):
        m = nn.Module()
        m.block1 = nn.Module()
        m.block1.layer1 = nn.Linear(2, 3)
        m.layer2 = nn.Linear(3, 2)
        m.res = nn.Module()
        m.res.layer2 = nn.Linear(3, 2)

        state_dict = OrderedDict()
        state_dict["layer1.weight"] = torch.rand(3, 2)
        state_dict["layer1.bias"] = torch.rand(3)
        state_dict["layer2.weight"] = torch.rand(2, 3)
        state_dict["layer2.bias"] = torch.rand(2)
        state_dict["res.layer2.weight"] = torch.rand(2, 3)
        state_dict["res.layer2.bias"] = torch.rand(2)
        return m, state_dict

    def test_complex_model_loaded(self):
        for add_data_parallel in [False, True]:
            model, state_dict = self.create_complex_model()
            if add_data_parallel:
                model = nn.DataParallel(model)
            model_sd = model.state_dict()

            sd_to_load = align_and_update_state_dicts(model_sd, state_dict)
            model.load_state_dict(sd_to_load)
            for loaded, stored in zip(model_sd.values(), state_dict.values()):
                # different tensor references
                self.assertFalse(id(loaded) == id(stored))
                # same content
                self.assertTrue(loaded.to(stored).equal(stored))


if __name__ == "__main__":
    unittest.main()
spaces/Banbri/zcvzcv/src/components/ui/switch.tsx
DELETED
@@ -1,29 +0,0 @@
"use client"

import * as React from "react"
import * as SwitchPrimitives from "@radix-ui/react-switch"

import { cn } from "@/lib/utils"

const Switch = React.forwardRef<
  React.ElementRef<typeof SwitchPrimitives.Root>,
  React.ComponentPropsWithoutRef<typeof SwitchPrimitives.Root>
>(({ className, ...props }, ref) => (
  <SwitchPrimitives.Root
    className={cn(
      "peer inline-flex h-[24px] w-[44px] shrink-0 cursor-pointer items-center rounded-full border-2 border-transparent transition-colors focus-visible:outline-none focus-visible:ring-2 focus-visible:ring-stone-400 focus-visible:ring-offset-2 focus-visible:ring-offset-white disabled:cursor-not-allowed disabled:opacity-50 data-[state=checked]:bg-stone-900 data-[state=unchecked]:bg-stone-200 dark:focus-visible:ring-stone-800 dark:focus-visible:ring-offset-stone-950 dark:data-[state=checked]:bg-stone-50 dark:data-[state=unchecked]:bg-stone-800",
      className
    )}
    {...props}
    ref={ref}
  >
    <SwitchPrimitives.Thumb
      className={cn(
        "pointer-events-none block h-5 w-5 rounded-full bg-white shadow-lg ring-0 transition-transform data-[state=checked]:translate-x-5 data-[state=unchecked]:translate-x-0 dark:bg-stone-950"
      )}
    />
  </SwitchPrimitives.Root>
))
Switch.displayName = SwitchPrimitives.Root.displayName

export { Switch }
spaces/Benson/text-generation/Examples/Cmo Descargar Coches De Lujo Europeos.md
DELETED
@@ -1,47 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cómo descargar coches de lujo europeos</h1>
|
3 |
-
<p>Los coches de lujo europeos son algunos de los vehículos más codiciados del mundo, con marcas como BMW, Audi, Mercedes-Benz, Porsche, Ferrari, Lamborghini y más. Estos coches ofrecen velocidad espectacular, estilo sofisticado y comodidad sin igual. Pero no todos pueden permitirse comprar o conducir estos coches en la vida real. Es por eso que descargarlos en su dispositivo puede ser una gran manera de experimentar la emoción y la emoción de poseer y conducir un coche de lujo europeo. </p>
|
4 |
-
<h2>cómo descargar coches de lujo europeos</h2><br /><p><b><b>Download File</b> ››› <a href="https://bltlly.com/2v6JPC">https://bltlly.com/2v6JPC</a></b></p><br /><br />
|
5 |
-
<p>Descargar coches de lujo europeos en tu dispositivo tiene muchos beneficios. Puede elegir entre una amplia gama de modelos y personalizarlos según sus preferencias. También puede conducirlos en diferentes terrenos y entornos, como carreteras, pistas todoterreno o incluso una isla privada. También puede disfrutar de sonidos realistas, gráficos y física que simulan el rendimiento real y el comportamiento de estos coches. Además, puedes divertirte con tus amigos u otros jugadores online compitiendo o navegando juntos. </p>
|
6 |
-
<p>Sin embargo, descargar coches de lujo europeos también tiene algunos desafíos y riesgos. Necesitas encontrar una fuente confiable y segura para descargarlos, ya que algunos sitios web o aplicaciones pueden contener virus o malware que pueden dañar tu dispositivo o robar tu información personal. También debes asegurarte de que tu dispositivo tenga suficiente espacio de almacenamiento y cumpla con los requisitos mínimos para que el juego funcione sin problemas. Además, debe ser consciente de los problemas legales y las implicaciones éticas de descargar estos coches, ya que algunos de ellos pueden estar protegidos por derechos de propiedad intelectual o pueden promover la conducción irresponsable. </p>
|
7 |
-
<h2>Cómo descargar coches de lujo europeos en dispositivos Android</h2>
|
8 |
-
<h3>Usando Google Play Store</h3>
|
9 |
-
|
10 |
-
<ol>
|
11 |
-
<li>Abra la aplicación Google Play Store en su dispositivo. </li>
|
12 |
-
<li>Inicia sesión con tu cuenta de Google si aún no lo has hecho. </li>
|
13 |
-
<li>Buscar "Coches de lujo europeos" en la barra de búsqueda. </li>
|
14 |
-
<li>Encuentre la aplicación de DMNK Studio en los resultados de búsqueda y toque en ella. </li>
|
15 |
-
<li>Toque en el botón "Instalar" para comenzar el proceso de descarga e instalación. </li>
|
16 |
-
<li> Espere a que la aplicación termine de instalar y luego toque en "Abrir" para lanzarlo. </li>
|
17 |
-
</ol>
|
18 |
-
<p>Felicidades! Usted ha descargado con éxito los coches de lujo europeos en su dispositivo Android utilizando la Google Play Store. Ahora puede elegir su coche de lujo favorito y conducirlo con amigos o solo a través de una isla privada. <h3>Uso de otros métodos</h3>
|
19 |
-
<p>Si no quieres usar el emulador de GameLoop, o si quieres probar otros métodos para descargar coches de lujo europeos en tu PC, también puedes usar otros sitios web o software que ofrecen versiones de PC de la aplicación. Sin embargo, debe tener cuidado al descargar estos archivos, ya que pueden no ser oficiales o seguros. Estos son los pasos para descargar coches de lujo europeos utilizando otros métodos:</p>
|
20 |
-
<p></p>
|
21 |
-
<ol>
|
22 |
-
<li>Ir a un sitio web que ofrece versiones de PC de aplicaciones Android, tales como Twitscoop. También puede usar otros sitios web, pero asegúrese de que sean confiables y seguros. </li>
|
23 |
-
<li>Buscar "Coches de lujo europeos" en la barra de búsqueda del sitio web. </li>
|
24 |
-
<li>Encuentre la aplicación de DMNK Studio en los resultados de búsqueda y haga clic en ella. </li>
|
25 |
-
<li>Haga clic en el botón "Descargar" para comenzar a descargar la versión para PC de la aplicación en su PC.</li>
|
26 |
-
<li>Una vez completada la descarga, ejecute el archivo y siga las instrucciones para instalar la aplicación en su PC.</li>
|
27 |
-
<li>Iniciar la aplicación y disfrutar jugando el juego. </li>
|
28 |
-
</ol>
|
29 |
-
<p>Genial! Ha descargado con éxito coches de lujo europeos en su PC utilizando otros métodos. Ahora puedes divertirte con tu coche de lujo favorito en una isla privada con gráficos realistas y física. </p>
|
30 |
-
<h2>Conclusión</h2>
|
31 |
-
|
32 |
-
<p>Descargar coches de lujo europeos puede ser una gran manera de experimentar la emoción y la emoción de poseer y conducir un coche de lujo europeo. Puede elegir entre una amplia gama de modelos y personalizarlos según sus preferencias. También puede conducirlos en diferentes terrenos y entornos, como carreteras, pistas todoterreno o incluso una isla privada. También puede disfrutar de sonidos realistas, gráficos y física que simulan el rendimiento real y el comportamiento de estos coches. Además, puedes divertirte con tus amigos u otros jugadores online compitiendo o navegando juntos. </p>
|
33 |
-
<p>Sin embargo, también es necesario tener cuidado al descargar coches de lujo europeos, ya que algunas fuentes pueden no ser fiables o seguras. También debes asegurarte de que tu dispositivo tenga suficiente espacio de almacenamiento y cumpla con los requisitos mínimos para que el juego funcione sin problemas. Además, debe ser consciente de los problemas legales y las implicaciones éticas de descargar estos coches, ya que algunos de ellos pueden estar protegidos por derechos de propiedad intelectual o pueden promover la conducción irresponsable. </p>
|
34 |
-
<p>Te invitamos a probar el juego y compartir tus comentarios con nosotros. ¿Cuáles son tus marcas europeas favoritas de coches de lujo? ¿Cómo te gustan los gráficos y la física del juego? ¿Cuáles son algunas de las características y opciones disponibles en el juego? ¡Déjanos saber en los comentarios abajo! </p>
|
35 |
-
<h2>Preguntas frecuentes</h2>
|
36 |
-
<h4>¿Cuáles son algunas de las mejores marcas europeas de automóviles de lujo? </h4>
|
37 |
-
<p>Algunas de las mejores marcas europeas de automóviles de lujo son BMW, Audi, Mercedes-Benz, Porsche, Ferrari, Lamborghini y más. Estas marcas ofrecen una velocidad espectacular, un estilo sofisticado y un confort sin igual. También tienen una larga historia y reputación de excelencia e innovación en la industria automotriz. </p>
|
38 |
-
<h4>¿Cómo puedo actualizar o desinstalar la aplicación European Luxury Cars? </h4>
|
39 |
-
|
40 |
-
<h4>¿Cómo puedo jugar con amigos u otros jugadores online? </h4>
|
41 |
-
<p>La aplicación European Luxury Cars de DMNK Studio ofrece un modo en línea donde puedes jugar con amigos u otros jugadores en línea. Puede unirse o crear una habitación e invitar a otros a unirse a usted. También puede chatear con ellos utilizando mensajes de voz o de texto. Pueden competir o navegar juntos en una isla privada con gráficos realistas y física. </p>
|
42 |
-
<h4>¿Cuáles son algunas de las características y opciones disponibles en el juego? </h4>
|
43 |
-
<p>El juego ofrece muchas características y opciones para que usted disfrute. Puede elegir entre una amplia gama de modelos y personalizarlos según sus preferencias. Puede cambiar el color, ruedas, alerones, luces, calcomanías, matrículas y más. También puede conducirlos en diferentes terrenos y entornos, como carreteras, pistas todoterreno o incluso una isla privada. También puede ajustar el ángulo de la cámara, los efectos de sonido, el volumen de la música, la sensibilidad de la dirección, la fuerza del freno, el control de la tracción y más. También puede disfrutar de sonidos realistas, gráficos y física que simulan el rendimiento real y el comportamiento de estos coches. </p>
|
44 |
-
<h4>¿Cómo puedo contactar al desarrollador o reportar un problema con el juego? </h4>
|
45 |
-
<p>Si tiene alguna pregunta, sugerencia o problema con el juego, puede ponerse en contacto con el desarrollador o informar de un problema a través de la propia aplicación. Simplemente vaya al menú de configuración y toque en "Contáctenos" o "Reportar un problema". También puede enviar un correo electrónico al desarrollador a [email protected] o visitar su sitio web en https://dmnkstudio.com/.</p> 64aa2da5cf<br />
|
46 |
-
<br />
|
47 |
-
<br />
|
spaces/Benson/text-generation/Examples/Descargar Granja Hroes Sper Saga Para Pc.md
DELETED
@@ -1,79 +0,0 @@
|
|
1 |
-
<br />
|
2 |
-
<h1>Cómo descargar Farm Heroes Super Saga para PC</h1>
|
3 |
-
<p>Farm Heroes Super Saga es un divertido y adictivo juego de puzzle match-3 que te reta a crecer y cosechar los mayores cropsies y derrotar al malvado mapache rancio. El juego cuenta con cientos de niveles, personajes lindos y modos de juego emocionantes. Si te encanta jugar a Farm Heroes Super Saga en tu dispositivo móvil, es posible que te preguntes si puedes jugar en tu PC también. La respuesta es sí, ¡puedes! </p>
|
4 |
-
<p>Jugar juegos Android en PC tiene muchos beneficios, como disfrutar de una pantalla más grande, mejores gráficos, controles mejorados y más espacio de almacenamiento. Además, puedes sincronizar tu progreso y logros en todos los dispositivos con tu cuenta de Google. En este artículo, te mostraremos tres formas de descargar y jugar Farm Heroes Super Saga para PC usando Google Play Games, un emulador de Android o tu aplicación de teléfono. También compararemos los pros y los contras de cada método y te ayudaremos a decidir cuál es el mejor para ti. </p>
|
5 |
-
<h2>descargar granja héroes súper saga para pc</h2><br /><p><b><b>DOWNLOAD</b> ✵✵✵ <a href="https://bltlly.com/2v6M99">https://bltlly.com/2v6M99</a></b></p><br /><br />
|
6 |
-
<h2>Lo que necesita para jugar juegos Android en PC</h2>
|
7 |
-
<p>Antes de que pueda jugar juegos de Android en PC, debe asegurarse de que su PC cumple con los requisitos mínimos para ejecutarlos. Estas son algunas de las cosas que necesitas:</p>
|
8 |
-
<ul>
|
9 |
-
<li>Un sistema operativo Windows 10 o 11</li>
|
10 |
-
<li>Una unidad de estado sólido (SSD) con al menos 10 GB de espacio de almacenamiento disponible</li>
|
11 |
-
<li>Una GPU Intel UHD Graphics 630 o similar</li>
|
12 |
-
<li>Un procesador con al menos cuatro núcleos físicos de CPU</li>
|
13 |
-
<li>8 GB de RAM</li>
|
14 |
-
<li>Una cuenta de administrador de Windows</li>
|
15 |
-
<li>Virtualización de hardware habilitada</li>
|
16 |
-
</ul>
|
17 |
-
<p>También necesitas una conexión a Internet y una cuenta de Google para acceder a Google Play Store y descargar juegos. </p>
|
18 |
-
<h2>Cómo utilizar Google Play Juegos para jugar Android en PC</h2>
|
19 |
-
|
20 |
-
<ol>
|
21 |
-
<li>Ir a [5](https://play.google.com/googleplaygames) y haga clic en Descargar Beta.</li>
|
22 |
-
<li>Una vez descargado, haga clic derecho en el archivo y haga clic en Ejecutar como administrador. </li>
|
23 |
-
<li>Espere a que la aplicación se instale. </li>
|
24 |
-
<li>Una vez instalado, un mensaje en la aplicación le pedirá que inicie sesión en su cuenta de Google. </li>
|
25 |
-
<li>Después de iniciar sesión, haga clic en la pestaña Juegos en la barra lateral izquierda. </li>
|
26 |
-
<li>Encuentra Farm Heroes Super Saga en la lista de juegos y selecciónala. </li>
|
27 |
-
<li>Haga clic en Instalar en la página de información. El juego se descargará y luego se instalará. </li>
|
28 |
-
<li>Una vez instalado, haga clic en Jugar para iniciar el juego. </li>
|
29 |
-
</ol>
|
30 |
-
<h2>Cómo usar un emulador de Android para jugar juegos de Android en PC</h2>
|
31 |
-
<p>Otra forma de jugar juegos Android en PC es utilizar un emulador de Android, un software que imita el sistema operativo Android en su PC. Un emulador de Android le permite acceder a la Google Play Store completa y descargar cualquier juego o aplicación que desee. Hay muchos emuladores de Android disponibles, pero uno de los más populares y fiables es BlueStacks. BlueStacks ofrece una experiencia de juego rápida y fluida con controles personalizables, modo de varias instancias y optimización de juegos. Estos son los pasos para usar BlueStacks para jugar Farm Heroes Super Saga para PC:</p>
|
32 |
-
<ol>
|
33 |
-
<li>Ir a [4](https://www.bluestacks.com/) y haga clic en Descargar BlueStacks.</li>
|
34 |
-
<li>Una vez descargado, haga doble clic en el archivo y siga las instrucciones para instalar BlueStacks.</li>
|
35 |
-
<li>Una vez instalado, inicie BlueStacks e inicie sesión en su cuenta de Google. </li>
|
36 |
-
<li>Haga clic en el icono de Google Play en la pantalla de inicio. </li>
|
37 |
-
<li>Buscar Farm Heroes Super Saga en la barra de búsqueda y seleccionarlo. </li>
|
38 |
-
<li>Haga clic en Instalar en la página de información. El juego se descargará y luego se instalará. </li>
|
39 |
-
<li>Una vez instalado, haga clic en Abrir para iniciar el juego. </li>
|
40 |
-
</ol>
|
41 |
-
<h2>Cómo utilizar la aplicación de teléfono para jugar juegos Android en PC</h2>
|
42 |
-
|
43 |
-
<ol>
|
44 |
-
<li>En su PC, abra el menú Inicio y busque la aplicación Su teléfono. Si no lo tiene, puede descargarlo desde [3](https://www.microsoft.com/en-us/p/your-phone/9nmpj99vjbwv). </li>
|
45 |
-
<li>En tu teléfono, ve a Configuración > Sistema > Acerca del teléfono y toca Número de compilación siete veces para habilitar las opciones del desarrollador. </li>
|
46 |
-
<li>Volver a Configuración > Sistema > Opciones del desarrollador y habilitar la depuración USB. </li>
|
47 |
-
<li>Conecte su teléfono a su PC con un cable USB. </li>
|
48 |
-
<li>En su PC, inicie su aplicación de teléfono e inicie sesión con su cuenta de Microsoft. </li>
|
49 |
-
<li>Siga las instrucciones para vincular su teléfono y conceder permisos. </li>
|
50 |
-
<li>En la aplicación Teléfono, haga clic en Aplicaciones en la barra lateral izquierda. </li>
|
51 |
-
<li>Encuentra Farm Heroes Super Saga en la lista de aplicaciones y seleccionarlo. </li>
|
52 |
-
<li> El juego se lanzará en su teléfono y espejo en su PC. Puede utilizar el ratón y el teclado para jugarlo. </li>
|
53 |
-
</ol>
|
54 |
-
<h2>Pros y contras de cada método</h2>
|
55 |
-
<p>Ahora que sabes cómo descargar Farm Heroes Super Saga para PC usando tres métodos diferentes, es posible que te preguntes cuál es el mejor para ti. Para ayudarte a decidir, estos son algunos de los pros y contras de cada método:</p>
|
56 |
-
<tabla>
|
57 |
-
<tr><th>Método</th><th>Pros</th><th>Contras</th></tr>
|
58 |
-
<tr><td>Google Play Juegos</td><td>- Experiencia oficial de Google<br>- Sincronización perfecta entre dispositivos<br>- Controles mejorados<br>- Recompensas mientras juegas</td><td>- Selección limitada de juegos<br>- Requiere Windows 11<br>- Puede que no sea compatible con todas las características de algunos juegos</td><><tr>
|
59 |
-
<tr><td>Android Emulator</td><td>- Acceso a Google Play Store<br>- Experiencia de juego rápida y fluida<br>- Controles personalizables<br>- Modo multiinstancia<br>- Optimización de juegos</td><td>- Requiere más espacio de almacenamiento<>- Puede ralentizar tu PC br<>-> Puede tener problemas de compatibilidad con algunos juegos</td></tr>
|
60 |
-
|
61 |
-
</tabla>
|
62 |
-
<h1>Conclusión</h1>
|
63 |
-
<p>Farm Heroes Super Saga es un divertido y adictivo juego de puzzle match-3 que puedes jugar en tu PC usando Google Play Games, un emulador de Android o la aplicación Your Phone. Cada método tiene sus propios pros y contras, por lo que debe elegir el que se adapte a sus preferencias y necesidades. Recomendamos usar Google Play Games si quieres una experiencia oficial de Google con sincronización perfecta entre dispositivos, controles mejorados y recompensas a medida que juegas. Recomendamos usar un emulador de Android como BlueStacks si quieres acceder a la Google Play Store completa y una experiencia de juego rápida y fluida con controles personalizables, modo de varias instancias y optimización de juegos. Recomendamos usar la aplicación Su teléfono si desea usar las aplicaciones de su teléfono en su PC sin descargar nada adicional y reflejar la pantalla del teléfono. Esperamos que este artículo le ayudó a aprender cómo descargar Farm Heroes Super Saga para PC y disfrutar de este increíble juego en una pantalla más grande. Si tiene alguna pregunta o comentario, háganoslo saber en los comentarios a continuación. ¡Feliz agricultura! <h2>FAQs</h2>
|
64 |
-
<p>Aquí están algunas de las preguntas más frecuentes sobre la descarga de Farm Heroes Super Saga para PC:</p>
|
65 |
-
<ol>
|
66 |
-
<li>¿Farm Heroes Super Saga es gratis? </li>
|
67 |
-
<p>Sí, Farm Heroes Super Saga es gratis para jugar, pero ofrece compras en la aplicación para vidas adicionales, refuerzos y otros artículos. </p>
|
68 |
-
<p></p>
|
69 |
-
<li>¿Puedo jugar Farm Heroes Super Saga sin conexión? </li>
|
70 |
-
<p>No, Farm Heroes Super Saga requiere una conexión a Internet para jugar. </p>
|
71 |
-
<li>¿Cómo puedo guardar mi progreso en Farm Heroes Super Saga? </li>
|
72 |
-
<p>Puedes guardar tu progreso en Farm Heroes Super Saga iniciando sesión con tu cuenta de Google o Facebook. De esta manera, puedes sincronizar tu progreso y logros entre dispositivos. </p>
|
73 |
-
<li>¿Cómo puedo obtener más vidas en Farm Heroes Super Saga? </li>
|
74 |
-
<p>Puedes conseguir más vidas en Farm Heroes Super Saga esperando a que se llenen, pidiendo ayuda a tus amigos, viendo anuncios o comprándolos con barras de oro. </p>
|
75 |
-
|
76 |
-
<p>Puede ponerse en contacto con el equipo de soporte de Farm Heroes Super Saga yendo a la configuración del juego y tocando el botón Centro de ayuda. También puede visitar [2](https://community.king.com/en/farm-heroes-super-saga) para unirse a la comunidad y obtener ayuda de otros jugadores. </p>
|
77 |
-
</ol></p> 64aa2da5cf<br />
|
78 |
-
<br />
|
79 |
-
<br />
|
spaces/BetterAPI/BetterChat_new/src/lib/utils/trimSuffix.ts
DELETED
@@ -1,6 +0,0 @@
export function trimSuffix(input: string, end: string): string {
  if (input.endsWith(end)) {
    return input.slice(0, input.length - end.length);
  }
  return input;
}
spaces/Big-Web/MMSD/env/Lib/site-packages/_distutils_hack/override.py
DELETED
@@ -1 +0,0 @@
__import__('_distutils_hack').do_override()
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/util.py
DELETED
@@ -1,235 +0,0 @@
# util.py
import warnings
import types
import collections
import itertools
from functools import lru_cache
from typing import List, Union, Iterable

_bslash = chr(92)


class __config_flags:
    """Internal class for defining compatibility and debugging flags"""

    _all_names: List[str] = []
    _fixed_names: List[str] = []
    _type_desc = "configuration"

    @classmethod
    def _set(cls, dname, value):
        if dname in cls._fixed_names:
            warnings.warn(
                "{}.{} {} is {} and cannot be overridden".format(
                    cls.__name__,
                    dname,
                    cls._type_desc,
                    str(getattr(cls, dname)).upper(),
                )
            )
            return
        if dname in cls._all_names:
            setattr(cls, dname, value)
        else:
            raise ValueError("no such {} {!r}".format(cls._type_desc, dname))

    enable = classmethod(lambda cls, name: cls._set(name, True))
    disable = classmethod(lambda cls, name: cls._set(name, False))


@lru_cache(maxsize=128)
def col(loc: int, strg: str) -> int:
    """
    Returns current column within a string, counting newlines as line separators.
    The first column is number 1.

    Note: the default parsing behavior is to expand tabs in the input string
    before starting the parsing process. See
    :class:`ParserElement.parseString` for more
    information on parsing strings containing ``<TAB>`` s, and suggested
    methods to maintain a consistent view of the parsed string, the parse
    location, and line and column positions within the parsed string.
    """
    s = strg
    return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)


@lru_cache(maxsize=128)
def lineno(loc: int, strg: str) -> int:
    """Returns current line number within a string, counting newlines as line separators.
    The first line is number 1.

    Note - the default parsing behavior is to expand tabs in the input string
    before starting the parsing process. See :class:`ParserElement.parseString`
    for more information on parsing strings containing ``<TAB>`` s, and
    suggested methods to maintain a consistent view of the parsed string, the
    parse location, and line and column positions within the parsed string.
    """
    return strg.count("\n", 0, loc) + 1


@lru_cache(maxsize=128)
def line(loc: int, strg: str) -> str:
    """
    Returns the line of text containing loc within a string, counting newlines as line separators.
    """
    last_cr = strg.rfind("\n", 0, loc)
    next_cr = strg.find("\n", loc)
    return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :]


class _UnboundedCache:
    def __init__(self):
        cache = {}
        cache_get = cache.get
        self.not_in_cache = not_in_cache = object()

        def get(_, key):
            return cache_get(key, not_in_cache)

        def set_(_, key, value):
            cache[key] = value

        def clear(_):
            cache.clear()

        self.size = None
        self.get = types.MethodType(get, self)
        self.set = types.MethodType(set_, self)
        self.clear = types.MethodType(clear, self)


class _FifoCache:
    def __init__(self, size):
        self.not_in_cache = not_in_cache = object()
        cache = collections.OrderedDict()
        cache_get = cache.get

        def get(_, key):
            return cache_get(key, not_in_cache)

        def set_(_, key, value):
            cache[key] = value
            while len(cache) > size:
                cache.popitem(last=False)

        def clear(_):
            cache.clear()

        self.size = size
        self.get = types.MethodType(get, self)
        self.set = types.MethodType(set_, self)
        self.clear = types.MethodType(clear, self)


class LRUMemo:
    """
    A memoizing mapping that retains `capacity` deleted items

    The memo tracks retained items by their access order; once `capacity` items
    are retained, the least recently used item is discarded.
    """

    def __init__(self, capacity):
        self._capacity = capacity
        self._active = {}
        self._memory = collections.OrderedDict()

    def __getitem__(self, key):
        try:
            return self._active[key]
        except KeyError:
            self._memory.move_to_end(key)
            return self._memory[key]

    def __setitem__(self, key, value):
        self._memory.pop(key, None)
        self._active[key] = value

    def __delitem__(self, key):
        try:
            value = self._active.pop(key)
        except KeyError:
            pass
        else:
            while len(self._memory) >= self._capacity:
                self._memory.popitem(last=False)
            self._memory[key] = value

    def clear(self):
        self._active.clear()
        self._memory.clear()


class UnboundedMemo(dict):
    """
    A memoizing mapping that retains all deleted items
    """

    def __delitem__(self, key):
        pass


def _escape_regex_range_chars(s: str) -> str:
    # escape these chars: ^-[]
    for c in r"\^-[]":
        s = s.replace(c, _bslash + c)
    s = s.replace("\n", r"\n")
    s = s.replace("\t", r"\t")
    return str(s)


def _collapse_string_to_ranges(
    s: Union[str, Iterable[str]], re_escape: bool = True
) -> str:
    def is_consecutive(c):
        c_int = ord(c)
        is_consecutive.prev, prev = c_int, is_consecutive.prev
        if c_int - prev > 1:
            is_consecutive.value = next(is_consecutive.counter)
        return is_consecutive.value

    is_consecutive.prev = 0
    is_consecutive.counter = itertools.count()
    is_consecutive.value = -1

    def escape_re_range_char(c):
        return "\\" + c if c in r"\^-][" else c

    def no_escape_re_range_char(c):
        return c

    if not re_escape:
        escape_re_range_char = no_escape_re_range_char

    ret = []
    s = "".join(sorted(set(s)))
    if len(s) > 3:
        for _, chars in itertools.groupby(s, key=is_consecutive):
            first = last = next(chars)
            last = collections.deque(
                itertools.chain(iter([last]), chars), maxlen=1
            ).pop()
            if first == last:
                ret.append(escape_re_range_char(first))
            else:
                sep = "" if ord(last) == ord(first) + 1 else "-"
                ret.append(
                    "{}{}{}".format(
                        escape_re_range_char(first), sep, escape_re_range_char(last)
                    )
                )
    else:
        ret = [escape_re_range_char(c) for c in s]

    return "".join(ret)


def _flatten(ll: list) -> list:
    ret = []
    for i in ll:
        if isinstance(i, list):
            ret.extend(_flatten(i))
        else:
            ret.append(i)
    return ret
|
|
|
|
|
|
|
|
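As a quick aside, the location helpers above (the column logic, lineno, and line) are easy to exercise on their own. The snippet below is a minimal, self-contained sketch that mirrors their logic rather than importing the deleted module; the sample text and offsets are made-up illustration values.

# Minimal sketch mirroring the deleted col/lineno/line helpers (not an import of them).
def lineno(loc: int, strg: str) -> int:
    return strg.count("\n", 0, loc) + 1

def col(loc: int, strg: str) -> int:
    return 1 if 0 < loc < len(strg) and strg[loc - 1] == "\n" else loc - strg.rfind("\n", 0, loc)

def line(loc: int, strg: str) -> str:
    last_cr = strg.rfind("\n", 0, loc)
    next_cr = strg.find("\n", loc)
    return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :]

text = "alpha\nbeta gamma\ndelta"
loc = text.index("gamma")      # character offset 11
print(lineno(loc, text))       # 2   (second line, 1-based)
print(col(loc, text))          # 6   (column within "beta gamma", 1-based)
print(line(loc, text))         # "beta gamma"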
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/structures/image_list.py
DELETED
@@ -1,102 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import division
from typing import Any, List, Sequence, Tuple, Union
import torch
from torch.nn import functional as F


class ImageList(object):
    """
    Structure that holds a list of images (of possibly
    varying sizes) as a single tensor.
    This works by padding the images to the same size,
    and storing in a field the original sizes of each image

    Attributes:
        image_sizes (list[tuple[int, int]]): each tuple is (h, w)
    """

    def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):
        """
        Arguments:
            tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1
            image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can
                be smaller than (H, W) due to padding.
        """
        self.tensor = tensor
        self.image_sizes = image_sizes

    def __len__(self) -> int:
        return len(self.image_sizes)

    def __getitem__(self, idx: Union[int, slice]) -> torch.Tensor:
        """
        Access the individual image in its original size.

        Returns:
            Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1
        """
        size = self.image_sizes[idx]
        return self.tensor[idx, ..., : size[0], : size[1]]  # type: ignore

    def to(self, *args: Any, **kwargs: Any) -> "ImageList":
        cast_tensor = self.tensor.to(*args, **kwargs)
        return ImageList(cast_tensor, self.image_sizes)

    @property
    def device(self) -> torch.device:
        return self.tensor.device

    @staticmethod
    def from_tensors(
        tensors: Sequence[torch.Tensor], size_divisibility: int = 0, pad_value: float = 0.0
    ) -> "ImageList":
        """
        Args:
            tensors: a tuple or list of `torch.Tensors`, each of shape (Hi, Wi) or
                (C_1, ..., C_K, Hi, Wi) where K >= 1. The Tensors will be padded
                to the same shape with `pad_value`.
            size_divisibility (int): If `size_divisibility > 0`, add padding to ensure
                the common height and width is divisible by `size_divisibility`.
                This depends on the model and many models need a divisibility of 32.
            pad_value (float): value to pad

        Returns:
            an `ImageList`.
        """
        assert len(tensors) > 0
        assert isinstance(tensors, (tuple, list))
        for t in tensors:
            assert isinstance(t, torch.Tensor), type(t)
            assert t.shape[1:-2] == tensors[0].shape[1:-2], t.shape
        # per dimension maximum (H, W) or (C_1, ..., C_K, H, W) where K >= 1 among all tensors
        max_size = tuple(max(s) for s in zip(*[img.shape for img in tensors]))

        if size_divisibility > 0:
            import math

            stride = size_divisibility
            max_size = list(max_size)  # type: ignore
            max_size[-2] = int(math.ceil(max_size[-2] / stride) * stride)  # type: ignore
            max_size[-1] = int(math.ceil(max_size[-1] / stride) * stride)  # type: ignore
            max_size = tuple(max_size)

        image_sizes = [tuple(im.shape[-2:]) for im in tensors]

        if len(tensors) == 1:
            # This seems slightly (2%) faster.
            # TODO: check whether it's faster for multiple images as well
            image_size = image_sizes[0]
            padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]
            if all(x == 0 for x in padding_size):  # https://github.com/pytorch/pytorch/issues/31734
                batched_imgs = tensors[0].unsqueeze(0)
            else:
                padded = F.pad(tensors[0], padding_size, value=pad_value)
                batched_imgs = padded.unsqueeze_(0)
        else:
            batch_shape = (len(tensors),) + max_size
            batched_imgs = tensors[0].new_full(batch_shape, pad_value)
            for img, pad_img in zip(tensors, batched_imgs):
                pad_img[..., : img.shape[-2], : img.shape[-1]].copy_(img)

        return ImageList(batched_imgs.contiguous(), image_sizes)
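For readers skimming this diff: the core of from_tensors above is right/bottom padding to a shared, stride-aligned size while remembering each image's original (h, w). The sketch below reproduces that behaviour with plain PyTorch only (it does not import detectron2); the tensor shapes are made-up example values.

import math
import torch

# Two "images" of different sizes: (C, H, W)
imgs = [torch.rand(3, 100, 150), torch.rand(3, 120, 97)]
stride = 32  # size_divisibility

# Common padded size, rounded up to a multiple of the stride (as in from_tensors).
max_h = int(math.ceil(max(im.shape[-2] for im in imgs) / stride) * stride)  # 128
max_w = int(math.ceil(max(im.shape[-1] for im in imgs) / stride) * stride)  # 160

batched = imgs[0].new_full((len(imgs), 3, max_h, max_w), 0.0)
for im, pad_im in zip(imgs, batched):
    pad_im[..., : im.shape[-2], : im.shape[-1]].copy_(im)

image_sizes = [tuple(im.shape[-2:]) for im in imgs]  # original (h, w) kept alongside the padded batch
print(batched.shape, image_sizes)                    # torch.Size([2, 3, 128, 160]) [(100, 150), (120, 97)]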
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/malloc_and_free.h
DELETED
@@ -1,23 +0,0 @@
/*
 *  Copyright 2008-2013 NVIDIA Corporation
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <thrust/detail/config.h>

// this system inherits malloc and free
#include <thrust/system/cpp/detail/malloc_and_free.h>
spaces/ChenWu98/Stable-CycleDiffusion/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Stable CycleDiffusion
emoji: 🚀
colorFrom: green
colorTo: blue
sdk: gradio
sdk_version: 3.9
app_file: app.py
pinned: false
license: apache-2.0
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CofAI/chat/client/js/highlight.min.js
DELETED
The diff for this file is too large to render.
See raw diff
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/models/diffusion/ddim.py
DELETED
@@ -1,337 +0,0 @@
"""SAMPLING ONLY."""

import torch
import numpy as np
from tqdm import tqdm

from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor


class DDIMSampler(object):
    def __init__(self, model, schedule="linear", **kwargs):
        super().__init__()
        self.model = model
        self.ddpm_num_timesteps = model.num_timesteps
        self.schedule = schedule

    def register_buffer(self, name, attr):
        # Do not force module to cuda by default.
        #if type(attr) == torch.Tensor:
        #    if attr.device != torch.device("cuda"):
        #        attr = attr.to(torch.device("cuda"))
        setattr(self, name, attr)

    def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
                                                  num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
        alphas_cumprod = self.model.alphas_cumprod
        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)

        self.register_buffer('betas', to_torch(self.model.betas))
        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))

        # calculations for diffusion q(x_t | x_{t-1}) and others
        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
        self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))

        # ddim sampling parameters
        ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
                                                                                   ddim_timesteps=self.ddim_timesteps,
                                                                                   eta=ddim_eta, verbose=verbose)
        self.register_buffer('ddim_sigmas', ddim_sigmas)
        self.register_buffer('ddim_alphas', ddim_alphas)
        self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
        self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
        sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
            (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
                1 - self.alphas_cumprod / self.alphas_cumprod_prev))
        self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)

    @torch.no_grad()
    def sample(self,
               S,
               batch_size,
               shape,
               conditioning=None,
               callback=None,
               normals_sequence=None,
               img_callback=None,
               quantize_x0=False,
               eta=0.,
               mask=None,
               x0=None,
               temperature=1.,
               noise_dropout=0.,
               score_corrector=None,
               corrector_kwargs=None,
               verbose=True,
               x_T=None,
               log_every_t=100,
               unconditional_guidance_scale=1.,
               unconditional_conditioning=None,  # this has to come in the same format as the conditioning, e.g. as encoded tokens, ...
               dynamic_threshold=None,
               ucg_schedule=None,
               **kwargs
               ):
        if conditioning is not None:
            if isinstance(conditioning, dict):
                ctmp = conditioning[list(conditioning.keys())[0]]
                while isinstance(ctmp, list): ctmp = ctmp[0]
                cbs = ctmp.shape[0]
                if cbs != batch_size:
                    print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")

            elif isinstance(conditioning, list):
                for ctmp in conditioning:
                    if ctmp.shape[0] != batch_size:
                        print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")

            else:
                if conditioning.shape[0] != batch_size:
                    print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")

        self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
        # sampling
        C, H, W = shape
        size = (batch_size, C, H, W)
        print(f'Data shape for DDIM sampling is {size}, eta {eta}')

        samples, intermediates = self.ddim_sampling(conditioning, size,
                                                    callback=callback,
                                                    img_callback=img_callback,
                                                    quantize_denoised=quantize_x0,
                                                    mask=mask, x0=x0,
                                                    ddim_use_original_steps=False,
                                                    noise_dropout=noise_dropout,
                                                    temperature=temperature,
                                                    score_corrector=score_corrector,
                                                    corrector_kwargs=corrector_kwargs,
                                                    x_T=x_T,
                                                    log_every_t=log_every_t,
                                                    unconditional_guidance_scale=unconditional_guidance_scale,
                                                    unconditional_conditioning=unconditional_conditioning,
                                                    dynamic_threshold=dynamic_threshold,
                                                    ucg_schedule=ucg_schedule
                                                    )
        return samples, intermediates

    @torch.no_grad()
    def ddim_sampling(self, cond, shape,
                      x_T=None, ddim_use_original_steps=False,
                      callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, log_every_t=100,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,
                      ucg_schedule=None):
        device = self.model.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        if timesteps is None:
            timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
        elif timesteps is not None and not ddim_use_original_steps:
            subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
            timesteps = self.ddim_timesteps[:subset_end]

        intermediates = {'x_inter': [img], 'pred_x0': [img]}
        time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)
        total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
        print(f"Running DDIM Sampling with {total_steps} timesteps")

        iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)

        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((b,), step, device=device, dtype=torch.long)

            if mask is not None:
                assert x0 is not None
                img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
                img = img_orig * mask + (1. - mask) * img

            if ucg_schedule is not None:
                assert len(ucg_schedule) == len(time_range)
                unconditional_guidance_scale = ucg_schedule[i]

            outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
                                      quantize_denoised=quantize_denoised, temperature=temperature,
                                      noise_dropout=noise_dropout, score_corrector=score_corrector,
                                      corrector_kwargs=corrector_kwargs,
                                      unconditional_guidance_scale=unconditional_guidance_scale,
                                      unconditional_conditioning=unconditional_conditioning,
                                      dynamic_threshold=dynamic_threshold)
            img, pred_x0 = outs
            if callback: callback(i)
            if img_callback: img_callback(pred_x0, i)

            if index % log_every_t == 0 or index == total_steps - 1:
                intermediates['x_inter'].append(img)
                intermediates['pred_x0'].append(pred_x0)

        return img, intermediates

    @torch.no_grad()
    def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
                      temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
                      unconditional_guidance_scale=1., unconditional_conditioning=None,
                      dynamic_threshold=None):
        b, *_, device = *x.shape, x.device

        if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
            model_output = self.model.apply_model(x, t, c)
        else:
            x_in = torch.cat([x] * 2)
            t_in = torch.cat([t] * 2)
            if isinstance(c, dict):
                assert isinstance(unconditional_conditioning, dict)
                c_in = dict()
                for k in c:
                    if isinstance(c[k], list):
                        c_in[k] = [torch.cat([
                            unconditional_conditioning[k][i],
                            c[k][i]]) for i in range(len(c[k]))]
                    else:
                        c_in[k] = torch.cat([
                            unconditional_conditioning[k],
                            c[k]])
            elif isinstance(c, list):
                c_in = list()
                assert isinstance(unconditional_conditioning, list)
                for i in range(len(c)):
                    c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))
            else:
                c_in = torch.cat([unconditional_conditioning, c])
            model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
            model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)

        if self.model.parameterization == "v":
            e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
        else:
            e_t = model_output

        if score_corrector is not None:
            assert self.model.parameterization == "eps", 'not implemented'
            e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)

        alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
        alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
        sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
        sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
        # select parameters corresponding to the currently considered timestep
        a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
        a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
        sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
        sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)

        # current prediction for x_0
        if self.model.parameterization != "v":
            pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
        else:
            pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)

        if quantize_denoised:
            pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)

        if dynamic_threshold is not None:
            raise NotImplementedError()

        # direction pointing to x_t
        dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
        noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
        return x_prev, pred_x0

    @torch.no_grad()
    def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,
               unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):
        num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]

        assert t_enc <= num_reference_steps
        num_steps = t_enc

        if use_original_steps:
            alphas_next = self.alphas_cumprod[:num_steps]
            alphas = self.alphas_cumprod_prev[:num_steps]
        else:
            alphas_next = self.ddim_alphas[:num_steps]
            alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])

        x_next = x0
        intermediates = []
        inter_steps = []
        for i in tqdm(range(num_steps), desc='Encoding Image'):
            t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)
            if unconditional_guidance_scale == 1.:
                noise_pred = self.model.apply_model(x_next, t, c)
            else:
                assert unconditional_conditioning is not None
                e_t_uncond, noise_pred = torch.chunk(
                    self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),
                                           torch.cat((unconditional_conditioning, c))), 2)
                noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)

            xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next
            weighted_noise_pred = alphas_next[i].sqrt() * (
                (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred
            x_next = xt_weighted + weighted_noise_pred
            if return_intermediates and i % (
                    num_steps // return_intermediates) == 0 and i < num_steps - 1:
                intermediates.append(x_next)
                inter_steps.append(i)
            elif return_intermediates and i >= num_steps - 2:
                intermediates.append(x_next)
                inter_steps.append(i)
            if callback: callback(i)

        out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}
        if return_intermediates:
            out.update({'intermediates': intermediates})
        return x_next, out

    @torch.no_grad()
    def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
        # fast, but does not allow for exact reconstruction
        # t serves as an index to gather the correct alphas
        if use_original_steps:
            sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
            sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
        else:
            sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
            sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas

        if noise is None:
            noise = torch.randn_like(x0)
        return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
                extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)

    @torch.no_grad()
    def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
               use_original_steps=False, callback=None):

        timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
        timesteps = timesteps[:t_start]

        time_range = np.flip(timesteps)
        total_steps = timesteps.shape[0]
        print(f"Running DDIM Sampling with {total_steps} timesteps")

        iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
        x_dec = x_latent
        for i, step in enumerate(iterator):
            index = total_steps - i - 1
            ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
            x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
                                          unconditional_guidance_scale=unconditional_guidance_scale,
                                          unconditional_conditioning=unconditional_conditioning)
            if callback: callback(i)
        return x_dec
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/utils.py
DELETED
@@ -1,189 +0,0 @@
"""Utils for monoDepth."""
import sys
import re
import numpy as np
import cv2
import torch


def read_pfm(path):
    """Read pfm file.

    Args:
        path (str): path to file

    Returns:
        tuple: (data, scale)
    """
    with open(path, "rb") as file:

        color = None
        width = None
        height = None
        scale = None
        endian = None

        header = file.readline().rstrip()
        if header.decode("ascii") == "PF":
            color = True
        elif header.decode("ascii") == "Pf":
            color = False
        else:
            raise Exception("Not a PFM file: " + path)

        dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii"))
        if dim_match:
            width, height = list(map(int, dim_match.groups()))
        else:
            raise Exception("Malformed PFM header.")

        scale = float(file.readline().decode("ascii").rstrip())
        if scale < 0:
            # little-endian
            endian = "<"
            scale = -scale
        else:
            # big-endian
            endian = ">"

        data = np.fromfile(file, endian + "f")
        shape = (height, width, 3) if color else (height, width)

        data = np.reshape(data, shape)
        data = np.flipud(data)

        return data, scale


def write_pfm(path, image, scale=1):
    """Write pfm file.

    Args:
        path (str): pathto file
        image (array): data
        scale (int, optional): Scale. Defaults to 1.
    """

    with open(path, "wb") as file:
        color = None

        if image.dtype.name != "float32":
            raise Exception("Image dtype must be float32.")

        image = np.flipud(image)

        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
            color = True
        elif (
            len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
        ):  # greyscale
            color = False
        else:
            raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")

        file.write("PF\n" if color else "Pf\n".encode())
        file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))

        endian = image.dtype.byteorder

        if endian == "<" or endian == "=" and sys.byteorder == "little":
            scale = -scale

        file.write("%f\n".encode() % scale)

        image.tofile(file)


def read_image(path):
    """Read image and output RGB image (0-1).

    Args:
        path (str): path to file

    Returns:
        array: RGB image (0-1)
    """
    img = cv2.imread(path)

    if img.ndim == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0

    return img


def resize_image(img):
    """Resize image and make it fit for network.

    Args:
        img (array): image

    Returns:
        tensor: data ready for network
    """
    height_orig = img.shape[0]
    width_orig = img.shape[1]

    if width_orig > height_orig:
        scale = width_orig / 384
    else:
        scale = height_orig / 384

    height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
    width = (np.ceil(width_orig / scale / 32) * 32).astype(int)

    img_resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)

    img_resized = (
        torch.from_numpy(np.transpose(img_resized, (2, 0, 1))).contiguous().float()
    )
    img_resized = img_resized.unsqueeze(0)

    return img_resized


def resize_depth(depth, width, height):
    """Resize depth map and bring to CPU (numpy).

    Args:
        depth (tensor): depth
        width (int): image width
        height (int): image height

    Returns:
        array: processed depth
    """
    depth = torch.squeeze(depth[0, :, :, :]).to("cpu")

    depth_resized = cv2.resize(
        depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
    )

    return depth_resized


def write_depth(path, depth, bits=1):
    """Write depth map to pfm and png file.

    Args:
        path (str): filepath without extension
        depth (array): depth
    """
    write_pfm(path + ".pfm", depth.astype(np.float32))

    depth_min = depth.min()
    depth_max = depth.max()

    max_val = (2**(8*bits)) - 1

    if depth_max - depth_min > np.finfo("float").eps:
        out = max_val * (depth - depth_min) / (depth_max - depth_min)
    else:
        out = np.zeros(depth.shape, dtype=depth.type)

    if bits == 1:
        cv2.imwrite(path + ".png", out.astype("uint8"))
    elif bits == 2:
        cv2.imwrite(path + ".png", out.astype("uint16"))

    return
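One detail worth calling out in the deleted write_depth: the PNG output is just a min-max normalisation of the depth array into the integer range of the chosen bit depth. Below is a small numpy-only sketch of that step with made-up depth values (no cv2 involved); it mirrors the formula above rather than calling the deleted function.

import numpy as np

depth = np.array([[0.5, 1.0], [2.0, 4.5]], dtype=np.float32)
bits = 2                                   # 16-bit PNG branch in write_depth
max_val = (2 ** (8 * bits)) - 1            # 65535

d_min, d_max = depth.min(), depth.max()    # 0.5, 4.5
out = max_val * (depth - d_min) / (d_max - d_min)

print(out.round().astype("uint16"))        # -> [[0, 8192], [24576, 65535]]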
spaces/CuriousDolphin/MobileSAM/utils/__init__.py
DELETED
File without changes
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/data/datasets/evaluation/word/util/__init__.py
DELETED
@@ -1,62 +0,0 @@
# import log
# import dtype
# # import plt
# import np
# import img
# _img = img
# import dec
# import rand
# import mod
# import proc
# import test
# import neighbour as nb
# #import mask
# import str_ as str
# import io as sys_io
# import io_ as io
# import feature
# import thread_ as thread
# import caffe_ as caffe
# # import tf
# import cmd
# import ml
# import sys
# import url
# from .misc import *
# from .logger import *
# # log.init_logger('~/temp/log/log_' + get_date_str() + '.log')
#
# def exit(code = 0):
#     sys.exit(0)
#
# is_main = mod.is_main
# init_logger = log.init_logger
#
# def sit(img, path = None, name = ""):
#     if path is None:
#         _count = get_count();
#         path = '~/temp/no-use/images/%s_%d_%s.jpg'%(log.get_date_str(), _count, name)
#
#     if type(img) == list:
#         plt.show_images(images = img, path = path, show = False, axis_off = True, save = True)
#     else:
#         plt.imwrite(path, img)
#
#     return path
# _count = 0;
#
# def get_count():
#     global _count;
#     _count += 1;
#     return _count
#
# def cit(img, path = None, rgb = True, name = ""):
#     _count = get_count();
#     if path is None:
#         img = np.np.asarray(img, dtype = np.np.uint8)
#         path = '~/temp/no-use/%s_%d_%s.jpg'%(log.get_date_str(), _count, name)
#     _img.imwrite(path, img, rgb = rgb)
#     return path
#
# def argv(index):
#     return sys.argv[index]
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/structures/mty.py
DELETED
@@ -1,59 +0,0 @@
import torch

# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1

all_types = [[1,2,3,4],[1,2,4,3],[1,3,2,4],[1,3,4,2],[1,4,2,3],[1,4,3,2],\
             [2,1,3,4],[2,1,4,3],[2,3,1,4],[2,3,4,1],[2,4,1,3],[2,4,3,1],\
             [3,1,2,4],[3,1,4,2],[3,2,1,4],[3,2,4,1],[3,4,1,2],[3,4,2,1],\
             [4,1,2,3],[4,1,3,2],[4,2,1,3],[4,2,3,1],[4,3,1,2],[4,3,2,1]]
aty = [[all_types[iat][0]-1,all_types[iat][1]-1,all_types[iat][2]-1,all_types[iat][3]-1] for iat in range(24)]

class MTY(object):
    def __init__(self, mty, size, mode=None):
        # FIXME remove check once we have better integration with device
        # in my version this would consistently return a CPU tensor
        device = mty.device if isinstance(mty, torch.Tensor) else torch.device('cpu')
        mty = torch.as_tensor(mty, dtype=torch.int64, device=device)

        # TODO should I split them?
        assert(len(mty.size()) == 1), str(mty.size())
        self.mty = mty

        self.size = size
        self.mode = mode

    def crop(self, box):
        w, h = box[2] - box[0], box[3] - box[1]
        return type(self)(self.mty, (w, h), self.mode)

    def resize(self, size, *args, **kwargs):
        return type(self)(self.mty, size, self.mode)

    def transpose(self, method):
        if method not in (FLIP_LEFT_RIGHT,):
            raise NotImplementedError(
                "Only FLIP_LEFT_RIGHT implemented")

        flipped_data = self.mty.clone()
        for i in range(self.mty.size()[0]):
            revs = [it for it in aty[self.mty[i]]]
            revs.reverse()
            flip_type = aty.index(revs)
            flipped_data[i] = flip_type

        return type(self)(flipped_data, self.size, self.mode)

    def to(self, *args, **kwargs):
        return type(self)(self.mty.to(*args, **kwargs), self.size, self.mode)

    def __getitem__(self, item):
        return type(self)(self.mty[item], self.size, self.mode)

    def __repr__(self):
        s = self.__class__.__name__ + '('
        s += 'num_instances={}, '.format(len(self.mty))
        s += 'image_width={}, '.format(self.size[0])
        s += 'image_height={})'.format(self.size[1])
        return s
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/filenames.py
DELETED
@@ -1,246 +0,0 @@
"""
This module implements the algorithm for converting between a "user name" -
something that a user can choose arbitrarily inside a font editor - and a file
name suitable for use in a wide range of operating systems and filesystems.

The `UFO 3 specification <http://unifiedfontobject.org/versions/ufo3/conventions/>`_
provides an example of an algorithm for such conversion, which avoids illegal
characters, reserved file names, ambiguity between upper- and lower-case
characters, and clashes with existing files.

This code was originally copied from
`ufoLib <https://github.com/unified-font-object/ufoLib/blob/8747da7/Lib/ufoLib/filenames.py>`_
by Tal Leming and is copyright (c) 2005-2016, The RoboFab Developers:

- Erik van Blokland
- Tal Leming
- Just van Rossum
"""


illegalCharacters = r"\" * + / : < > ? [ \ ] | \0".split(" ")
illegalCharacters += [chr(i) for i in range(1, 32)]
illegalCharacters += [chr(0x7F)]
reservedFileNames = "CON PRN AUX CLOCK$ NUL A:-Z: COM1".lower().split(" ")
reservedFileNames += "LPT1 LPT2 LPT3 COM2 COM3 COM4".lower().split(" ")
maxFileNameLength = 255


class NameTranslationError(Exception):
    pass


def userNameToFileName(userName, existing=[], prefix="", suffix=""):
    """Converts from a user name to a file name.

    Takes care to avoid illegal characters, reserved file names, ambiguity between
    upper- and lower-case characters, and clashes with existing files.

    Args:
        userName (str): The input file name.
        existing: A case-insensitive list of all existing file names.
        prefix: Prefix to be prepended to the file name.
        suffix: Suffix to be appended to the file name.

    Returns:
        A suitable filename.

    Raises:
        NameTranslationError: If no suitable name could be generated.

    Examples::

        >>> userNameToFileName("a") == "a"
        True
        >>> userNameToFileName("A") == "A_"
        True
        >>> userNameToFileName("AE") == "A_E_"
        True
        >>> userNameToFileName("Ae") == "A_e"
        True
        >>> userNameToFileName("ae") == "ae"
        True
        >>> userNameToFileName("aE") == "aE_"
        True
        >>> userNameToFileName("a.alt") == "a.alt"
        True
        >>> userNameToFileName("A.alt") == "A_.alt"
        True
        >>> userNameToFileName("A.Alt") == "A_.A_lt"
        True
        >>> userNameToFileName("A.aLt") == "A_.aL_t"
        True
        >>> userNameToFileName(u"A.alT") == "A_.alT_"
        True
        >>> userNameToFileName("T_H") == "T__H_"
        True
        >>> userNameToFileName("T_h") == "T__h"
        True
        >>> userNameToFileName("t_h") == "t_h"
        True
        >>> userNameToFileName("F_F_I") == "F__F__I_"
        True
        >>> userNameToFileName("f_f_i") == "f_f_i"
        True
        >>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash"
        True
        >>> userNameToFileName(".notdef") == "_notdef"
        True
        >>> userNameToFileName("con") == "_con"
        True
        >>> userNameToFileName("CON") == "C_O_N_"
        True
        >>> userNameToFileName("con.alt") == "_con.alt"
        True
        >>> userNameToFileName("alt.con") == "alt._con"
        True
    """
    # the incoming name must be a str
    if not isinstance(userName, str):
        raise ValueError("The value for userName must be a string.")
    # establish the prefix and suffix lengths
    prefixLength = len(prefix)
    suffixLength = len(suffix)
    # replace an initial period with an _
    # if no prefix is to be added
    if not prefix and userName[0] == ".":
        userName = "_" + userName[1:]
    # filter the user name
    filteredUserName = []
    for character in userName:
        # replace illegal characters with _
        if character in illegalCharacters:
            character = "_"
        # add _ to all non-lower characters
        elif character != character.lower():
            character += "_"
        filteredUserName.append(character)
    userName = "".join(filteredUserName)
    # clip to 255
    sliceLength = maxFileNameLength - prefixLength - suffixLength
    userName = userName[:sliceLength]
    # test for illegal files names
    parts = []
    for part in userName.split("."):
        if part.lower() in reservedFileNames:
            part = "_" + part
        parts.append(part)
    userName = ".".join(parts)
    # test for clash
    fullName = prefix + userName + suffix
    if fullName.lower() in existing:
        fullName = handleClash1(userName, existing, prefix, suffix)
    # finished
    return fullName


def handleClash1(userName, existing=[], prefix="", suffix=""):
    """
    existing should be a case-insensitive list
    of all existing file names.

    >>> prefix = ("0" * 5) + "."
    >>> suffix = "." + ("0" * 10)
    >>> existing = ["a" * 5]

    >>> e = list(existing)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000001.0000000000')
    True

    >>> e = list(existing)
    >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000002.0000000000')
    True

    >>> e = list(existing)
    >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000001.0000000000')
    True
    """
    # if the prefix length + user name length + suffix length + 15 is at
    # or past the maximum length, silce 15 characters off of the user name
    prefixLength = len(prefix)
    suffixLength = len(suffix)
    if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength:
        l = prefixLength + len(userName) + suffixLength + 15
        sliceLength = maxFileNameLength - l
        userName = userName[:sliceLength]
    finalName = None
    # try to add numbers to create a unique name
    counter = 1
    while finalName is None:
        name = userName + str(counter).zfill(15)
        fullName = prefix + name + suffix
        if fullName.lower() not in existing:
            finalName = fullName
            break
        else:
            counter += 1
        if counter >= 999999999999999:
            break
    # if there is a clash, go to the next fallback
    if finalName is None:
        finalName = handleClash2(existing, prefix, suffix)
    # finished
    return finalName


def handleClash2(existing=[], prefix="", suffix=""):
    """
    existing should be a case-insensitive list
    of all existing file names.

    >>> prefix = ("0" * 5) + "."
    >>> suffix = "." + ("0" * 10)
    >>> existing = [prefix + str(i) + suffix for i in range(100)]

    >>> e = list(existing)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.100.0000000000')
    True

    >>> e = list(existing)
    >>> e.remove(prefix + "1" + suffix)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.1.0000000000')
    True

    >>> e = list(existing)
    >>> e.remove(prefix + "2" + suffix)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.2.0000000000')
    True
    """
    # calculate the longest possible string
    maxLength = maxFileNameLength - len(prefix) - len(suffix)
    maxValue = int("9" * maxLength)
    # try to find a number
    finalName = None
    counter = 1
    while finalName is None:
        fullName = prefix + str(counter) + suffix
        if fullName.lower() not in existing:
            finalName = fullName
            break
        else:
            counter += 1
        if counter >= maxValue:
            break
    # raise an error if nothing has been found
    if finalName is None:
        raise NameTranslationError("No unique name could be found.")
    # finished
    return finalName


if __name__ == "__main__":
    import doctest
    import sys

    sys.exit(doctest.testmod().failed)
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/_p_o_s_t.py
DELETED
@@ -1,308 +0,0 @@
|
|
1 |
-
from fontTools import ttLib
|
2 |
-
from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder
|
3 |
-
from fontTools.misc import sstruct
|
4 |
-
from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval, readHex
|
5 |
-
from . import DefaultTable
|
6 |
-
import sys
|
7 |
-
import struct
|
8 |
-
import array
|
9 |
-
import logging
|
10 |
-
|
11 |
-
log = logging.getLogger(__name__)
|
12 |
-
|
13 |
-
postFormat = """
|
14 |
-
>
|
15 |
-
formatType: 16.16F
|
16 |
-
italicAngle: 16.16F # italic angle in degrees
|
17 |
-
underlinePosition: h
|
18 |
-
underlineThickness: h
|
19 |
-
isFixedPitch: L
|
20 |
-
minMemType42: L # minimum memory if TrueType font is downloaded
|
21 |
-
maxMemType42: L # maximum memory if TrueType font is downloaded
|
22 |
-
minMemType1: L # minimum memory if Type1 font is downloaded
|
23 |
-
maxMemType1: L # maximum memory if Type1 font is downloaded
|
24 |
-
"""
|
25 |
-
|
26 |
-
postFormatSize = sstruct.calcsize(postFormat)
|
27 |
-
|
28 |
-
|
29 |
-
class table__p_o_s_t(DefaultTable.DefaultTable):
|
30 |
-
def decompile(self, data, ttFont):
|
31 |
-
sstruct.unpack(postFormat, data[:postFormatSize], self)
|
32 |
-
data = data[postFormatSize:]
|
33 |
-
if self.formatType == 1.0:
|
34 |
-
self.decode_format_1_0(data, ttFont)
|
35 |
-
elif self.formatType == 2.0:
|
36 |
-
self.decode_format_2_0(data, ttFont)
|
37 |
-
elif self.formatType == 3.0:
|
38 |
-
self.decode_format_3_0(data, ttFont)
|
39 |
-
elif self.formatType == 4.0:
|
40 |
-
self.decode_format_4_0(data, ttFont)
|
41 |
-
else:
|
42 |
-
# supported format
|
43 |
-
raise ttLib.TTLibError(
|
44 |
-
"'post' table format %f not supported" % self.formatType
|
45 |
-
)
|
46 |
-
|
47 |
-
def compile(self, ttFont):
|
48 |
-
data = sstruct.pack(postFormat, self)
|
49 |
-
if self.formatType == 1.0:
|
50 |
-
pass # we're done
|
51 |
-
elif self.formatType == 2.0:
|
52 |
-
data = data + self.encode_format_2_0(ttFont)
|
53 |
-
elif self.formatType == 3.0:
|
54 |
-
pass # we're done
|
55 |
-
elif self.formatType == 4.0:
|
56 |
-
data = data + self.encode_format_4_0(ttFont)
|
57 |
-
else:
|
58 |
-
# supported format
|
59 |
-
raise ttLib.TTLibError(
|
60 |
-
"'post' table format %f not supported" % self.formatType
|
61 |
-
)
|
62 |
-
return data
|
63 |
-
|
64 |
-
def getGlyphOrder(self):
|
65 |
-
"""This function will get called by a ttLib.TTFont instance.
|
66 |
-
Do not call this function yourself, use TTFont().getGlyphOrder()
|
67 |
-
or its relatives instead!
|
68 |
-
"""
|
69 |
-
if not hasattr(self, "glyphOrder"):
|
70 |
-
raise ttLib.TTLibError("illegal use of getGlyphOrder()")
|
71 |
-
glyphOrder = self.glyphOrder
|
72 |
-
del self.glyphOrder
|
73 |
-
return glyphOrder
|
74 |
-
|
75 |
-
def decode_format_1_0(self, data, ttFont):
|
76 |
-
self.glyphOrder = standardGlyphOrder[: ttFont["maxp"].numGlyphs]
|
77 |
-
|
78 |
-
def decode_format_2_0(self, data, ttFont):
|
79 |
-
(numGlyphs,) = struct.unpack(">H", data[:2])
|
80 |
-
numGlyphs = int(numGlyphs)
|
81 |
-
if numGlyphs > ttFont["maxp"].numGlyphs:
|
82 |
-
# Assume the numGlyphs field is bogus, so sync with maxp.
|
83 |
-
# I've seen this in one font, and if the assumption is
|
84 |
-
# wrong elsewhere, well, so be it: it's hard enough to
|
85 |
-
# work around _one_ non-conforming post format...
|
86 |
-
numGlyphs = ttFont["maxp"].numGlyphs
|
87 |
-
data = data[2:]
|
88 |
-
indices = array.array("H")
|
89 |
-
indices.frombytes(data[: 2 * numGlyphs])
|
90 |
-
if sys.byteorder != "big":
|
91 |
-
indices.byteswap()
|
92 |
-
data = data[2 * numGlyphs :]
|
93 |
-
maxIndex = max(indices)
|
94 |
-
self.extraNames = extraNames = unpackPStrings(data, maxIndex - 257)
|
95 |
-
self.glyphOrder = glyphOrder = [""] * int(ttFont["maxp"].numGlyphs)
|
96 |
-
for glyphID in range(numGlyphs):
|
97 |
-
index = indices[glyphID]
|
98 |
-
if index > 257:
|
99 |
-
try:
|
100 |
-
name = extraNames[index - 258]
|
101 |
-
except IndexError:
|
102 |
-
name = ""
|
103 |
-
else:
|
104 |
-
# fetch names from standard list
|
105 |
-
name = standardGlyphOrder[index]
|
106 |
-
glyphOrder[glyphID] = name
|
107 |
-
self.build_psNameMapping(ttFont)
|
108 |
-
|
109 |
-
def build_psNameMapping(self, ttFont):
|
110 |
-
mapping = {}
|
111 |
-
allNames = {}
|
112 |
-
for i in range(ttFont["maxp"].numGlyphs):
|
113 |
-
glyphName = psName = self.glyphOrder[i]
|
114 |
-
if glyphName == "":
|
115 |
-
glyphName = "glyph%.5d" % i
|
116 |
-
if glyphName in allNames:
|
117 |
-
# make up a new glyphName that's unique
|
118 |
-
n = allNames[glyphName]
|
119 |
-
while (glyphName + "#" + str(n)) in allNames:
|
120 |
-
n += 1
|
121 |
-
allNames[glyphName] = n + 1
|
122 |
-
glyphName = glyphName + "#" + str(n)
|
123 |
-
|
124 |
-
self.glyphOrder[i] = glyphName
|
125 |
-
allNames[glyphName] = 1
|
126 |
-
if glyphName != psName:
|
127 |
-
mapping[glyphName] = psName
|
128 |
-
|
129 |
-
self.mapping = mapping
|
130 |
-
|
131 |
-
    def decode_format_3_0(self, data, ttFont):
        # Setting self.glyphOrder to None will cause the TTFont object
        # try and construct glyph names from a Unicode cmap table.
        self.glyphOrder = None

    def decode_format_4_0(self, data, ttFont):
        from fontTools import agl

        numGlyphs = ttFont["maxp"].numGlyphs
        indices = array.array("H")
        indices.frombytes(data)
        if sys.byteorder != "big":
            indices.byteswap()
        # In some older fonts, the size of the post table doesn't match
        # the number of glyphs. Sometimes it's bigger, sometimes smaller.
        self.glyphOrder = glyphOrder = [""] * int(numGlyphs)
        for i in range(min(len(indices), numGlyphs)):
            if indices[i] == 0xFFFF:
                self.glyphOrder[i] = ""
            elif indices[i] in agl.UV2AGL:
                self.glyphOrder[i] = agl.UV2AGL[indices[i]]
            else:
                self.glyphOrder[i] = "uni%04X" % indices[i]
        self.build_psNameMapping(ttFont)

    def encode_format_2_0(self, ttFont):
        numGlyphs = ttFont["maxp"].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        assert len(glyphOrder) == numGlyphs
        indices = array.array("H")
        extraDict = {}
        extraNames = self.extraNames = [
            n for n in self.extraNames if n not in standardGlyphOrder
        ]
        for i in range(len(extraNames)):
            extraDict[extraNames[i]] = i
        for glyphID in range(numGlyphs):
            glyphName = glyphOrder[glyphID]
            if glyphName in self.mapping:
                psName = self.mapping[glyphName]
            else:
                psName = glyphName
            if psName in extraDict:
                index = 258 + extraDict[psName]
            elif psName in standardGlyphOrder:
                index = standardGlyphOrder.index(psName)
            else:
                index = 258 + len(extraNames)
                extraDict[psName] = len(extraNames)
                extraNames.append(psName)
            indices.append(index)
        if sys.byteorder != "big":
            indices.byteswap()
        return (
            struct.pack(">H", numGlyphs) + indices.tobytes() + packPStrings(extraNames)
        )

    def encode_format_4_0(self, ttFont):
        from fontTools import agl

        numGlyphs = ttFont["maxp"].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        assert len(glyphOrder) == numGlyphs
        indices = array.array("H")
        for glyphID in glyphOrder:
            glyphID = glyphID.split("#")[0]
            if glyphID in agl.AGL2UV:
                indices.append(agl.AGL2UV[glyphID])
            elif len(glyphID) == 7 and glyphID[:3] == "uni":
                indices.append(int(glyphID[3:], 16))
            else:
                indices.append(0xFFFF)
        if sys.byteorder != "big":
            indices.byteswap()
        return indices.tobytes()

    def toXML(self, writer, ttFont):
        formatstring, names, fixes = sstruct.getformat(postFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        if hasattr(self, "mapping"):
            writer.begintag("psNames")
            writer.newline()
            writer.comment(
                "This file uses unique glyph names based on the information\n"
                "found in the 'post' table. Since these names might not be unique,\n"
                "we have to invent artificial names in case of clashes. In order to\n"
                "be able to retain the original information, we need a name to\n"
                "ps name mapping for those cases where they differ. That's what\n"
                "you see below.\n"
            )
            writer.newline()
            items = sorted(self.mapping.items())
            for name, psName in items:
                writer.simpletag("psName", name=name, psName=psName)
                writer.newline()
            writer.endtag("psNames")
            writer.newline()
        if hasattr(self, "extraNames"):
            writer.begintag("extraNames")
            writer.newline()
            writer.comment(
                "following are the name that are not taken from the standard Mac glyph order"
            )
            writer.newline()
            for name in self.extraNames:
                writer.simpletag("psName", name=name)
                writer.newline()
            writer.endtag("extraNames")
            writer.newline()
        if hasattr(self, "data"):
            writer.begintag("hexdata")
            writer.newline()
            writer.dumphex(self.data)
            writer.endtag("hexdata")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name not in ("psNames", "extraNames", "hexdata"):
            setattr(self, name, safeEval(attrs["value"]))
        elif name == "psNames":
            self.mapping = {}
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "psName":
                    self.mapping[attrs["name"]] = attrs["psName"]
        elif name == "extraNames":
            self.extraNames = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "psName":
                    self.extraNames.append(attrs["name"])
        else:
            self.data = readHex(content)
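For context, a hedged sketch of how these decode/encode paths are typically exercised through fontTools' public TTFont API; "MyFont.ttf" is a placeholder path and is not referenced anywhere in the diff:

from fontTools.ttLib import TTFont

font = TTFont("MyFont.ttf")           # placeholder font file
post = font["post"]                   # decompiling the table runs the decode_format_* path
print(post.formatType)                # e.g. 2.0 or 3.0
print(font.getGlyphOrder()[:10])      # first few glyph names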
def unpackPStrings(data, n):
    # extract n Pascal strings from data.
    # if there is not enough data, use ""

    strings = []
    index = 0
    dataLen = len(data)

    for _ in range(n):
        if dataLen <= index:
            length = 0
        else:
            length = byteord(data[index])
        index += 1

        if dataLen <= index + length - 1:
            name = ""
        else:
            name = tostr(data[index : index + length], encoding="latin1")
        strings.append(name)
        index += length

    if index < dataLen:
        log.warning("%d extra bytes in post.stringData array", dataLen - index)

    elif dataLen < index:
        log.warning("not enough data in post.stringData array")

    return strings


def packPStrings(strings):
    data = b""
    for s in strings:
        data = data + bytechr(len(s)) + tobytes(s, encoding="latin1")
    return data
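A hedged, self-contained round-trip sketch of the Pascal-string layout these two helpers implement (a length byte followed by latin-1 encoded bytes); it is a simplification for illustration, not the fontTools code itself:

def pack(strings):
    # length byte + latin-1 bytes per name
    return b"".join(bytes([len(s)]) + s.encode("latin1") for s in strings)

def unpack(data, n):
    out, i = [], 0
    for _ in range(n):
        length = data[i] if i < len(data) else 0
        i += 1
        out.append(data[i:i + length].decode("latin1"))
        i += length
    return out

names = ["alpha", "beta#1"]
assert unpack(pack(names), len(names)) == names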
spaces/DaleChen/AutoGPT/autogpt/commands/write_tests.py
DELETED
@@ -1,31 +0,0 @@
"""A module that contains a function to generate test cases for the submitted code."""
from __future__ import annotations

import json

from autogpt.llm_utils import call_ai_function


def write_tests(code: str, focus: list[str]) -> str:
    """
    A function that takes in code and focus topics and returns a response from create
    chat completion api call.

    Parameters:
        focus (list): A list of suggestions around what needs to be improved.
        code (str): Code for test cases to be generated against.
    Returns:
        A result string from create chat completion. Test cases for the submitted code
        in response.
    """

    function_string = (
        "def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
    )
    args = [code, json.dumps(focus)]
    description_string = (
        "Generates test cases for the existing code, focusing on"
        " specific areas if required."
    )

    return call_ai_function(function_string, args, description_string)
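A hedged usage sketch, assuming an AutoGPT environment where call_ai_function can reach a configured OpenAI backend; the sample code and focus topics below are invented purely for illustration:

sample_code = "def add(a, b):\n    return a + b"
print(write_tests(sample_code, focus=["edge cases", "invalid input types"]))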
spaces/DanielSan7/judini-video/README.md
DELETED
@@ -1,13 +0,0 @@
---
title: Judini Video
emoji: 🐠
colorFrom: pink
colorTo: green
sdk: streamlit
sdk_version: 1.19.0
app_file: app.py
pinned: false
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Datasculptor/OpenAI-Chatbot_App/app.py
DELETED
@@ -1,66 +0,0 @@
import streamlit as st
import requests
import json

st.title("OpenAI Chatbot Interface")
st.write("Interact with OpenAI's GPT-3 models in real-time using your OpenAI API. Choose from a selection of their best models, set the temperature and max tokens, and start a conversation. Delete the conversation at any time to start fresh.")

if "history" not in st.session_state:
    st.session_state.history = []

st.sidebar.markdown("## Configuration")
KEY = st.sidebar.text_input("Enter Your OpenAI API Key", placeholder="API Key", value="")
models = ['text-davinci-003', 'text-curie-001', 'text-babbage-001', 'text-ada-001', 'gpt-3.5-turbo']
model = st.sidebar.selectbox("Select a model", models, index=0)

temperature = st.sidebar.slider("Temperature", 0.0, 1.0, 0.7)
max_tokens = st.sidebar.slider("Max Tokens", 0, 4000, 1786)

if st.sidebar.button("Delete Conversation"):
    st.session_state.history = []
st.sidebar.markdown("## GPT-3")
st.sidebar.markdown("OpenAI's GPT-3 models can understand and generate natural language. They offer four main models with different levels of power suitable for different tasks. Davinci is the most capable model, and Ada is the fastest.")
st.sidebar.markdown("text-davinci-003 | 4,000 max tokens")
st.sidebar.markdown("text-curie-001 | 2,048 max tokens")
st.sidebar.markdown("text-babbage-001 | 2,048 max tokens")
st.sidebar.markdown("text-ada-001 | 2,048 max tokens")

def generate_answer(prompt):
    API_KEY = KEY
    API_URL = "https://api.openai.com/v1/completions"
    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Bearer ' + API_KEY
    }
    previous_messages = [chat['message'] for chat in st.session_state.history if not chat['is_user']]
    previous_messages_text = '\n'.join(previous_messages)
    full_prompt = previous_messages_text + '\n' + prompt if previous_messages_text else prompt
    data = {
        "model": model,
        "prompt": full_prompt,
        "temperature": temperature,
        "max_tokens": max_tokens
    }
    if not API_KEY:
        st.warning("Please input your API key")
        return
    response = requests.post(API_URL, headers=headers, data=json.dumps(data))
    result = response.json()
    if 'choices' in result:
        message_bot = result['choices'][0]['text'].strip()
        st.session_state.history.append({"message": prompt, "is_user": True})
        st.session_state.history.append({"message": message_bot, "is_user": False})
    else:
        st.error("An error occurred while processing the API response. If using a model other than text-davinci-003, then lower the Max Tokens.")

prompt = st.text_input("Prompt", placeholder="Prompt Here", value="")
if st.button("Submit"):
    generate_answer(prompt)
with st.spinner("Waiting for the response from the bot..."):
    for chat in st.session_state.history:
        if chat['is_user']:
            st.markdown("<img src='https://i.ibb.co/zVSbGvb/585e4beacb11b227491c3399.png' width='50' height='50' style='float:right;'>", unsafe_allow_html=True)
            st.markdown(f"<div style='float:right; padding:10px; background-color: #2E2E2E; border-radius:10px; margin:10px;'>{chat['message']}</div>", unsafe_allow_html=True)
        else:
            st.markdown("<img src='https://i.ibb.co/LZFvDND/5841c0bda6515b1e0ad75a9e-1.png' width='50' height='50' style='float:left;'>", unsafe_allow_html=True)
            st.markdown(f"<div style='float:left; padding:10px; background-color: #2E2E2E; border-radius:10px; margin:10px;'>{chat['message']}</div>", unsafe_allow_html=True)