parquet-converter committed on
Commit 8f8e3d3 · 1 Parent(s): d5a5682

Update parquet files (step 11 of 296)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download and Install Guitar Rig 5 for Free A Step-by-Step Tutorial.md +0 -61
  2. spaces/1gistliPinn/ChatGPT4/Examples/Chillar Party Full Movie In Hindi Dubbed Free Download Hd [EXCLUSIVE].md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Contabilidad Intermedia De Juan Funes Orellana Pdf 181.md +0 -80
  4. spaces/1gistliPinn/ChatGPT4/Examples/Euro Truck Simulator 2 Going East Dlc Activation Code.md +0 -6
  5. spaces/1phancelerku/anime-remove-background/Download Temple Run for Android in Browser No Play Store Needed.md +0 -126
  6. spaces/1vash/demo-flask-docker-template/static/script.js +0 -30
  7. spaces/2023Liu2023/bingo/src/lib/utils.ts +0 -138
  8. spaces/AI-Hobbyist/Hoyo-RVC/extract_feature_print.py +0 -123
  9. spaces/AIFILMS/generate_human_motion/pyrender/tests/unit/test_lights.py +0 -104
  10. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/model.py +0 -77
  11. spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/ckpt_utils.py +0 -66
  12. spaces/AIGC-Audio/Make_An_Audio/vocoder/bigvgan/activations.py +0 -120
  13. spaces/AIGC-Audio/Make_An_Audio/vocoder/bigvgan/models.py +0 -414
  14. spaces/AIGText/GlyphControl/cldm/ddim_hacked.py +0 -318
  15. spaces/Aabbhishekk/MistralQnA/app.py +0 -60
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorpicker/methods/Transform.js +0 -27
  17. spaces/AleksBlacky/Arxiv_paper_classifier/README.md +0 -14
  18. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_comparison.py +0 -405
  19. spaces/Andy1621/uniformer_image_detection/configs/_base_/models/mask_rcnn_r50_caffe_c4.py +0 -123
  20. spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/swin_transformer.py +0 -630
  21. spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/ga_retina_head.py +0 -109
  22. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/api/streaming_api.py +0 -124
  23. spaces/Arafath10/chatcode/cleaner.py +0 -57
  24. spaces/Arthur678/vits-uma-genshin-honkai/text/__init__.py +0 -57
  25. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/ssltransport.py +0 -221
  26. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/py39compat.py +0 -22
  27. spaces/AvinashRamesh23/AIEditor/stable_whisper.py +0 -1491
  28. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/training.md +0 -67
  29. spaces/BAAI/vid2vid-zero/README.md +0 -12
  30. spaces/Bart92/RVC_HF/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py +0 -86
  31. spaces/Benson/text-generation/Examples/Descargar Aplikasi True Skate.md +0 -51
  32. spaces/Betacuckgpt/ehartford-Wizard-Vicuna-30B-Uncensored123/README.md +0 -12
  33. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/poolers.py +0 -235
  34. spaces/CVPR/WALT/mmdet/models/detectors/yolact.py +0 -146
  35. spaces/ChandraMohanNayal/AutoGPT/CONTRIBUTING.md +0 -105
  36. spaces/Cletrason/Cletrason-toad-in-the-mario-movie/optimization.py +0 -756
  37. spaces/ClueAI/CLUE_AIGC/README.md +0 -13
  38. spaces/CofAI/chat.b4/README.md +0 -16
  39. spaces/CoreyMorris/MMLU-by-task-Leaderboard/result_data_processor.py +0 -226
  40. spaces/CorvaeOboro/gen_ability_icon/torch_utils/__init__.py +0 -9
  41. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/designspaceLib/__init__.py +0 -0
  42. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/transformPen.py +0 -111
  43. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-4ffdbeab.css +0 -1
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-bacb8946.js +0 -5
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio_client/__init__.py +0 -7
  46. spaces/Docfile/open_llm_leaderboard/src/display_models/model_metadata_flags.py +0 -18
  47. spaces/DragGan/DragGan-Inversion/PTI/utils/log_utils.py +0 -79
  48. spaces/EPFL-VILAB/MultiMAE/utils/checkpoint.py +0 -152
  49. spaces/Flux9665/IMS-Toucan/app.py +0 -160
  50. spaces/FrankZxShen/so-vits-svc-models-pcr/diffusion/unit2mel.py +0 -100
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download and Install Guitar Rig 5 for Free A Step-by-Step Tutorial.md DELETED
@@ -1,61 +0,0 @@
1
-
2
- <h1>Guitar Rig 5 Full Download: How to Get It for Free</h1>
3
- <p>If you are a guitarist who wants to create and experiment with different tones and effects on your computer, you may have heard of Guitar Rig 5. Guitar Rig 5 is a software solution that simulates various amps, cabinets, pedals, and microphones, and lets you mix and match them to create your own custom sound. Guitar Rig 5 can also be used as a standalone application or as a plugin in your DAW.</p>
4
- <h2>guitar rig 5 full download</h2><br /><p><b><b>Download File</b> &#128279; <a href="https://byltly.com/2uKx7R">https://byltly.com/2uKx7R</a></b></p><br /><br />
5
- <p>But how can you get Guitar Rig 5 full download for free? Is it even possible? In this article, we will answer these questions and show you how to download and install Guitar Rig 5 for free on your PC.</p>
6
- <h2>What is Guitar Rig 5?</h2>
7
- <p>Guitar Rig 5 is a product of Native Instruments, a leading manufacturer of software and hardware for music production and DJing. Guitar Rig 5 was released in 2011 as the successor of Guitar Rig 4, and it has many new features and improvements, such as:</p>
8
- <ul>
9
- <li>Two new high-gain amps: Van 51 and Hot Solo+</li>
10
- <li>Six new effects: Resochord, Vintage Compressor, Skreamer, Limiter, Stereo Tune, and Container</li>
11
- <li>19 additional cabinets in the all-new Control Room Pro</li>
12
- <li>A new sidechaining feature that allows you to modulate any parameter with any input signal</li>
13
- <li>A new container module that lets you create multi-effects chains with drag-and-drop</li>
14
- <li>A redesigned user interface with improved graphics and workflow</li>
15
- <li>A new preset browser with tags and ratings</li>
16
- <li>A new tape deck module that lets you record and play back your performance</li>
17
- <li>A new metronome module that helps you keep time</li>
18
- <li>A new tuner module that helps you tune your guitar</li>
19
- </ul>
20
- <p>Guitar Rig 5 is suitable for guitarists of any genre and skill level who want to explore the possibilities of digital sound processing. It can also be used for other instruments such as bass, keyboards, vocals, drums, etc. It can run on any PC that meets the minimum system requirements:</p>
21
- <ul>
22
- <li>Windows 7 or later (64-bit)</li>
23
- <li>Intel Core 2 Duo or AMD Athlon 64 X2 processor</li>
24
- <li>4 GB RAM or more</li>
25
- <li>1 GB free disk space or more</li>
26
- <li>An ASIO compatible sound card</li>
27
- <li>A MIDI controller (optional)</li>
28
- </ul>
29
-
30
- <h2>How to Download Guitar Rig 5 Full Version for Free?</h2>
31
-
32
- <p>To download Guitar Rig 5 full version for free, you need to have an account on the Native Instruments website. If you don't have one, you can create one for free by following these steps:</p>
33
-
34
- <ol>
35
-
36
- <li>Go to <a href="https://www.native-instruments.com/en/my-account/create-account/">https://www.native-instruments.com/en/my-account/create-account/</a> on your web browser.</li>
37
-
38
- <li>Fill in your personal details and choose a password.</li>
39
-
40
- <li>Check the box to agree to the terms and conditions and click Create Account.</li>
41
-
42
- <li>You will receive a confirmation email with a link to activate your account. Click on the link to complete the registration.</li>
43
-
44
- </ol>
45
-
46
- <p>Once you have an account on the Native Instruments website, you can download Guitar Rig 5 full version for free by following these steps:</p>
47
-
48
- <ol>
49
-
50
- <li>Go to <a href="https://www.native-instruments.com/en/specials/download-free-software-and-demo-versions/">https://www.native-instruments.com/en/specials/download-free-software-and-demo-versions/</a> on your web browser.</li>
51
-
52
- <li>Scroll down to the section where it says GUITAR RIG 6 PRO and click on DOWNLOAD MORE INFO.</li>
53
-
54
- <li>You will be redirected to the product page of Guitar Rig 6 Pro. Scroll down to the section where it says TRY IT FREE FOR 30 DAYS and click on DOWNLOAD DEMO.</li>
55
-
56
- <li>You will be asked to log in with your Native Instruments account. Enter your email and password and click Log In.</li>
57
-
58
- <li>You will be taken to the download page of Guitar Rig 6 Pro. Click on DOWNLOAD</p>
59
- <p></p> ddb901b051<br />
60
- <br />
61
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Chillar Party Full Movie In Hindi Dubbed Free Download Hd [EXCLUSIVE].md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Chillar Party Full Movie In Hindi Dubbed Free Download Hd</h2><br /><p><b><b>Download File</b> &#10004; <a href="https://imgfil.com/2uy0zJ">https://imgfil.com/2uy0zJ</a></b></p><br /><br />
2
-
3
- aaccfb2cb3<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Contabilidad Intermedia De Juan Funes Orellana Pdf 181.md DELETED
@@ -1,80 +0,0 @@
1
- <h2>Contabilidad Intermedia De Juan Funes Orellana Pdf 181</h2><br /><p><b><b>Download File</b> &#10001; <a href="https://imgfil.com/2uxYeC">https://imgfil.com/2uxYeC</a></b></p><br /><br />
2
-
3
- ,746 6.
4
-
5
- Pero si pensamos en una sola serie de promedios,
6
-
7
- tendremos que esperar
8
-
9
- solo un poco más de 10.000 años.
10
-
11
- En efecto, la serie de promedios
12
-
13
- de las pruebas de capacidad de Juan,
14
-
15
- tras su procesamiento,
16
-
17
- es solo de 19.737.
18
-
19
- ¿Ya pueden imaginarse que es
20
-
21
- una gran cifra, una cifra pequeña,
22
-
23
- pero, desde luego, una gran cifra
24
-
25
- para una serie de promedios.
26
-
27
- Es decir, tanto hoy como
28
-
29
- cincuenta años atrás,
30
-
31
- French:
32
-
33
- Et je vais prouver que la fonction d'étalonnage
34
-
35
- joue un rôle crucial dans le succès des prévisions.
36
-
37
- Nous savons que c'est une prédiction
38
-
39
- fondée sur l'étalonnage, la fonction d'étalonnage.
40
-
41
- Qu'est-ce que cela signifie?
42
-
43
- Eh bien, il n'y a qu'une forme
44
-
45
- d'étalonnage qui a été étudiée avec rigueur.
46
-
47
- Et il est possible de l'étudier parce que
48
-
49
- j'ai fait l'expérience d'étalonner
50
-
51
- une famille d'organismes.
52
-
53
- Je fais du bétail ou du porc.
54
-
55
- J'ai essayé d'étalonner
56
-
57
- le succès des vins.
58
-
59
- J'étais professeur, et il y avait des étudiants.
60
-
61
- Et je leur ai demandé de mesurer
62
-
63
- la fonction d'étalonnage de la vigne.
64
-
65
- Et ils ont réalisé de grosses études.
66
-
67
- Ils ont mesuré
68
-
69
- Mais pourquoi?
70
-
71
- Portuguese:
72
-
73
- E eu vou provar que a função
74
-
75
- de calibragem, que todos nós
76
-
77
- conhece 4fefd39f24<br />
78
- <br />
79
- <br />
80
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Euro Truck Simulator 2 Going East Dlc Activation Code.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>euro truck simulator 2 going east dlc activation code</h2><br /><p><b><b>Download</b> &#128279; <a href="https://imgfil.com/2uxZLZ">https://imgfil.com/2uxZLZ</a></b></p><br /><br />
2
-
3
- Brought to you by Steam Labs. Filter reviews by the user's playtime when the review was written:. Going East Download. No minimum to No maximum. Off-topic ... 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1phancelerku/anime-remove-background/Download Temple Run for Android in Browser No Play Store Needed.md DELETED
@@ -1,126 +0,0 @@
1
- <br />
2
- <h1>How to Download Temple Run Without Play Store</h1>
3
- <p>Temple Run is one of the most popular and addictive games on Android. It is an endless runner game where you have to escape from a horde of evil monkeys while avoiding obstacles and collecting coins. But what if you want to download Temple Run without Play Store? Maybe you don't have access to the Play Store, or you want to try a different version of the game, or you just want to have more control over your app installation. Whatever your reason, there is a way to download Temple Run without Play Store, and it's not too difficult. In this article, we will show you how to do it step by step.</p>
4
- <h2>What is Temple Run and Why You Might Want to Download It</h2>
5
- <h3>Temple Run is a popular endless runner game</h3>
6
- <p>Temple Run was released in 2011 by Imangi Studios and quickly became a hit among mobile gamers. The game has been downloaded over a billion times and has spawned several sequels and spin-offs. The gameplay is simple but addictive: you control a treasure hunter who has stolen a cursed idol from a temple and must run for his life while being chased by angry monkeys. Along the way, you have to swipe, tilt, and tap your device to turn, jump, slide, and use power-ups. The game features stunning graphics, smooth animations, and catchy sound effects that make you feel like you are in an adventure movie.</p>
7
- <h2>download temple run without play store</h2><br /><p><b><b>DOWNLOAD</b> &mdash; <a href="https://jinyurl.com/2uNQK8">https://jinyurl.com/2uNQK8</a></b></p><br /><br />
8
- <h3>You might want to download it without Play Store for various reasons</h3>
9
- <p>While Temple Run is available for free on the Google Play Store, there are some reasons why you might want to download it without using the Play Store. For example:</p>
10
- <ul>
11
- <li>You don't have access to the Play Store because of your location, device, or network restrictions.</li>
12
- <li>You want to try an older or newer version of the game that is not available on the Play Store.</li>
13
- <li>You want to avoid ads or in-app purchases that are present in the Play Store version.</li>
14
- <li>You want to have more control over your app installation and updates.</li>
15
- <li>You want to backup or share the APK file with others.</li>
16
- </ul>
17
- <p>Whatever your reason, downloading Temple Run without Play Store is possible and safe if you follow the right steps.</p>
18
- <h2>How to Download APK Files from the Web</h2>
19
- <h3>APK files are the packages for Android apps</h3>
20
- <p>The first step to download Temple Run without Play Store is to get the APK file of the game. APK stands for Android Package Kit, and it is the file format that Android uses to distribute and install apps. An APK file contains all the code, resources, and metadata that an app needs to run on your device. When you download an app from the Play Store, you are actually downloading an APK file that is then installed on your device. But you can also download APK files from other sources on the web.</p>
21
- <h3>You can use a web tool or an APK extractor app to get APK files from Google Play URLs</h3>
22
- <p>There are two main ways to get APK files from Google Play URLs. One way is to use a web tool that generates download links for APK files by pasting Google Play URLs. There are many websites that offer this service, such as APKPure, APKMirror, APKCombo, and Evozi. These websites are usually safe and reliable, but you should always check the ratings, reviews, and permissions of the apps before downloading them. Another way is to use an APK extractor app that can extract APK files from any app installed on your device. There are many apps that can do this, such as APK Extractor, ML Manager, and App Backup & Restore. These apps are useful if you want to backup or share the APK files of the apps you already have on your device.</p>
23
- <h3>You need to allow unknown apps on your device before installing APK files</h3>
24
- <p>Before you can install APK files on your device, you need to enable the option to allow unknown apps on your device. Unknown apps are apps that are not from the Play Store or other trusted sources. By default, Android blocks the installation of unknown apps for security reasons. However, you can change this setting by following these steps:</p>
25
- <ol>
26
- <li>Go to Settings > Apps & notifications > Advanced > Special app access > Install unknown apps.</li>
27
- <li>Select the app that you want to use to install APK files, such as your browser or file manager.</li>
28
- <li>Toggle on the option to Allow from this source.</li>
29
- </ol>
30
- <p>Alternatively, you can also enable this option when you try to install an APK file for the first time. You will see a pop-up asking you to allow unknown apps from that source. Tap on Settings and then toggle on the option to Allow from this source.</p>
31
- <p>How to download temple run on PC without play store<br />
32
- Temple run apk download for android without play store<br />
33
- Play temple run online for free in browser without downloading<br />
34
- Temple run game download for mobile without play store<br />
35
- Download temple run 2 without play store<br />
36
- Temple run mod apk download without play store<br />
37
- Download temple run oz without play store<br />
38
- Temple run free download for laptop without play store<br />
39
- Download temple run brave without play store<br />
40
- Play temple run 2 online for free without downloading<br />
41
- Download temple run for windows 10 without play store<br />
42
- Temple run download for iphone without app store<br />
43
- Download temple run for java phone without play store<br />
44
- Temple run game download for pc offline without play store<br />
45
- Download temple run 3 without play store<br />
46
- Temple run game download for tablet without play store<br />
47
- Download temple run for mac without play store<br />
48
- Temple run game download for jio phone without play store<br />
49
- Download temple run for nokia lumia without play store<br />
50
- Play temple run online unblocked without downloading<br />
51
- Download temple run for samsung galaxy without play store<br />
52
- Temple run game download for blackberry without play store<br />
53
- Download temple run for fire tablet without play store<br />
54
- Temple run game download for windows phone without play store<br />
55
- Download temple run for chromebook without play store<br />
56
- Temple run game download for kindle fire without play store<br />
57
- Download temple run for ios without app store<br />
58
- Temple run game download for android tv without play store<br />
59
- Download temple run for linux without play store<br />
60
- Temple run game download for smartwatch without play store<br />
61
- Download temple run hacked version without play store<br />
62
- Temple run game download for mi tv without play store<br />
63
- Download temple run old version without play store<br />
64
- Temple run game download for ps4 without play store<br />
65
- Download temple run unlimited coins and gems without play store<br />
66
- Temple run game download for xbox one without play store<br />
67
- Download temple run frozen shadows without play store<br />
68
- Temple run game download for nintendo switch without play store<br />
69
- Download temple run bluestacks edition without play store<br />
70
- Temple run game download for vr headset without play store</p>
71
- <h2>How to Install APK Files on Your Android Device</h2>
72
- <h3>You can use a file manager app or an APK installer app to locate and install APK files</h3>
73
- <p>Once you have downloaded the APK file of Temple Run, you need to locate and install it on your device. You can use a file manager app or an APK installer app to do this. A file manager app is an app that lets you browse and manage the files and folders on your device. You can use any file manager app that you have on your device, such as Files by Google, ES File Explorer, or Solid Explorer. An APK installer app is an app that simplifies the process of installing APK files by scanning your device for them and showing them in a list. You can use any APK installer app that you like, such as Installer, Easy Installer, or SAI (Split APKs Installer). Here are the steps to install APK files using either method:</p>
74
- <ul>
75
- <li>Open the file manager or APK installer app and navigate to the folder where you downloaded or extracted the APK file of Temple Run.</li>
76
- <li>Tap on the APK file and then tap on Install.</li>
77
- <li>Wait for the installation to complete and then tap on Open to launch the game.</li>
78
- </ul>
79
- <h3>You may need to accept some permissions or pop-ups before installing the file</h3>
80
- <p>Depending on your device and Android version, you may need to accept some permissions or pop-ups before installing the APK file of Temple Run. For example:</p>
81
- <ul>
82
- <li>You may see a warning message saying that installing unknown apps can harm your device. Tap on Install anyway (unsafe) to proceed.</li>
83
- <li>You may see a pop-up asking you to confirm the installation of Temple Run. Tap on Install to continue.</li>
84
- <li>You may see a list of permissions that Temple Run requires to access your device's features and data. Tap on Accept or Allow to grant them.</li>
85
- </ul>
86
- <h3>You can also transfer the APK file from your computer to your device via USB</h3>
87
- <p>If you don't want to download the APK file of Temple Run directly from your device, you can also transfer it from your computer to your device via USB. Here are the steps to do this:</p>
88
- <ol>
89
- <li>Download the APK file of Temple Run from a web tool or an APK extractor app on your computer.</li>
90
- <li>Connect your device to your computer via USB cable and make sure it is in file transfer mode.</li>
91
- <li>Copy and paste the APK file of Temple Run from your computer to a folder on your device's internal storage or SD card.</li>
92
- <li>Disconnect your device from your computer and follow the steps above to locate and install the APK file using a file manager or an APK installer app.</li>
93
- </ol>
94
- <h2>Conclusion and FAQs</h2>
95
- <h3>Conclusion: Downloading Temple Run without Play Store is easy and safe if you follow the steps above</h3>
96
- <p>In conclusion, downloading Temple Run without Play Store is not a difficult task if you follow the steps above. You just need to get the APK file of Temple Run from a web tool or an APK extractor app, enable unknown apps on your device, and install the APK file using a file manager or an APK installer app. This way, you can enjoy Temple Run without Play Store and have more control over your app installation and updates. Downloading Temple Run without Play Store is also safe and legal, as long as you download the APK file from a trusted source and do not modify or distribute it without permission. However, you should always be careful when installing unknown apps on your device, as they may contain malware or viruses that can harm your device or data. Always check the ratings, reviews, and permissions of the apps before downloading them, and scan them with an antivirus app if possible.</p>
97
- <h3>FAQs: Five common questions and answers about downloading Temple Run without Play Store</h3>
98
- <p>Here are some of the most frequently asked questions and answers about downloading Temple Run without Play Store:</p>
99
- <table>
100
- <tr>
101
- <th>Question</th>
102
- <th>Answer</th>
103
- </tr>
104
- <tr>
105
- <td>Can I download Temple Run without Play Store on any Android device?</td>
106
- <td>Yes, you can download Temple Run without Play Store on any Android device that supports the game's minimum requirements. The game requires Android 4.1 or higher and at least 50 MB of free space.</td>
107
- </tr>
108
- <tr>
109
- <td>Can I update Temple Run without Play Store?</td>
110
- <td>Yes, you can update Temple Run without Play Store by downloading and installing the latest APK file of the game from a web tool or an APK extractor app. However, you will not receive automatic notifications when a new update is available, so you will have to check manually.</td>
111
- </tr>
112
- <tr>
113
- <td>Can I play Temple Run offline without Play Store?</td>
114
- <td>Yes, you can play Temple Run offline without Play Store, as the game does not require an internet connection to run. However, you will not be able to access some features that require an internet connection, such as leaderboards, achievements, and cloud save.</td>
115
- </tr>
116
- <tr>
117
- <td>Can I restore my progress in Temple Run without Play Store?</td>
118
- <td>Yes, you can restore your progress in Temple Run without Play Store by using a backup app or a cloud service. You can use a backup app such as Helium or Titanium Backup to backup and restore your app data on your device. You can also use a cloud service such as Google Drive or Dropbox to sync and restore your app data across devices.</td>
119
- </tr>
120
- <tr>
121
- <td>Can I get banned from Temple Run for downloading it without Play Store?</td>
122
- <td>No, you will not get banned from Temple Run for downloading it without Play Store, as long as you do not use any cheats, hacks, or mods that violate the game's terms of service. Downloading Temple Run without Play Store is not illegal or unethical, as long as you respect the rights of the developers and do not distribute or modify the APK file without permission.</td>
123
- </tr>
124
- </table></p> 197e85843d<br />
125
- <br />
126
- <br />
spaces/1vash/demo-flask-docker-template/static/script.js DELETED
@@ -1,30 +0,0 @@
1
- //const textGenForm = document.querySelector('.text-gen-form');
2
- //
3
- //const translateText = async (text) => {
4
- // const inferResponse = await fetch(`infer_t5?input=${text}`);
5
- // const inferJson = await inferResponse.json();
6
- //
7
- // return inferJson.output;
8
- //};
9
- //
10
- //textGenForm.addEventListener('submit', async (event) => {
11
- // event.preventDefault();
12
- //
13
- // const textGenInput = document.getElementById('text-gen-input');
14
- // const textGenParagraph = document.querySelector('.text-gen-output');
15
- //
16
- // try {
17
- // textGenParagraph.textContent = await translateText(textGenInput.value);
18
- // } catch (err) {
19
- // console.error(err);
20
- // }
21
- //});
22
-
23
- document.addEventListener("DOMContentLoaded", () => {
24
- const uploadForm = document.querySelector(".image-upload-form");
25
- const uploadButton = document.querySelector("#image-upload-submit");
26
-
27
- uploadButton.addEventListener("click", () => {
28
- uploadForm.submit();
29
- });
30
- });
spaces/2023Liu2023/bingo/src/lib/utils.ts DELETED
@@ -1,138 +0,0 @@
1
- import { clsx, type ClassValue } from 'clsx'
2
- import { customAlphabet } from 'nanoid'
3
- import { twMerge } from 'tailwind-merge'
4
-
5
- export function cn(...inputs: ClassValue[]) {
6
- return twMerge(clsx(inputs))
7
- }
8
-
9
- export const nanoid = customAlphabet(
10
- '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
11
- 7
12
- ) // 7-character random string
13
-
14
- export function createChunkDecoder() {
15
- const decoder = new TextDecoder()
16
- return function (chunk: Uint8Array | undefined): string {
17
- if (!chunk) return ''
18
- return decoder.decode(chunk, { stream: true })
19
- }
20
- }
21
-
22
- export function random (start: number, end: number) {
23
- return start + Math.ceil(Math.random() * (end - start))
24
- }
25
-
26
- export function randomIP() {
27
- return `11.${random(104, 107)}.${random(1, 255)}.${random(1, 255)}`
28
- }
29
-
30
- export function parseHeadersFromCurl(content: string) {
31
- const re = /-H '([^:]+):\s*([^']+)/mg
32
- const headers: HeadersInit = {}
33
- content = content.replaceAll('-H "', '-H \'').replaceAll('" ^', '\'\\').replaceAll('^\\^"', '"') // 将 cmd curl 转成 bash curl
34
- content.replace(re, (_: string, key: string, value: string) => {
35
- headers[key] = value
36
- return ''
37
- })
38
-
39
- return headers
40
- }
41
-
42
- export const ChunkKeys = ['BING_HEADER', 'BING_HEADER1', 'BING_HEADER2']
43
- export function encodeHeadersToCookie(content: string) {
44
- const base64Content = btoa(content)
45
- const contentChunks = base64Content.match(/.{1,4000}/g) || []
46
- return ChunkKeys.map((key, index) => `${key}=${contentChunks[index] ?? ''}`)
47
- }
48
-
49
- export function extraCurlFromCookie(cookies: Partial<{ [key: string]: string }>) {
50
- let base64Content = ''
51
- ChunkKeys.forEach((key) => {
52
- base64Content += (cookies[key] || '')
53
- })
54
- try {
55
- return atob(base64Content)
56
- } catch(e) {
57
- return ''
58
- }
59
- }
60
-
61
- export function extraHeadersFromCookie(cookies: Partial<{ [key: string]: string }>) {
62
- return parseHeadersFromCurl(extraCurlFromCookie(cookies))
63
- }
64
-
65
- export function formatDate(input: string | number | Date): string {
66
- const date = new Date(input)
67
- return date.toLocaleDateString('en-US', {
68
- month: 'long',
69
- day: 'numeric',
70
- year: 'numeric'
71
- })
72
- }
73
-
74
- export function parseCookie(cookie: string, cookieName: string) {
75
- const targetCookie = new RegExp(`(?:[; ]|^)${cookieName}=([^;]*)`).test(cookie) ? RegExp.$1 : cookie
76
- return targetCookie ? decodeURIComponent(targetCookie).trim() : cookie.indexOf('=') === -1 ? cookie.trim() : ''
77
- }
78
-
79
- export function parseCookies(cookie: string, cookieNames: string[]) {
80
- const cookies: { [key: string]: string } = {}
81
- cookieNames.forEach(cookieName => {
82
- cookies[cookieName] = parseCookie(cookie, cookieName)
83
- })
84
- return cookies
85
- }
86
-
87
- export const DEFAULT_UA = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36 Edg/115.0.0.0'
88
- export const DEFAULT_IP = process.env.BING_IP || randomIP()
89
-
90
- export function parseUA(ua?: string, default_ua = DEFAULT_UA) {
91
- return / EDGE?/i.test(decodeURIComponent(ua || '')) ? decodeURIComponent(ua!.trim()) : default_ua
92
- }
93
-
94
- export function createHeaders(cookies: Partial<{ [key: string]: string }>, defaultHeaders?: Partial<{ [key: string]: string }>) {
95
- let {
96
- BING_COOKIE = process.env.BING_COOKIE,
97
- BING_UA = process.env.BING_UA,
98
- BING_IP = process.env.BING_IP,
99
- BING_HEADER = process.env.BING_HEADER,
100
- } = cookies
101
-
102
- if (BING_HEADER) {
103
- return extraHeadersFromCookie({
104
- BING_HEADER,
105
- ...cookies,
106
- })
107
- }
108
-
109
- const ua = parseUA(BING_UA)
110
-
111
- if (!BING_COOKIE) {
112
- BING_COOKIE = defaultHeaders?.IMAGE_BING_COOKIE || 'xxx' // hf 暂时不用 Cookie 也可以正常使用
113
- }
114
-
115
- const parsedCookie = parseCookie(BING_COOKIE, '_U')
116
- if (!parsedCookie) {
117
- throw new Error('Invalid Cookie')
118
- }
119
- return {
120
- 'x-forwarded-for': BING_IP || DEFAULT_IP,
121
- 'Accept-Encoding': 'gzip, deflate, br',
122
- 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
123
- 'User-Agent': ua!,
124
- 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32',
125
- cookie: `_U=${parsedCookie}` || '',
126
- }
127
- }
128
-
129
- export class WatchDog {
130
- private tid = 0
131
- watch(fn: Function, timeout = 2000) {
132
- clearTimeout(this.tid)
133
- this.tid = setTimeout(fn, timeout + Math.random() * 1000)
134
- }
135
- reset() {
136
- clearTimeout(this.tid)
137
- }
138
- }
spaces/AI-Hobbyist/Hoyo-RVC/extract_feature_print.py DELETED
@@ -1,123 +0,0 @@
1
- import os, sys, traceback
2
-
3
- # device=sys.argv[1]
4
- n_part = int(sys.argv[2])
5
- i_part = int(sys.argv[3])
6
- if len(sys.argv) == 5:
7
- exp_dir = sys.argv[4]
8
- version = sys.argv[5]
9
- else:
10
- i_gpu = sys.argv[4]
11
- exp_dir = sys.argv[5]
12
- os.environ["CUDA_VISIBLE_DEVICES"] = str(i_gpu)
13
- version = sys.argv[6]
14
- import torch
15
- import torch.nn.functional as F
16
- import soundfile as sf
17
- import numpy as np
18
- from fairseq import checkpoint_utils
19
-
20
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
21
-
22
- if torch.cuda.is_available():
23
- device = "cuda"
24
- elif torch.backends.mps.is_available():
25
- device = "mps"
26
- else:
27
- device = "cpu"
28
-
29
- f = open("%s/extract_f0_feature.log" % exp_dir, "a+")
30
-
31
-
32
- def printt(strr):
33
- print(strr)
34
- f.write("%s\n" % strr)
35
- f.flush()
36
-
37
-
38
- printt(sys.argv)
39
- model_path = "hubert_base.pt"
40
-
41
- printt(exp_dir)
42
- wavPath = "%s/1_16k_wavs" % exp_dir
43
- outPath = (
44
- "%s/3_feature256" % exp_dir if version == "v1" else "%s/3_feature768" % exp_dir
45
- )
46
- os.makedirs(outPath, exist_ok=True)
47
-
48
-
49
- # wave must be 16k, hop_size=320
50
- def readwave(wav_path, normalize=False):
51
- wav, sr = sf.read(wav_path)
52
- assert sr == 16000
53
- feats = torch.from_numpy(wav).float()
54
- if feats.dim() == 2: # double channels
55
- feats = feats.mean(-1)
56
- assert feats.dim() == 1, feats.dim()
57
- if normalize:
58
- with torch.no_grad():
59
- feats = F.layer_norm(feats, feats.shape)
60
- feats = feats.view(1, -1)
61
- return feats
62
-
63
-
64
- # HuBERT model
65
- printt("load model(s) from {}".format(model_path))
66
- # if hubert model is exist
67
- if os.access(model_path, os.F_OK) == False:
68
- printt(
69
- "Error: Extracting is shut down because %s does not exist, you may download it from https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main"
70
- % model_path
71
- )
72
- exit(0)
73
- models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
74
- [model_path],
75
- suffix="",
76
- )
77
- model = models[0]
78
- model = model.to(device)
79
- printt("move model to %s" % device)
80
- if device not in ["mps", "cpu"]:
81
- model = model.half()
82
- model.eval()
83
-
84
- todo = sorted(list(os.listdir(wavPath)))[i_part::n_part]
85
- n = max(1, len(todo) // 10) # 最多打印十条
86
- if len(todo) == 0:
87
- printt("no-feature-todo")
88
- else:
89
- printt("all-feature-%s" % len(todo))
90
- for idx, file in enumerate(todo):
91
- try:
92
- if file.endswith(".wav"):
93
- wav_path = "%s/%s" % (wavPath, file)
94
- out_path = "%s/%s" % (outPath, file.replace("wav", "npy"))
95
-
96
- if os.path.exists(out_path):
97
- continue
98
-
99
- feats = readwave(wav_path, normalize=saved_cfg.task.normalize)
100
- padding_mask = torch.BoolTensor(feats.shape).fill_(False)
101
- inputs = {
102
- "source": feats.half().to(device)
103
- if device not in ["mps", "cpu"]
104
- else feats.to(device),
105
- "padding_mask": padding_mask.to(device),
106
- "output_layer": 9 if version == "v1" else 12, # layer 9
107
- }
108
- with torch.no_grad():
109
- logits = model.extract_features(**inputs)
110
- feats = (
111
- model.final_proj(logits[0]) if version == "v1" else logits[0]
112
- )
113
-
114
- feats = feats.squeeze(0).float().cpu().numpy()
115
- if np.isnan(feats).sum() == 0:
116
- np.save(out_path, feats, allow_pickle=False)
117
- else:
118
- printt("%s-contains nan" % file)
119
- if idx % n == 0:
120
- printt("now-%s,all-%s,%s,%s" % (len(todo), idx, file, feats.shape))
121
- except:
122
- printt(traceback.format_exc())
123
- printt("all-feature-done")
spaces/AIFILMS/generate_human_motion/pyrender/tests/unit/test_lights.py DELETED
@@ -1,104 +0,0 @@
1
- import numpy as np
2
- import pytest
3
-
4
- from pyrender import (DirectionalLight, SpotLight, PointLight, Texture,
5
- PerspectiveCamera, OrthographicCamera)
6
- from pyrender.constants import SHADOW_TEX_SZ
7
-
8
-
9
- def test_directional_light():
10
-
11
- d = DirectionalLight()
12
- assert d.name is None
13
- assert np.all(d.color == 1.0)
14
- assert d.intensity == 1.0
15
-
16
- d.name = 'direc'
17
- with pytest.raises(ValueError):
18
- d.color = None
19
- with pytest.raises(TypeError):
20
- d.intensity = None
21
-
22
- d = DirectionalLight(color=[0.0, 0.0, 0.0])
23
- assert np.all(d.color == 0.0)
24
-
25
- d._generate_shadow_texture()
26
- st = d.shadow_texture
27
- assert isinstance(st, Texture)
28
- assert st.width == st.height == SHADOW_TEX_SZ
29
-
30
- sc = d._get_shadow_camera(scene_scale=5.0)
31
- assert isinstance(sc, OrthographicCamera)
32
- assert sc.xmag == sc.ymag == 5.0
33
- assert sc.znear == 0.01 * 5.0
34
- assert sc.zfar == 10 * 5.0
35
-
36
-
37
- def test_spot_light():
38
-
39
- s = SpotLight()
40
- assert s.name is None
41
- assert np.all(s.color == 1.0)
42
- assert s.intensity == 1.0
43
- assert s.innerConeAngle == 0.0
44
- assert s.outerConeAngle == np.pi / 4.0
45
- assert s.range is None
46
-
47
- with pytest.raises(ValueError):
48
- s.range = -1.0
49
-
50
- with pytest.raises(ValueError):
51
- s.range = 0.0
52
-
53
- with pytest.raises(ValueError):
54
- s.innerConeAngle = -1.0
55
-
56
- with pytest.raises(ValueError):
57
- s.innerConeAngle = np.pi / 3.0
58
-
59
- with pytest.raises(ValueError):
60
- s.outerConeAngle = -1.0
61
-
62
- with pytest.raises(ValueError):
63
- s.outerConeAngle = np.pi
64
-
65
- s.range = 5.0
66
- s.outerConeAngle = np.pi / 2 - 0.05
67
- s.innerConeAngle = np.pi / 3
68
- s.innerConeAngle = 0.0
69
- s.outerConeAngle = np.pi / 4.0
70
-
71
- s._generate_shadow_texture()
72
- st = s.shadow_texture
73
- assert isinstance(st, Texture)
74
- assert st.width == st.height == SHADOW_TEX_SZ
75
-
76
- sc = s._get_shadow_camera(scene_scale=5.0)
77
- assert isinstance(sc, PerspectiveCamera)
78
- assert sc.znear == 0.01 * 5.0
79
- assert sc.zfar == 10 * 5.0
80
- assert sc.aspectRatio == 1.0
81
- assert np.allclose(sc.yfov, np.pi / 16.0 * 9.0) # Plus pi / 16
82
-
83
-
84
- def test_point_light():
85
-
86
- s = PointLight()
87
- assert s.name is None
88
- assert np.all(s.color == 1.0)
89
- assert s.intensity == 1.0
90
- assert s.range is None
91
-
92
- with pytest.raises(ValueError):
93
- s.range = -1.0
94
-
95
- with pytest.raises(ValueError):
96
- s.range = 0.0
97
-
98
- s.range = 5.0
99
-
100
- with pytest.raises(NotImplementedError):
101
- s._generate_shadow_texture()
102
-
103
- with pytest.raises(NotImplementedError):
104
- s._get_shadow_camera(scene_scale=5.0)
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/model.py DELETED
@@ -1,77 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
-
4
-
5
- class VGGishish(nn.Module):
6
-
7
- def __init__(self, conv_layers, use_bn, num_classes):
8
- '''
9
- Mostly from
10
- https://pytorch.org/vision/0.8/_modules/torchvision/models/vgg.html
11
- '''
12
- super().__init__()
13
- layers = []
14
- in_channels = 1
15
-
16
- # a list of channels with 'MP' (maxpool) from config
17
- for v in conv_layers:
18
- if v == 'MP':
19
- layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
20
- else:
21
- conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1, stride=1)
22
- if use_bn:
23
- layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
24
- else:
25
- layers += [conv2d, nn.ReLU(inplace=True)]
26
- in_channels = v
27
- self.features = nn.Sequential(*layers)
28
-
29
- self.avgpool = nn.AdaptiveAvgPool2d((5, 10))
30
-
31
- self.flatten = nn.Flatten()
32
- self.classifier = nn.Sequential(
33
- nn.Linear(512 * 5 * 10, 4096),
34
- nn.ReLU(True),
35
- nn.Linear(4096, 4096),
36
- nn.ReLU(True),
37
- nn.Linear(4096, num_classes)
38
- )
39
-
40
- # weight init
41
- self.reset_parameters()
42
-
43
- def forward(self, x):
44
- # adding channel dim for conv2d (B, 1, F, T) <-
45
- x = x.unsqueeze(1)
46
- # backbone (B, 1, 5, 53) <- (B, 1, 80, 860)
47
- x = self.features(x)
48
- # adaptive avg pooling (B, 1, 5, 10) <- (B, 1, 5, 53) – if no MP is used as the end of VGG
49
- x = self.avgpool(x)
50
- # flatten
51
- x = self.flatten(x)
52
- # classify
53
- x = self.classifier(x)
54
- return x
55
-
56
- def reset_parameters(self):
57
- for m in self.modules():
58
- if isinstance(m, nn.Conv2d):
59
- nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
60
- if m.bias is not None:
61
- nn.init.constant_(m.bias, 0)
62
- elif isinstance(m, nn.BatchNorm2d):
63
- nn.init.constant_(m.weight, 1)
64
- nn.init.constant_(m.bias, 0)
65
- elif isinstance(m, nn.Linear):
66
- nn.init.normal_(m.weight, 0, 0.01)
67
- nn.init.constant_(m.bias, 0)
68
-
69
-
70
- if __name__ == '__main__':
71
- num_classes = 309
72
- inputs = torch.rand(3, 80, 848)
73
- conv_layers = [64, 64, 'MP', 128, 128, 'MP', 256, 256, 256, 'MP', 512, 512, 512, 'MP', 512, 512, 512]
74
- # conv_layers = [64, 'MP', 128, 'MP', 256, 256, 'MP', 512, 512, 'MP']
75
- model = VGGishish(conv_layers, use_bn=False, num_classes=num_classes)
76
- outputs = model(inputs)
77
- print(outputs.shape)
spaces/AIGC-Audio/AudioGPT/text_to_speech/utils/commons/ckpt_utils.py DELETED
@@ -1,66 +0,0 @@
1
- import glob
2
- import os
3
- import re
4
- import torch
5
-
6
-
7
- def get_last_checkpoint(work_dir, steps=None):
8
- checkpoint = None
9
- last_ckpt_path = None
10
- ckpt_paths = get_all_ckpts(work_dir, steps)
11
- if len(ckpt_paths) > 0:
12
- last_ckpt_path = ckpt_paths[0]
13
- checkpoint = torch.load(last_ckpt_path, map_location='cpu')
14
- return checkpoint, last_ckpt_path
15
-
16
-
17
- def get_all_ckpts(work_dir, steps=None):
18
- if steps is None:
19
- ckpt_path_pattern = f'{work_dir}/model_ckpt_steps_*.ckpt'
20
- else:
21
- ckpt_path_pattern = f'{work_dir}/model_ckpt_steps_{steps}.ckpt'
22
- return sorted(glob.glob(ckpt_path_pattern),
23
- key=lambda x: -int(re.findall('.*steps\_(\d+)\.ckpt', x)[0]))
24
-
25
-
26
- def load_ckpt(cur_model, ckpt_base_dir, model_name='model', force=True, strict=True):
27
- if os.path.isfile(ckpt_base_dir):
28
- base_dir = os.path.dirname(ckpt_base_dir)
29
- ckpt_path = ckpt_base_dir
30
- checkpoint = torch.load(ckpt_base_dir, map_location='cpu')
31
- else:
32
- base_dir = ckpt_base_dir
33
- checkpoint, ckpt_path = get_last_checkpoint(ckpt_base_dir)
34
- if checkpoint is not None:
35
- state_dict = checkpoint["state_dict"]
36
- if len([k for k in state_dict.keys() if '.' in k]) > 0:
37
- state_dict = {k[len(model_name) + 1:]: v for k, v in state_dict.items()
38
- if k.startswith(f'{model_name}.')}
39
- else:
40
- if '.' not in model_name:
41
- state_dict = state_dict[model_name]
42
- else:
43
- base_model_name = model_name.split('.')[0]
44
- rest_model_name = model_name[len(base_model_name) + 1:]
45
- state_dict = {
46
- k[len(rest_model_name) + 1:]: v for k, v in state_dict[base_model_name].items()
47
- if k.startswith(f'{rest_model_name}.')}
48
- if not strict:
49
- cur_model_state_dict = cur_model.state_dict()
50
- unmatched_keys = []
51
- for key, param in state_dict.items():
52
- if key in cur_model_state_dict:
53
- new_param = cur_model_state_dict[key]
54
- if new_param.shape != param.shape:
55
- unmatched_keys.append(key)
56
- print("| Unmatched keys: ", key, new_param.shape, param.shape)
57
- for key in unmatched_keys:
58
- del state_dict[key]
59
- cur_model.load_state_dict(state_dict, strict=strict)
60
- print(f"| load '{model_name}' from '{ckpt_path}'.")
61
- else:
62
- e_msg = f"| ckpt not found in {base_dir}."
63
- if force:
64
- assert False, e_msg
65
- else:
66
- print(e_msg)
spaces/AIGC-Audio/Make_An_Audio/vocoder/bigvgan/activations.py DELETED
@@ -1,120 +0,0 @@
1
- # Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
2
- # LICENSE is in incl_licenses directory.
3
-
4
- import torch
5
- from torch import nn, sin, pow
6
- from torch.nn import Parameter
7
-
8
-
9
- class Snake(nn.Module):
10
- '''
11
- Implementation of a sine-based periodic activation function
12
- Shape:
13
- - Input: (B, C, T)
14
- - Output: (B, C, T), same shape as the input
15
- Parameters:
16
- - alpha - trainable parameter
17
- References:
18
- - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
19
- https://arxiv.org/abs/2006.08195
20
- Examples:
21
- >>> a1 = snake(256)
22
- >>> x = torch.randn(256)
23
- >>> x = a1(x)
24
- '''
25
- def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
26
- '''
27
- Initialization.
28
- INPUT:
29
- - in_features: shape of the input
30
- - alpha: trainable parameter
31
- alpha is initialized to 1 by default, higher values = higher-frequency.
32
- alpha will be trained along with the rest of your model.
33
- '''
34
- super(Snake, self).__init__()
35
- self.in_features = in_features
36
-
37
- # initialize alpha
38
- self.alpha_logscale = alpha_logscale
39
- if self.alpha_logscale: # log scale alphas initialized to zeros
40
- self.alpha = Parameter(torch.zeros(in_features) * alpha)
41
- else: # linear scale alphas initialized to ones
42
- self.alpha = Parameter(torch.ones(in_features) * alpha)
43
-
44
- self.alpha.requires_grad = alpha_trainable
45
-
46
- self.no_div_by_zero = 0.000000001
47
-
48
- def forward(self, x):
49
- '''
50
- Forward pass of the function.
51
- Applies the function to the input elementwise.
52
- Snake ∶= x + 1/a * sin^2 (xa)
53
- '''
54
- alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T]
55
- if self.alpha_logscale:
56
- alpha = torch.exp(alpha)
57
- x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
58
-
59
- return x
60
-
61
-
62
- class SnakeBeta(nn.Module):
63
- '''
64
- A modified Snake function which uses separate parameters for the magnitude of the periodic components
65
- Shape:
66
- - Input: (B, C, T)
67
- - Output: (B, C, T), same shape as the input
68
- Parameters:
69
- - alpha - trainable parameter that controls frequency
70
- - beta - trainable parameter that controls magnitude
71
- References:
72
- - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
73
- https://arxiv.org/abs/2006.08195
74
- Examples:
75
- >>> a1 = snakebeta(256)
76
- >>> x = torch.randn(256)
77
- >>> x = a1(x)
78
- '''
79
- def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
80
- '''
81
- Initialization.
82
- INPUT:
83
- - in_features: shape of the input
84
- - alpha - trainable parameter that controls frequency
85
- - beta - trainable parameter that controls magnitude
86
- alpha is initialized to 1 by default, higher values = higher-frequency.
87
- beta is initialized to 1 by default, higher values = higher-magnitude.
88
- alpha will be trained along with the rest of your model.
89
- '''
90
- super(SnakeBeta, self).__init__()
91
- self.in_features = in_features
92
-
93
- # initialize alpha
94
- self.alpha_logscale = alpha_logscale
95
- if self.alpha_logscale: # log scale alphas initialized to zeros
96
- self.alpha = Parameter(torch.zeros(in_features) * alpha)
97
- self.beta = Parameter(torch.zeros(in_features) * alpha)
98
- else: # linear scale alphas initialized to ones
99
- self.alpha = Parameter(torch.ones(in_features) * alpha)
100
- self.beta = Parameter(torch.ones(in_features) * alpha)
101
-
102
- self.alpha.requires_grad = alpha_trainable
103
- self.beta.requires_grad = alpha_trainable
104
-
105
- self.no_div_by_zero = 0.000000001
106
-
107
- def forward(self, x):
108
- '''
109
- Forward pass of the function.
110
- Applies the function to the input elementwise.
111
- SnakeBeta ∶= x + 1/b * sin^2 (xa)
112
- '''
113
- alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T]
114
- beta = self.beta.unsqueeze(0).unsqueeze(-1)
115
- if self.alpha_logscale:
116
- alpha = torch.exp(alpha)
117
- beta = torch.exp(beta)
118
- x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
119
-
120
- return x
spaces/AIGC-Audio/Make_An_Audio/vocoder/bigvgan/models.py DELETED
@@ -1,414 +0,0 @@
1
- # Copyright (c) 2022 NVIDIA CORPORATION.
2
- # Licensed under the MIT license.
3
-
4
- # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
5
- # LICENSE is in incl_licenses directory.
6
-
7
-
8
- import torch
9
- import torch.nn.functional as F
10
- import torch.nn as nn
11
- from torch.nn import Conv1d, ConvTranspose1d, Conv2d
12
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
13
- import numpy as np
14
- from .activations import Snake,SnakeBeta
15
- from .alias_free_torch import *
16
- import os
17
- from omegaconf import OmegaConf
18
-
19
- LRELU_SLOPE = 0.1
20
-
21
- def init_weights(m, mean=0.0, std=0.01):
22
- classname = m.__class__.__name__
23
- if classname.find("Conv") != -1:
24
- m.weight.data.normal_(mean, std)
25
-
26
-
27
- def get_padding(kernel_size, dilation=1):
28
- return int((kernel_size*dilation - dilation)/2)
29
-
30
- class AMPBlock1(torch.nn.Module):
31
- def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5), activation=None):
32
- super(AMPBlock1, self).__init__()
33
- self.h = h
34
-
35
- self.convs1 = nn.ModuleList([
36
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
37
- padding=get_padding(kernel_size, dilation[0]))),
38
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
39
- padding=get_padding(kernel_size, dilation[1]))),
40
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
41
- padding=get_padding(kernel_size, dilation[2])))
42
- ])
43
- self.convs1.apply(init_weights)
44
-
45
- self.convs2 = nn.ModuleList([
46
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
47
- padding=get_padding(kernel_size, 1))),
48
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
49
- padding=get_padding(kernel_size, 1))),
50
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
51
- padding=get_padding(kernel_size, 1)))
52
- ])
53
- self.convs2.apply(init_weights)
54
-
55
- self.num_layers = len(self.convs1) + len(self.convs2) # total number of conv layers
56
-
57
- if activation == 'snake': # periodic nonlinearity with snake function and anti-aliasing
58
- self.activations = nn.ModuleList([
59
- Activation1d(
60
- activation=Snake(channels, alpha_logscale=h.snake_logscale))
61
- for _ in range(self.num_layers)
62
- ])
63
- elif activation == 'snakebeta': # periodic nonlinearity with snakebeta function and anti-aliasing
64
- self.activations = nn.ModuleList([
65
- Activation1d(
66
- activation=SnakeBeta(channels, alpha_logscale=h.snake_logscale))
67
- for _ in range(self.num_layers)
68
- ])
69
- else:
70
- raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")
71
-
72
- def forward(self, x):
73
- acts1, acts2 = self.activations[::2], self.activations[1::2]
74
- for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
75
- xt = a1(x)
76
- xt = c1(xt)
77
- xt = a2(xt)
78
- xt = c2(xt)
79
- x = xt + x
80
-
81
- return x
82
-
83
- def remove_weight_norm(self):
84
- for l in self.convs1:
85
- remove_weight_norm(l)
86
- for l in self.convs2:
87
- remove_weight_norm(l)
88
-
89
-
90
- class AMPBlock2(torch.nn.Module):
91
- def __init__(self, h, channels, kernel_size=3, dilation=(1, 3), activation=None):
92
- super(AMPBlock2, self).__init__()
93
- self.h = h
94
-
95
- self.convs = nn.ModuleList([
96
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
97
- padding=get_padding(kernel_size, dilation[0]))),
98
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
99
- padding=get_padding(kernel_size, dilation[1])))
100
- ])
101
- self.convs.apply(init_weights)
102
-
103
- self.num_layers = len(self.convs) # total number of conv layers
104
-
105
- if activation == 'snake': # periodic nonlinearity with snake function and anti-aliasing
106
- self.activations = nn.ModuleList([
107
- Activation1d(
108
- activation=Snake(channels, alpha_logscale=h.snake_logscale))
109
- for _ in range(self.num_layers)
110
- ])
111
- elif activation == 'snakebeta': # periodic nonlinearity with snakebeta function and anti-aliasing
112
- self.activations = nn.ModuleList([
113
- Activation1d(
114
- activation=SnakeBeta(channels, alpha_logscale=h.snake_logscale))
115
- for _ in range(self.num_layers)
116
- ])
117
- else:
118
- raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")
119
-
120
- def forward(self, x):
121
- for c, a in zip (self.convs, self.activations):
122
- xt = a(x)
123
- xt = c(xt)
124
- x = xt + x
125
-
126
- return x
127
-
128
- def remove_weight_norm(self):
129
- for l in self.convs:
130
- remove_weight_norm(l)
131
-
132
-
133
- class BigVGAN(torch.nn.Module):
134
- # this is our main BigVGAN model. Applies anti-aliased periodic activation for resblocks.
135
- def __init__(self, h):
136
- super(BigVGAN, self).__init__()
137
- self.h = h
138
-
139
- self.num_kernels = len(h.resblock_kernel_sizes)
140
- self.num_upsamples = len(h.upsample_rates)
141
-
142
- # pre conv
143
- self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))
144
-
145
- # define which AMPBlock to use. BigVGAN uses AMPBlock1 as default
146
- resblock = AMPBlock1 if h.resblock == '1' else AMPBlock2
147
-
148
- # transposed conv-based upsamplers. does not apply anti-aliasing
149
- self.ups = nn.ModuleList()
150
- for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
151
- self.ups.append(nn.ModuleList([
152
- weight_norm(ConvTranspose1d(h.upsample_initial_channel // (2 ** i),
153
- h.upsample_initial_channel // (2 ** (i + 1)),
154
- k, u, padding=(k - u) // 2))
155
- ]))
156
-
157
- # residual blocks using anti-aliased multi-periodicity composition modules (AMP)
158
- self.resblocks = nn.ModuleList()
159
- for i in range(len(self.ups)):
160
- ch = h.upsample_initial_channel // (2 ** (i + 1))
161
- for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
162
- self.resblocks.append(resblock(h, ch, k, d, activation=h.activation))
163
-
164
- # post conv
165
- if h.activation == "snake": # periodic nonlinearity with snake function and anti-aliasing
166
- activation_post = Snake(ch, alpha_logscale=h.snake_logscale)
167
- self.activation_post = Activation1d(activation=activation_post)
168
- elif h.activation == "snakebeta": # periodic nonlinearity with snakebeta function and anti-aliasing
169
- activation_post = SnakeBeta(ch, alpha_logscale=h.snake_logscale)
170
- self.activation_post = Activation1d(activation=activation_post)
171
- else:
172
- raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")
173
-
174
- self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
175
-
176
- # weight initialization
177
- for i in range(len(self.ups)):
178
- self.ups[i].apply(init_weights)
179
- self.conv_post.apply(init_weights)
180
-
181
- def forward(self, x):
182
- # pre conv
183
- x = self.conv_pre(x)
184
-
185
- for i in range(self.num_upsamples):
186
- # upsampling
187
- for i_up in range(len(self.ups[i])):
188
- x = self.ups[i][i_up](x)
189
- # AMP blocks
190
- xs = None
191
- for j in range(self.num_kernels):
192
- if xs is None:
193
- xs = self.resblocks[i * self.num_kernels + j](x)
194
- else:
195
- xs += self.resblocks[i * self.num_kernels + j](x)
196
- x = xs / self.num_kernels
197
-
198
- # post conv
199
- x = self.activation_post(x)
200
- x = self.conv_post(x)
201
- x = torch.tanh(x)
202
-
203
- return x
204
-
205
- def remove_weight_norm(self):
206
- print('Removing weight norm...')
207
- for l in self.ups:
208
- for l_i in l:
209
- remove_weight_norm(l_i)
210
- for l in self.resblocks:
211
- l.remove_weight_norm()
212
- remove_weight_norm(self.conv_pre)
213
- remove_weight_norm(self.conv_post)
214
-
215
-
216
- class DiscriminatorP(torch.nn.Module):
217
- def __init__(self, h, period, kernel_size=5, stride=3, use_spectral_norm=False):
218
- super(DiscriminatorP, self).__init__()
219
- self.period = period
220
- self.d_mult = h.discriminator_channel_mult
221
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
222
- self.convs = nn.ModuleList([
223
- norm_f(Conv2d(1, int(32*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
224
- norm_f(Conv2d(int(32*self.d_mult), int(128*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
225
- norm_f(Conv2d(int(128*self.d_mult), int(512*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
226
- norm_f(Conv2d(int(512*self.d_mult), int(1024*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
227
- norm_f(Conv2d(int(1024*self.d_mult), int(1024*self.d_mult), (kernel_size, 1), 1, padding=(2, 0))),
228
- ])
229
- self.conv_post = norm_f(Conv2d(int(1024*self.d_mult), 1, (3, 1), 1, padding=(1, 0)))
230
-
231
- def forward(self, x):
232
- fmap = []
233
-
234
- # 1d to 2d
235
- b, c, t = x.shape
236
- if t % self.period != 0: # pad first
237
- n_pad = self.period - (t % self.period)
238
- x = F.pad(x, (0, n_pad), "reflect")
239
- t = t + n_pad
240
- x = x.view(b, c, t // self.period, self.period)
241
-
242
- for l in self.convs:
243
- x = l(x)
244
- x = F.leaky_relu(x, LRELU_SLOPE)
245
- fmap.append(x)
246
- x = self.conv_post(x)
247
- fmap.append(x)
248
- x = torch.flatten(x, 1, -1)
249
-
250
- return x, fmap
251
-
252
-
253
- class MultiPeriodDiscriminator(torch.nn.Module):
254
- def __init__(self, h):
255
- super(MultiPeriodDiscriminator, self).__init__()
256
- self.mpd_reshapes = h.mpd_reshapes
257
- print("mpd_reshapes: {}".format(self.mpd_reshapes))
258
- discriminators = [DiscriminatorP(h, rs, use_spectral_norm=h.use_spectral_norm) for rs in self.mpd_reshapes]
259
- self.discriminators = nn.ModuleList(discriminators)
260
-
261
- def forward(self, y, y_hat):
262
- y_d_rs = []
263
- y_d_gs = []
264
- fmap_rs = []
265
- fmap_gs = []
266
- for i, d in enumerate(self.discriminators):
267
- y_d_r, fmap_r = d(y)
268
- y_d_g, fmap_g = d(y_hat)
269
- y_d_rs.append(y_d_r)
270
- fmap_rs.append(fmap_r)
271
- y_d_gs.append(y_d_g)
272
- fmap_gs.append(fmap_g)
273
-
274
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
275
-
276
-
277
- class DiscriminatorR(nn.Module):
278
- def __init__(self, cfg, resolution):
279
- super().__init__()
280
-
281
- self.resolution = resolution
282
- assert len(self.resolution) == 3, \
283
- "MRD layer requires list with len=3, got {}".format(self.resolution)
284
- self.lrelu_slope = LRELU_SLOPE
285
-
286
- norm_f = weight_norm if cfg.use_spectral_norm == False else spectral_norm
287
- if hasattr(cfg, "mrd_use_spectral_norm"):
288
- print("INFO: overriding MRD use_spectral_norm as {}".format(cfg.mrd_use_spectral_norm))
289
- norm_f = weight_norm if cfg.mrd_use_spectral_norm == False else spectral_norm
290
- self.d_mult = cfg.discriminator_channel_mult
291
- if hasattr(cfg, "mrd_channel_mult"):
292
- print("INFO: overriding mrd channel multiplier as {}".format(cfg.mrd_channel_mult))
293
- self.d_mult = cfg.mrd_channel_mult
294
-
295
- self.convs = nn.ModuleList([
296
- norm_f(nn.Conv2d(1, int(32*self.d_mult), (3, 9), padding=(1, 4))),
297
- norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
298
- norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
299
- norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
300
- norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 3), padding=(1, 1))),
301
- ])
302
- self.conv_post = norm_f(nn.Conv2d(int(32 * self.d_mult), 1, (3, 3), padding=(1, 1)))
303
-
304
- def forward(self, x):
305
- fmap = []
306
-
307
- x = self.spectrogram(x)
308
- x = x.unsqueeze(1)
309
- for l in self.convs:
310
- x = l(x)
311
- x = F.leaky_relu(x, self.lrelu_slope)
312
- fmap.append(x)
313
- x = self.conv_post(x)
314
- fmap.append(x)
315
- x = torch.flatten(x, 1, -1)
316
-
317
- return x, fmap
318
-
319
- def spectrogram(self, x):
320
- n_fft, hop_length, win_length = self.resolution
321
- x = F.pad(x, (int((n_fft - hop_length) / 2), int((n_fft - hop_length) / 2)), mode='reflect')
322
- x = x.squeeze(1)
323
- x = torch.stft(x, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False, return_complex=True)
324
- x = torch.view_as_real(x) # [B, F, TT, 2]
325
- mag = torch.norm(x, p=2, dim =-1) #[B, F, TT]
326
-
327
- return mag
328
-
329
-
330
- class MultiResolutionDiscriminator(nn.Module):
331
- def __init__(self, cfg, debug=False):
332
- super().__init__()
333
- self.resolutions = cfg.resolutions
334
- assert len(self.resolutions) == 3,\
335
- "MRD requires list of list with len=3, each element having a list with len=3. got {}".\
336
- format(self.resolutions)
337
- self.discriminators = nn.ModuleList(
338
- [DiscriminatorR(cfg, resolution) for resolution in self.resolutions]
339
- )
340
-
341
- def forward(self, y, y_hat):
342
- y_d_rs = []
343
- y_d_gs = []
344
- fmap_rs = []
345
- fmap_gs = []
346
-
347
- for i, d in enumerate(self.discriminators):
348
- y_d_r, fmap_r = d(x=y)
349
- y_d_g, fmap_g = d(x=y_hat)
350
- y_d_rs.append(y_d_r)
351
- fmap_rs.append(fmap_r)
352
- y_d_gs.append(y_d_g)
353
- fmap_gs.append(fmap_g)
354
-
355
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
356
-
357
-
358
- def feature_loss(fmap_r, fmap_g):
359
- loss = 0
360
- for dr, dg in zip(fmap_r, fmap_g):
361
- for rl, gl in zip(dr, dg):
362
- loss += torch.mean(torch.abs(rl - gl))
363
-
364
- return loss*2
365
-
366
-
367
- def discriminator_loss(disc_real_outputs, disc_generated_outputs):
368
- loss = 0
369
- r_losses = []
370
- g_losses = []
371
- for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
372
- r_loss = torch.mean((1-dr)**2)
373
- g_loss = torch.mean(dg**2)
374
- loss += (r_loss + g_loss)
375
- r_losses.append(r_loss.item())
376
- g_losses.append(g_loss.item())
377
-
378
- return loss, r_losses, g_losses
379
-
380
-
381
- def generator_loss(disc_outputs):
382
- loss = 0
383
- gen_losses = []
384
- for dg in disc_outputs:
385
- l = torch.mean((1-dg)**2)
386
- gen_losses.append(l)
387
- loss += l
388
-
389
- return loss, gen_losses
390
-
391
-
392
-
393
- class VocoderBigVGAN(object):
394
- def __init__(self, ckpt_vocoder,device='cuda'):
395
- vocoder_sd = torch.load(os.path.join(ckpt_vocoder,'best_netG.pt'), map_location='cpu')
396
-
397
- vocoder_args = OmegaConf.load(os.path.join(ckpt_vocoder,'args.yml'))
398
-
399
- self.generator = BigVGAN(vocoder_args)
400
- self.generator.load_state_dict(vocoder_sd['generator'])
401
- self.generator.eval()
402
-
403
- self.device = device
404
- self.generator.to(self.device)
405
-
406
- def vocode(self, spec):
407
- with torch.no_grad():
408
- if isinstance(spec,np.ndarray):
409
- spec = torch.from_numpy(spec).unsqueeze(0)
410
- spec = spec.to(dtype=torch.float32,device=self.device)
411
- return self.generator(spec).squeeze().cpu().numpy()
412
-
413
- def __call__(self, wav):
414
- return self.vocode(wav)
 
spaces/AIGText/GlyphControl/cldm/ddim_hacked.py DELETED
@@ -1,318 +0,0 @@
1
- """SAMPLING ONLY."""
2
-
3
- import torch
4
- import numpy as np
5
- from tqdm import tqdm
6
-
7
- from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
8
-
9
-
10
- class DDIMSampler(object):
11
- def __init__(self, model, schedule="linear", **kwargs):
12
- super().__init__()
13
- self.model = model
14
- self.ddpm_num_timesteps = model.num_timesteps
15
- self.schedule = schedule
16
-
17
- def register_buffer(self, name, attr):
18
- if type(attr) == torch.Tensor:
19
- if attr.device != torch.device("cuda") and torch.cuda.is_available():
20
- attr = attr.to(torch.device("cuda"))
21
- setattr(self, name, attr)
22
-
23
- def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
24
- self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
25
- num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
26
- alphas_cumprod = self.model.alphas_cumprod
27
- assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
28
- to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
29
-
30
- self.register_buffer('betas', to_torch(self.model.betas))
31
- self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
32
- self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
33
-
34
- # calculations for diffusion q(x_t | x_{t-1}) and others
35
- self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
36
- self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
37
- self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
38
- self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
39
- self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
40
-
41
- # ddim sampling parameters
42
- ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
43
- ddim_timesteps=self.ddim_timesteps,
44
- eta=ddim_eta,verbose=verbose)
45
- self.register_buffer('ddim_sigmas', ddim_sigmas)
46
- self.register_buffer('ddim_alphas', ddim_alphas)
47
- self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
48
- self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
49
- sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
50
- (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
51
- 1 - self.alphas_cumprod / self.alphas_cumprod_prev))
52
- self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
53
-
54
- @torch.no_grad()
55
- def sample(self,
56
- S,
57
- batch_size,
58
- shape,
59
- conditioning=None,
60
- callback=None,
61
- normals_sequence=None,
62
- img_callback=None,
63
- quantize_x0=False,
64
- eta=0.,
65
- mask=None,
66
- x0=None,
67
- temperature=1.,
68
- noise_dropout=0.,
69
- score_corrector=None,
70
- corrector_kwargs=None,
71
- verbose=True,
72
- x_T=None,
73
- log_every_t=100,
74
- unconditional_guidance_scale=1.,
75
- unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
76
- dynamic_threshold=None,
77
- ucg_schedule=None,
78
- **kwargs
79
- ):
80
- if conditioning is not None:
81
- if isinstance(conditioning, dict):
82
- for key, ctmp in conditioning.items():
83
- if ctmp is None:
84
- continue
85
- else:
86
- while isinstance(ctmp, list): ctmp = ctmp[0]
87
- if ctmp.shape[0] != batch_size:
88
- print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}")
89
-
90
- elif isinstance(conditioning, list):
91
- for ctmp in conditioning:
92
- if ctmp is not None and ctmp.shape[0] != batch_size:
93
- print(f"Warning: Got {ctmp.shape[0]} conditionings but batch-size is {batch_size}")
94
-
95
- else:
96
- if conditioning.shape[0] != batch_size:
97
- print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
98
-
99
- self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
100
- # sampling
101
- C, H, W = shape
102
- size = (batch_size, C, H, W)
103
- print(f'Data shape for DDIM sampling is {size}, eta {eta}')
104
-
105
- samples, intermediates = self.ddim_sampling(conditioning, size,
106
- callback=callback,
107
- img_callback=img_callback,
108
- quantize_denoised=quantize_x0,
109
- mask=mask, x0=x0,
110
- ddim_use_original_steps=False,
111
- noise_dropout=noise_dropout,
112
- temperature=temperature,
113
- score_corrector=score_corrector,
114
- corrector_kwargs=corrector_kwargs,
115
- x_T=x_T,
116
- log_every_t=log_every_t,
117
- unconditional_guidance_scale=unconditional_guidance_scale,
118
- unconditional_conditioning=unconditional_conditioning,
119
- dynamic_threshold=dynamic_threshold,
120
- ucg_schedule=ucg_schedule
121
- )
122
- return samples, intermediates
123
-
124
- @torch.no_grad()
125
- def ddim_sampling(self, cond, shape,
126
- x_T=None, ddim_use_original_steps=False,
127
- callback=None, timesteps=None, quantize_denoised=False,
128
- mask=None, x0=None, img_callback=None, log_every_t=100,
129
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
130
- unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,
131
- ucg_schedule=None):
132
- device = self.model.betas.device
133
- b = shape[0]
134
- if x_T is None:
135
- img = torch.randn(shape, device=device)
136
- else:
137
- img = x_T
138
-
139
- if timesteps is None:
140
- timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
141
- elif timesteps is not None and not ddim_use_original_steps:
142
- subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
143
- timesteps = self.ddim_timesteps[:subset_end]
144
-
145
- intermediates = {'x_inter': [img], 'pred_x0': [img]}
146
- time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)
147
- total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
148
- print(f"Running DDIM Sampling with {total_steps} timesteps")
149
-
150
- iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
151
-
152
- for i, step in enumerate(iterator):
153
- index = total_steps - i - 1
154
- ts = torch.full((b,), step, device=device, dtype=torch.long)
155
-
156
- if mask is not None:
157
- assert x0 is not None
158
- img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
159
- img = img_orig * mask + (1. - mask) * img
160
-
161
- if ucg_schedule is not None:
162
- assert len(ucg_schedule) == len(time_range)
163
- unconditional_guidance_scale = ucg_schedule[i]
164
-
165
- outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
166
- quantize_denoised=quantize_denoised, temperature=temperature,
167
- noise_dropout=noise_dropout, score_corrector=score_corrector,
168
- corrector_kwargs=corrector_kwargs,
169
- unconditional_guidance_scale=unconditional_guidance_scale,
170
- unconditional_conditioning=unconditional_conditioning,
171
- dynamic_threshold=dynamic_threshold)
172
- img, pred_x0 = outs
173
- if callback: callback(i)
174
- if img_callback: img_callback(pred_x0, i)
175
-
176
- if index % log_every_t == 0 or index == total_steps - 1:
177
- intermediates['x_inter'].append(img)
178
- intermediates['pred_x0'].append(pred_x0)
179
-
180
- return img, intermediates
181
-
182
- @torch.no_grad()
183
- def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
184
- temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
185
- unconditional_guidance_scale=1., unconditional_conditioning=None,
186
- dynamic_threshold=None):
187
- b, *_, device = *x.shape, x.device
188
-
189
- if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
190
- model_output = self.model.apply_model(x, t, c)
191
- else:
192
- model_t = self.model.apply_model(x, t, c)
193
- model_uncond = self.model.apply_model(x, t, unconditional_conditioning)
194
- model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)
195
-
196
- if self.model.parameterization == "v":
197
- e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)
198
- else:
199
- e_t = model_output
200
-
201
- if score_corrector is not None:
202
- assert self.model.parameterization == "eps", 'not implemented'
203
- e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
204
-
205
- alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
206
- alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
207
- sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
208
- sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
209
- # select parameters corresponding to the currently considered timestep
210
- a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
211
- a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
212
- sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
213
- sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
214
-
215
- # current prediction for x_0
216
- if self.model.parameterization != "v":
217
- pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
218
- else:
219
- pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)
220
-
221
- if quantize_denoised:
222
- pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
223
-
224
- if dynamic_threshold is not None:
225
- raise NotImplementedError()
226
-
227
- # direction pointing to x_t
228
- dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
229
- noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
230
- if noise_dropout > 0.:
231
- noise = torch.nn.functional.dropout(noise, p=noise_dropout)
232
- x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
233
- return x_prev, pred_x0
234
-
235
- @torch.no_grad()
236
- def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,
237
- unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):
238
- num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]
239
-
240
- assert t_enc <= num_reference_steps
241
- num_steps = t_enc
242
-
243
- if use_original_steps:
244
- alphas_next = self.alphas_cumprod[:num_steps]
245
- alphas = self.alphas_cumprod_prev[:num_steps]
246
- else:
247
- alphas_next = self.ddim_alphas[:num_steps]
248
- alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])
249
-
250
- x_next = x0
251
- intermediates = []
252
- inter_steps = []
253
- for i in tqdm(range(num_steps), desc='Encoding Image'):
254
- t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)
255
- if unconditional_guidance_scale == 1.:
256
- noise_pred = self.model.apply_model(x_next, t, c)
257
- else:
258
- assert unconditional_conditioning is not None
259
- e_t_uncond, noise_pred = torch.chunk(
260
- self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),
261
- torch.cat((unconditional_conditioning, c))), 2)
262
- noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)
263
-
264
- xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next
265
- weighted_noise_pred = alphas_next[i].sqrt() * (
266
- (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred
267
- x_next = xt_weighted + weighted_noise_pred
268
- if return_intermediates and i % (
269
- num_steps // return_intermediates) == 0 and i < num_steps - 1:
270
- intermediates.append(x_next)
271
- inter_steps.append(i)
272
- elif return_intermediates and i >= num_steps - 2:
273
- intermediates.append(x_next)
274
- inter_steps.append(i)
275
- if callback: callback(i)
276
-
277
- out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}
278
- if return_intermediates:
279
- out.update({'intermediates': intermediates})
280
- return x_next, out
281
-
282
- @torch.no_grad()
283
- def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
284
- # fast, but does not allow for exact reconstruction
285
- # t serves as an index to gather the correct alphas
286
- if use_original_steps:
287
- sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
288
- sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
289
- else:
290
- sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
291
- sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
292
-
293
- if noise is None:
294
- noise = torch.randn_like(x0)
295
- return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
296
- extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)
297
-
298
- @torch.no_grad()
299
- def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
300
- use_original_steps=False, callback=None):
301
-
302
- timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
303
- timesteps = timesteps[:t_start]
304
-
305
- time_range = np.flip(timesteps)
306
- total_steps = timesteps.shape[0]
307
- print(f"Running DDIM Sampling with {total_steps} timesteps")
308
-
309
- iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
310
- x_dec = x_latent
311
- for i, step in enumerate(iterator):
312
- index = total_steps - i - 1
313
- ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
314
- x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
315
- unconditional_guidance_scale=unconditional_guidance_scale,
316
- unconditional_conditioning=unconditional_conditioning)
317
- if callback: callback(i)
318
- return x_dec
 
spaces/Aabbhishekk/MistralQnA/app.py DELETED
@@ -1,60 +0,0 @@
- import streamlit as st
-
- st.title("Mistral QA")
-
- # import chainlit as cl
-
- import os
- huggingfacehub_api_token = st.secrets["hf_token"]
-
- from langchain import HuggingFaceHub, PromptTemplate, LLMChain
-
- repo_id = "mistralai/Mistral-7B-v0.1"
- llm = HuggingFaceHub(huggingfacehub_api_token=huggingfacehub_api_token,
-                      repo_id=repo_id,
-                      model_kwargs={"temperature":0.2, "max_new_tokens":200})
-
- template = """Give answer for the question.
- question: {question}
-
- At the end of the answer, just say, 'Thanks for asking'
-
- """
- # input = st.text_input("What do you want to ask about", placeholder="Input your question here")
-
-
- # # @cl.langchain_factory
- # def factory():
- #     prompt = PromptTemplate(template=template, input_variables=['question'])
- #     llm_chain = LLMChain(prompt=prompt, llm=llm, verbose=True)
-
- #     return llm_chain
-
-
- prompt = PromptTemplate(template=template, input_variables=["question"])
- llm_chain = LLMChain(prompt=prompt,verbose=True,llm=llm)
-
- # result = llm_chain.predict(question=input)
-
- # print(result)
-
- def chat(query):
-     # prompt = PromptTemplate(template=template, input_variables=["question"])
-     # llm_chain = LLMChain(prompt=prompt,verbose=True,llm=llm)
-
-     result = llm_chain.predict(question=query)
-
-     return result
-
-
-
-
- def main():
-     input = st.text_input("What do you want to ask about", placeholder="Input your question here")
-     if input:
-         output = chat(input)
-         st.write(output,unsafe_allow_html=True)
-
-
- if __name__ == '__main__':
-     main()
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/colorinput/colorpicker/methods/Transform.js DELETED
@@ -1,27 +0,0 @@
- var RotateAround = Phaser.Math.RotateAround;
- var LocalToWorld = function (gameObject, localX, localY, out) {
-     if (out === undefined) {
-         out = {};
-     } else if (out === true) {
-         if (GlobOut === undefined) {
-             GlobOut = {};
-         }
-         out = GlobOut;
-     }
-
-     localX -= (gameObject.width * gameObject.originX);
-     localY -= (gameObject.height * gameObject.originY);
-     var point = {
-         x: localX * gameObject.scaleX,
-         y: localY * gameObject.scaleY
-     };
-     RotateAround(point, 0, 0, -gameObject.rotation);
-
-     out.x = gameObject.x + localX;
-     out.y = gameObject.y + localY;
-
-     return out;
- }
-
- var GlobOut;
- export { LocalToWorld };
 
spaces/AleksBlacky/Arxiv_paper_classifier/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: Arxiv_paper_classifier
- emoji: 📉
- colorFrom: gray
- colorTo: pink
- sdk: streamlit
- sdk_version: 1.2.0
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
-
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_comparison.py DELETED
@@ -1,405 +0,0 @@
1
- from typing import Any, Callable, Dict, List, Optional, Union
2
-
3
- import torch
4
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
5
-
6
- from diffusers import (
7
- AutoencoderKL,
8
- DDIMScheduler,
9
- DiffusionPipeline,
10
- LMSDiscreteScheduler,
11
- PNDMScheduler,
12
- StableDiffusionPipeline,
13
- UNet2DConditionModel,
14
- )
15
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
16
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
17
-
18
-
19
- pipe1_model_id = "CompVis/stable-diffusion-v1-1"
20
- pipe2_model_id = "CompVis/stable-diffusion-v1-2"
21
- pipe3_model_id = "CompVis/stable-diffusion-v1-3"
22
- pipe4_model_id = "CompVis/stable-diffusion-v1-4"
23
-
24
-
25
- class StableDiffusionComparisonPipeline(DiffusionPipeline):
26
- r"""
27
- Pipeline for parallel comparison of Stable Diffusion v1-v4
28
- This pipeline inherits from DiffusionPipeline and depends on the use of an Auth Token for
29
- downloading pre-trained checkpoints from Hugging Face Hub.
30
- If using Hugging Face Hub, pass the Model ID for Stable Diffusion v1.4 as the previous 3 checkpoints will be loaded
31
- automatically.
32
- Args:
33
- vae ([`AutoencoderKL`]):
34
- Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
35
- text_encoder ([`CLIPTextModel`]):
36
- Frozen text-encoder. Stable Diffusion uses the text portion of
37
- [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
38
- the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
39
- tokenizer (`CLIPTokenizer`):
40
- Tokenizer of class
41
- [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
42
- unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
43
- scheduler ([`SchedulerMixin`]):
44
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
45
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
46
- safety_checker ([`StableDiffusionMegaSafetyChecker`]):
47
- Classification module that estimates whether generated images could be considered offensive or harmful.
48
- Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
49
- feature_extractor ([`CLIPImageProcessor`]):
50
- Model that extracts features from generated images to be used as inputs for the `safety_checker`.
51
- """
52
-
53
- def __init__(
54
- self,
55
- vae: AutoencoderKL,
56
- text_encoder: CLIPTextModel,
57
- tokenizer: CLIPTokenizer,
58
- unet: UNet2DConditionModel,
59
- scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
60
- safety_checker: StableDiffusionSafetyChecker,
61
- feature_extractor: CLIPImageProcessor,
62
- requires_safety_checker: bool = True,
63
- ):
64
- super()._init_()
65
-
66
- self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
67
- self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
68
- self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
69
- self.pipe4 = StableDiffusionPipeline(
70
- vae=vae,
71
- text_encoder=text_encoder,
72
- tokenizer=tokenizer,
73
- unet=unet,
74
- scheduler=scheduler,
75
- safety_checker=safety_checker,
76
- feature_extractor=feature_extractor,
77
- requires_safety_checker=requires_safety_checker,
78
- )
79
-
80
- self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
81
-
82
- @property
83
- def layers(self) -> Dict[str, Any]:
84
- return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
85
-
86
- def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
87
- r"""
88
- Enable sliced attention computation.
89
- When this option is enabled, the attention module will split the input tensor in slices, to compute attention
90
- in several steps. This is useful to save some memory in exchange for a small speed decrease.
91
- Args:
92
- slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
93
- When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
94
- a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
95
- `attention_head_dim` must be a multiple of `slice_size`.
96
- """
97
- if slice_size == "auto":
98
- # half the attention head size is usually a good trade-off between
99
- # speed and memory
100
- slice_size = self.unet.config.attention_head_dim // 2
101
- self.unet.set_attention_slice(slice_size)
102
-
103
- def disable_attention_slicing(self):
104
- r"""
105
- Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will go
106
- back to computing attention in one step.
107
- """
108
- # set slice_size = `None` to disable `attention slicing`
109
- self.enable_attention_slicing(None)
110
-
111
- @torch.no_grad()
112
- def text2img_sd1_1(
113
- self,
114
- prompt: Union[str, List[str]],
115
- height: int = 512,
116
- width: int = 512,
117
- num_inference_steps: int = 50,
118
- guidance_scale: float = 7.5,
119
- negative_prompt: Optional[Union[str, List[str]]] = None,
120
- num_images_per_prompt: Optional[int] = 1,
121
- eta: float = 0.0,
122
- generator: Optional[torch.Generator] = None,
123
- latents: Optional[torch.FloatTensor] = None,
124
- output_type: Optional[str] = "pil",
125
- return_dict: bool = True,
126
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
127
- callback_steps: int = 1,
128
- **kwargs,
129
- ):
130
- return self.pipe1(
131
- prompt=prompt,
132
- height=height,
133
- width=width,
134
- num_inference_steps=num_inference_steps,
135
- guidance_scale=guidance_scale,
136
- negative_prompt=negative_prompt,
137
- num_images_per_prompt=num_images_per_prompt,
138
- eta=eta,
139
- generator=generator,
140
- latents=latents,
141
- output_type=output_type,
142
- return_dict=return_dict,
143
- callback=callback,
144
- callback_steps=callback_steps,
145
- **kwargs,
146
- )
147
-
148
- @torch.no_grad()
149
- def text2img_sd1_2(
150
- self,
151
- prompt: Union[str, List[str]],
152
- height: int = 512,
153
- width: int = 512,
154
- num_inference_steps: int = 50,
155
- guidance_scale: float = 7.5,
156
- negative_prompt: Optional[Union[str, List[str]]] = None,
157
- num_images_per_prompt: Optional[int] = 1,
158
- eta: float = 0.0,
159
- generator: Optional[torch.Generator] = None,
160
- latents: Optional[torch.FloatTensor] = None,
161
- output_type: Optional[str] = "pil",
162
- return_dict: bool = True,
163
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
164
- callback_steps: int = 1,
165
- **kwargs,
166
- ):
167
- return self.pipe2(
168
- prompt=prompt,
169
- height=height,
170
- width=width,
171
- num_inference_steps=num_inference_steps,
172
- guidance_scale=guidance_scale,
173
- negative_prompt=negative_prompt,
174
- num_images_per_prompt=num_images_per_prompt,
175
- eta=eta,
176
- generator=generator,
177
- latents=latents,
178
- output_type=output_type,
179
- return_dict=return_dict,
180
- callback=callback,
181
- callback_steps=callback_steps,
182
- **kwargs,
183
- )
184
-
185
- @torch.no_grad()
186
- def text2img_sd1_3(
187
- self,
188
- prompt: Union[str, List[str]],
189
- height: int = 512,
190
- width: int = 512,
191
- num_inference_steps: int = 50,
192
- guidance_scale: float = 7.5,
193
- negative_prompt: Optional[Union[str, List[str]]] = None,
194
- num_images_per_prompt: Optional[int] = 1,
195
- eta: float = 0.0,
196
- generator: Optional[torch.Generator] = None,
197
- latents: Optional[torch.FloatTensor] = None,
198
- output_type: Optional[str] = "pil",
199
- return_dict: bool = True,
200
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
201
- callback_steps: int = 1,
202
- **kwargs,
203
- ):
204
- return self.pipe3(
205
- prompt=prompt,
206
- height=height,
207
- width=width,
208
- num_inference_steps=num_inference_steps,
209
- guidance_scale=guidance_scale,
210
- negative_prompt=negative_prompt,
211
- num_images_per_prompt=num_images_per_prompt,
212
- eta=eta,
213
- generator=generator,
214
- latents=latents,
215
- output_type=output_type,
216
- return_dict=return_dict,
217
- callback=callback,
218
- callback_steps=callback_steps,
219
- **kwargs,
220
- )
221
-
222
- @torch.no_grad()
223
- def text2img_sd1_4(
224
- self,
225
- prompt: Union[str, List[str]],
226
- height: int = 512,
227
- width: int = 512,
228
- num_inference_steps: int = 50,
229
- guidance_scale: float = 7.5,
230
- negative_prompt: Optional[Union[str, List[str]]] = None,
231
- num_images_per_prompt: Optional[int] = 1,
232
- eta: float = 0.0,
233
- generator: Optional[torch.Generator] = None,
234
- latents: Optional[torch.FloatTensor] = None,
235
- output_type: Optional[str] = "pil",
236
- return_dict: bool = True,
237
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
238
- callback_steps: int = 1,
239
- **kwargs,
240
- ):
241
- return self.pipe4(
242
- prompt=prompt,
243
- height=height,
244
- width=width,
245
- num_inference_steps=num_inference_steps,
246
- guidance_scale=guidance_scale,
247
- negative_prompt=negative_prompt,
248
- num_images_per_prompt=num_images_per_prompt,
249
- eta=eta,
250
- generator=generator,
251
- latents=latents,
252
- output_type=output_type,
253
- return_dict=return_dict,
254
- callback=callback,
255
- callback_steps=callback_steps,
256
- **kwargs,
257
- )
258
-
259
- @torch.no_grad()
260
- def _call_(
261
- self,
262
- prompt: Union[str, List[str]],
263
- height: int = 512,
264
- width: int = 512,
265
- num_inference_steps: int = 50,
266
- guidance_scale: float = 7.5,
267
- negative_prompt: Optional[Union[str, List[str]]] = None,
268
- num_images_per_prompt: Optional[int] = 1,
269
- eta: float = 0.0,
270
- generator: Optional[torch.Generator] = None,
271
- latents: Optional[torch.FloatTensor] = None,
272
- output_type: Optional[str] = "pil",
273
- return_dict: bool = True,
274
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
275
- callback_steps: int = 1,
276
- **kwargs,
277
- ):
278
- r"""
279
- Function invoked when calling the pipeline for generation. This function will generate 4 results as part
280
- of running all the 4 pipelines for SD1.1-1.4 together in a serial-processing, parallel-invocation fashion.
281
- Args:
282
- prompt (`str` or `List[str]`):
283
- The prompt or prompts to guide the image generation.
284
- height (`int`, optional, defaults to 512):
285
- The height in pixels of the generated image.
286
- width (`int`, optional, defaults to 512):
287
- The width in pixels of the generated image.
288
- num_inference_steps (`int`, optional, defaults to 50):
289
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
290
- expense of slower inference.
291
- guidance_scale (`float`, optional, defaults to 7.5):
292
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
293
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
294
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
295
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
296
- usually at the expense of lower image quality.
297
- eta (`float`, optional, defaults to 0.0):
298
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
299
- [`schedulers.DDIMScheduler`], will be ignored for others.
300
- generator (`torch.Generator`, optional):
301
- A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
302
- deterministic.
303
- latents (`torch.FloatTensor`, optional):
304
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
305
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
306
- tensor will ge generated by sampling using the supplied random `generator`.
307
- output_type (`str`, optional, defaults to `"pil"`):
308
- The output format of the generate image. Choose between
309
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
310
- return_dict (`bool`, optional, defaults to `True`):
311
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
312
- plain tuple.
313
- Returns:
314
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
315
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.
316
- When returning a tuple, the first element is a list with the generated images, and the second element is a
317
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
318
- (nsfw) content, according to the `safety_checker`.
319
- """
320
-
321
- device = "cuda" if torch.cuda.is_available() else "cpu"
322
- self.to(device)
323
-
324
- # Checks if the height and width are divisible by 8 or not
325
- if height % 8 != 0 or width % 8 != 0:
326
- raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
327
-
328
- # Get first result from Stable Diffusion Checkpoint v1.1
329
- res1 = self.text2img_sd1_1(
330
- prompt=prompt,
331
- height=height,
332
- width=width,
333
- num_inference_steps=num_inference_steps,
334
- guidance_scale=guidance_scale,
335
- negative_prompt=negative_prompt,
336
- num_images_per_prompt=num_images_per_prompt,
337
- eta=eta,
338
- generator=generator,
339
- latents=latents,
340
- output_type=output_type,
341
- return_dict=return_dict,
342
- callback=callback,
343
- callback_steps=callback_steps,
344
- **kwargs,
345
- )
346
-
347
- # Get first result from Stable Diffusion Checkpoint v1.2
348
- res2 = self.text2img_sd1_2(
349
- prompt=prompt,
350
- height=height,
351
- width=width,
352
- num_inference_steps=num_inference_steps,
353
- guidance_scale=guidance_scale,
354
- negative_prompt=negative_prompt,
355
- num_images_per_prompt=num_images_per_prompt,
356
- eta=eta,
357
- generator=generator,
358
- latents=latents,
359
- output_type=output_type,
360
- return_dict=return_dict,
361
- callback=callback,
362
- callback_steps=callback_steps,
363
- **kwargs,
364
- )
365
-
366
- # Get first result from Stable Diffusion Checkpoint v1.3
367
- res3 = self.text2img_sd1_3(
368
- prompt=prompt,
369
- height=height,
370
- width=width,
371
- num_inference_steps=num_inference_steps,
372
- guidance_scale=guidance_scale,
373
- negative_prompt=negative_prompt,
374
- num_images_per_prompt=num_images_per_prompt,
375
- eta=eta,
376
- generator=generator,
377
- latents=latents,
378
- output_type=output_type,
379
- return_dict=return_dict,
380
- callback=callback,
381
- callback_steps=callback_steps,
382
- **kwargs,
383
- )
384
-
385
- # Get first result from Stable Diffusion Checkpoint v1.4
386
- res4 = self.text2img_sd1_4(
387
- prompt=prompt,
388
- height=height,
389
- width=width,
390
- num_inference_steps=num_inference_steps,
391
- guidance_scale=guidance_scale,
392
- negative_prompt=negative_prompt,
393
- num_images_per_prompt=num_images_per_prompt,
394
- eta=eta,
395
- generator=generator,
396
- latents=latents,
397
- output_type=output_type,
398
- return_dict=return_dict,
399
- callback=callback,
400
- callback_steps=callback_steps,
401
- **kwargs,
402
- )
403
-
404
- # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
405
- return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
 
spaces/Andy1621/uniformer_image_detection/configs/_base_/models/mask_rcnn_r50_caffe_c4.py DELETED
@@ -1,123 +0,0 @@
- # model settings
- norm_cfg = dict(type='BN', requires_grad=False)
- model = dict(
-     type='MaskRCNN',
-     pretrained='open-mmlab://detectron2/resnet50_caffe',
-     backbone=dict(
-         type='ResNet',
-         depth=50,
-         num_stages=3,
-         strides=(1, 2, 2),
-         dilations=(1, 1, 1),
-         out_indices=(2, ),
-         frozen_stages=1,
-         norm_cfg=norm_cfg,
-         norm_eval=True,
-         style='caffe'),
-     rpn_head=dict(
-         type='RPNHead',
-         in_channels=1024,
-         feat_channels=1024,
-         anchor_generator=dict(
-             type='AnchorGenerator',
-             scales=[2, 4, 8, 16, 32],
-             ratios=[0.5, 1.0, 2.0],
-             strides=[16]),
-         bbox_coder=dict(
-             type='DeltaXYWHBBoxCoder',
-             target_means=[.0, .0, .0, .0],
-             target_stds=[1.0, 1.0, 1.0, 1.0]),
-         loss_cls=dict(
-             type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
-         loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
-     roi_head=dict(
-         type='StandardRoIHead',
-         shared_head=dict(
-             type='ResLayer',
-             depth=50,
-             stage=3,
-             stride=2,
-             dilation=1,
-             style='caffe',
-             norm_cfg=norm_cfg,
-             norm_eval=True),
-         bbox_roi_extractor=dict(
-             type='SingleRoIExtractor',
-             roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
-             out_channels=1024,
-             featmap_strides=[16]),
-         bbox_head=dict(
-             type='BBoxHead',
-             with_avg_pool=True,
-             roi_feat_size=7,
-             in_channels=2048,
-             num_classes=80,
-             bbox_coder=dict(
-                 type='DeltaXYWHBBoxCoder',
-                 target_means=[0., 0., 0., 0.],
-                 target_stds=[0.1, 0.1, 0.2, 0.2]),
-             reg_class_agnostic=False,
-             loss_cls=dict(
-                 type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
-             loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
-         mask_roi_extractor=None,
-         mask_head=dict(
-             type='FCNMaskHead',
-             num_convs=0,
-             in_channels=2048,
-             conv_out_channels=256,
-             num_classes=80,
-             loss_mask=dict(
-                 type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
-     # model training and testing settings
-     train_cfg=dict(
-         rpn=dict(
-             assigner=dict(
-                 type='MaxIoUAssigner',
-                 pos_iou_thr=0.7,
-                 neg_iou_thr=0.3,
-                 min_pos_iou=0.3,
-                 match_low_quality=True,
-                 ignore_iof_thr=-1),
-             sampler=dict(
-                 type='RandomSampler',
-                 num=256,
-                 pos_fraction=0.5,
-                 neg_pos_ub=-1,
-                 add_gt_as_proposals=False),
-             allowed_border=0,
-             pos_weight=-1,
-             debug=False),
-         rpn_proposal=dict(
-             nms_pre=12000,
-             max_per_img=2000,
-             nms=dict(type='nms', iou_threshold=0.7),
-             min_bbox_size=0),
-         rcnn=dict(
-             assigner=dict(
-                 type='MaxIoUAssigner',
-                 pos_iou_thr=0.5,
-                 neg_iou_thr=0.5,
-                 min_pos_iou=0.5,
-                 match_low_quality=False,
-                 ignore_iof_thr=-1),
-             sampler=dict(
-                 type='RandomSampler',
-                 num=512,
-                 pos_fraction=0.25,
-                 neg_pos_ub=-1,
-                 add_gt_as_proposals=True),
-             mask_size=14,
-             pos_weight=-1,
-             debug=False)),
-     test_cfg=dict(
-         rpn=dict(
-             nms_pre=6000,
-             nms=dict(type='nms', iou_threshold=0.7),
-             max_per_img=1000,
-             min_bbox_size=0),
-         rcnn=dict(
-             score_thr=0.05,
-             nms=dict(type='nms', iou_threshold=0.5),
-             max_per_img=100,
-             mask_thr_binary=0.5)))
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/backbones/swin_transformer.py DELETED
@@ -1,630 +0,0 @@
1
- # --------------------------------------------------------
2
- # Swin Transformer
3
- # Copyright (c) 2021 Microsoft
4
- # Licensed under The MIT License [see LICENSE for details]
5
- # Written by Ze Liu, Yutong Lin, Yixuan Wei
6
- # --------------------------------------------------------
7
-
8
- import torch
9
- import torch.nn as nn
10
- import torch.nn.functional as F
11
- import torch.utils.checkpoint as checkpoint
12
- import numpy as np
13
- from timm.models.layers import DropPath, to_2tuple, trunc_normal_
14
-
15
- from mmcv_custom import load_checkpoint
16
- from mmdet.utils import get_root_logger
17
- from ..builder import BACKBONES
18
-
19
-
20
- class Mlp(nn.Module):
21
- """ Multilayer perceptron."""
22
-
23
- def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
24
- super().__init__()
25
- out_features = out_features or in_features
26
- hidden_features = hidden_features or in_features
27
- self.fc1 = nn.Linear(in_features, hidden_features)
28
- self.act = act_layer()
29
- self.fc2 = nn.Linear(hidden_features, out_features)
30
- self.drop = nn.Dropout(drop)
31
-
32
- def forward(self, x):
33
- x = self.fc1(x)
34
- x = self.act(x)
35
- x = self.drop(x)
36
- x = self.fc2(x)
37
- x = self.drop(x)
38
- return x
39
-
40
-
41
- def window_partition(x, window_size):
42
- """
43
- Args:
44
- x: (B, H, W, C)
45
- window_size (int): window size
46
-
47
- Returns:
48
- windows: (num_windows*B, window_size, window_size, C)
49
- """
50
- B, H, W, C = x.shape
51
- x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
52
- windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
53
- return windows
54
-
55
-
56
- def window_reverse(windows, window_size, H, W):
57
- """
58
- Args:
59
- windows: (num_windows*B, window_size, window_size, C)
60
- window_size (int): Window size
61
- H (int): Height of image
62
- W (int): Width of image
63
-
64
- Returns:
65
- x: (B, H, W, C)
66
- """
67
- B = int(windows.shape[0] / (H * W / window_size / window_size))
68
- x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
69
- x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
70
- return x
71
-
72
-
73
- class WindowAttention(nn.Module):
74
- """ Window based multi-head self attention (W-MSA) module with relative position bias.
75
- It supports both of shifted and non-shifted window.
76
-
77
- Args:
78
- dim (int): Number of input channels.
79
- window_size (tuple[int]): The height and width of the window.
80
- num_heads (int): Number of attention heads.
81
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
82
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
83
- attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
84
- proj_drop (float, optional): Dropout ratio of output. Default: 0.0
85
- """
86
-
87
- def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
88
-
89
- super().__init__()
90
- self.dim = dim
91
- self.window_size = window_size # Wh, Ww
92
- self.num_heads = num_heads
93
- head_dim = dim // num_heads
94
- self.scale = qk_scale or head_dim ** -0.5
95
-
96
- # define a parameter table of relative position bias
97
- self.relative_position_bias_table = nn.Parameter(
98
- torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
99
-
100
- # get pair-wise relative position index for each token inside the window
101
- coords_h = torch.arange(self.window_size[0])
102
- coords_w = torch.arange(self.window_size[1])
103
- coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
104
- coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
105
- relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
106
- relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
107
- relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
108
- relative_coords[:, :, 1] += self.window_size[1] - 1
109
- relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
110
- relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
111
- self.register_buffer("relative_position_index", relative_position_index)
112
-
113
- self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
114
- self.attn_drop = nn.Dropout(attn_drop)
115
- self.proj = nn.Linear(dim, dim)
116
- self.proj_drop = nn.Dropout(proj_drop)
117
-
118
- trunc_normal_(self.relative_position_bias_table, std=.02)
119
- self.softmax = nn.Softmax(dim=-1)
120
-
121
- def forward(self, x, mask=None):
122
- """ Forward function.
123
-
124
- Args:
125
- x: input features with shape of (num_windows*B, N, C)
126
- mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
127
- """
128
- B_, N, C = x.shape
129
- qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
130
- q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
131
-
132
- q = q * self.scale
133
- attn = (q @ k.transpose(-2, -1))
134
-
135
- relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
136
- self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
137
- relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
138
- attn = attn + relative_position_bias.unsqueeze(0)
139
-
140
- if mask is not None:
141
- nW = mask.shape[0]
142
- attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
143
- attn = attn.view(-1, self.num_heads, N, N)
144
- attn = self.softmax(attn)
145
- else:
146
- attn = self.softmax(attn)
147
-
148
- attn = self.attn_drop(attn)
149
-
150
- x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
151
- x = self.proj(x)
152
- x = self.proj_drop(x)
153
- return x
154
-
155
-
156
- class SwinTransformerBlock(nn.Module):
157
- """ Swin Transformer Block.
158
-
159
- Args:
160
- dim (int): Number of input channels.
161
- num_heads (int): Number of attention heads.
162
- window_size (int): Window size.
163
- shift_size (int): Shift size for SW-MSA.
164
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
165
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
166
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
167
- drop (float, optional): Dropout rate. Default: 0.0
168
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
169
- drop_path (float, optional): Stochastic depth rate. Default: 0.0
170
- act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
171
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
172
- """
173
-
174
- def __init__(self, dim, num_heads, window_size=7, shift_size=0,
175
- mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
176
- act_layer=nn.GELU, norm_layer=nn.LayerNorm):
177
- super().__init__()
178
- self.dim = dim
179
- self.num_heads = num_heads
180
- self.window_size = window_size
181
- self.shift_size = shift_size
182
- self.mlp_ratio = mlp_ratio
183
- assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
184
-
185
- self.norm1 = norm_layer(dim)
186
- self.attn = WindowAttention(
187
- dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
188
- qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
189
-
190
- self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
191
- self.norm2 = norm_layer(dim)
192
- mlp_hidden_dim = int(dim * mlp_ratio)
193
- self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
194
-
195
- self.H = None
196
- self.W = None
197
-
198
- def forward(self, x, mask_matrix):
199
- """ Forward function.
200
-
201
- Args:
202
- x: Input feature, tensor size (B, H*W, C).
203
- H, W: Spatial resolution of the input feature.
204
- mask_matrix: Attention mask for cyclic shift.
205
- """
206
- B, L, C = x.shape
207
- H, W = self.H, self.W
208
- assert L == H * W, "input feature has wrong size"
209
-
210
- shortcut = x
211
- x = self.norm1(x)
212
- x = x.view(B, H, W, C)
213
-
214
- # pad feature maps to multiples of window size
215
- pad_l = pad_t = 0
216
- pad_r = (self.window_size - W % self.window_size) % self.window_size
217
- pad_b = (self.window_size - H % self.window_size) % self.window_size
218
- x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
219
- _, Hp, Wp, _ = x.shape
220
-
221
- # cyclic shift
222
- if self.shift_size > 0:
223
- shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
224
- attn_mask = mask_matrix
225
- else:
226
- shifted_x = x
227
- attn_mask = None
228
-
229
- # partition windows
230
- x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
231
- x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
232
-
233
- # W-MSA/SW-MSA
234
- attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
235
-
236
- # merge windows
237
- attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
238
- shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
239
-
240
- # reverse cyclic shift
241
- if self.shift_size > 0:
242
- x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
243
- else:
244
- x = shifted_x
245
-
246
- if pad_r > 0 or pad_b > 0:
247
- x = x[:, :H, :W, :].contiguous()
248
-
249
- x = x.view(B, H * W, C)
250
-
251
- # FFN
252
- x = shortcut + self.drop_path(x)
253
- x = x + self.drop_path(self.mlp(self.norm2(x)))
254
-
255
- return x
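# Editor's note: a minimal, hedged sketch (not part of the original file) of exercising a
# single block outside the full backbone. It assumes torch and the helper classes defined
# earlier in this file (WindowAttention, Mlp, window_partition/window_reverse) are available;
# with shift_size=0 the mask_matrix argument is unused.
def _demo_swin_block():
    blk = SwinTransformerBlock(dim=96, num_heads=3, window_size=7, shift_size=0)
    blk.H, blk.W = 56, 56                      # spatial size must be set by the caller
    x = torch.randn(2, 56 * 56, 96)            # (B, H*W, C)
    return blk(x, mask_matrix=None)            # output keeps the same shape: (2, 3136, 96)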
256
-
257
-
258
- class PatchMerging(nn.Module):
259
- """ Patch Merging Layer
260
-
261
- Args:
262
- dim (int): Number of input channels.
263
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
264
- """
265
- def __init__(self, dim, norm_layer=nn.LayerNorm):
266
- super().__init__()
267
- self.dim = dim
268
- self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
269
- self.norm = norm_layer(4 * dim)
270
-
271
- def forward(self, x, H, W):
272
- """ Forward function.
273
-
274
- Args:
275
- x: Input feature, tensor size (B, H*W, C).
276
- H, W: Spatial resolution of the input feature.
277
- """
278
- B, L, C = x.shape
279
- assert L == H * W, "input feature has wrong size"
280
-
281
- x = x.view(B, H, W, C)
282
-
283
- # padding
284
- pad_input = (H % 2 == 1) or (W % 2 == 1)
285
- if pad_input:
286
- x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
287
-
288
- x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
289
- x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
290
- x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
291
- x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
292
- x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
293
- x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
294
-
295
- x = self.norm(x)
296
- x = self.reduction(x)
297
-
298
- return x
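# Editor's note: a small hedged sketch (not part of the original file) of what PatchMerging
# does to tensor shapes, assuming torch is imported at the top of this file.
def _demo_patch_merging():
    pm = PatchMerging(dim=96)
    x = torch.randn(2, 56 * 56, 96)
    return pm(x, 56, 56)                       # -> (2, 28 * 28, 192): H, W halved, C doubled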
299
-
300
-
301
- class BasicLayer(nn.Module):
302
- """ A basic Swin Transformer layer for one stage.
303
-
304
- Args:
305
- dim (int): Number of feature channels
306
- depth (int): Depths of this stage.
307
- num_heads (int): Number of attention head.
308
- window_size (int): Local window size. Default: 7.
309
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
310
- qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
311
- qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
312
- drop (float, optional): Dropout rate. Default: 0.0
313
- attn_drop (float, optional): Attention dropout rate. Default: 0.0
314
- drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
315
- norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
316
- downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
317
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
318
- """
319
-
320
- def __init__(self,
321
- dim,
322
- depth,
323
- num_heads,
324
- window_size=7,
325
- mlp_ratio=4.,
326
- qkv_bias=True,
327
- qk_scale=None,
328
- drop=0.,
329
- attn_drop=0.,
330
- drop_path=0.,
331
- norm_layer=nn.LayerNorm,
332
- downsample=None,
333
- use_checkpoint=False):
334
- super().__init__()
335
- self.window_size = window_size
336
- self.shift_size = window_size // 2
337
- self.depth = depth
338
- self.use_checkpoint = use_checkpoint
339
-
340
- # build blocks
341
- self.blocks = nn.ModuleList([
342
- SwinTransformerBlock(
343
- dim=dim,
344
- num_heads=num_heads,
345
- window_size=window_size,
346
- shift_size=0 if (i % 2 == 0) else window_size // 2,
347
- mlp_ratio=mlp_ratio,
348
- qkv_bias=qkv_bias,
349
- qk_scale=qk_scale,
350
- drop=drop,
351
- attn_drop=attn_drop,
352
- drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
353
- norm_layer=norm_layer)
354
- for i in range(depth)])
355
-
356
- # patch merging layer
357
- if downsample is not None:
358
- self.downsample = downsample(dim=dim, norm_layer=norm_layer)
359
- else:
360
- self.downsample = None
361
-
362
- def forward(self, x, H, W):
363
- """ Forward function.
364
-
365
- Args:
366
- x: Input feature, tensor size (B, H*W, C).
367
- H, W: Spatial resolution of the input feature.
368
- """
369
-
370
- # calculate attention mask for SW-MSA
371
- Hp = int(np.ceil(H / self.window_size)) * self.window_size
372
- Wp = int(np.ceil(W / self.window_size)) * self.window_size
373
- img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1
374
- h_slices = (slice(0, -self.window_size),
375
- slice(-self.window_size, -self.shift_size),
376
- slice(-self.shift_size, None))
377
- w_slices = (slice(0, -self.window_size),
378
- slice(-self.window_size, -self.shift_size),
379
- slice(-self.shift_size, None))
380
- cnt = 0
381
- for h in h_slices:
382
- for w in w_slices:
383
- img_mask[:, h, w, :] = cnt
384
- cnt += 1
385
-
386
- mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
387
- mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
388
- attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
389
- attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
390
-
391
- for blk in self.blocks:
392
- blk.H, blk.W = H, W
393
- if self.use_checkpoint:
394
- x = checkpoint.checkpoint(blk, x, attn_mask)
395
- else:
396
- x = blk(x, attn_mask)
397
- if self.downsample is not None:
398
- x_down = self.downsample(x, H, W)
399
- Wh, Ww = (H + 1) // 2, (W + 1) // 2
400
- return x, H, W, x_down, Wh, Ww
401
- else:
402
- return x, H, W, x, H, W
403
-
404
-
405
- class PatchEmbed(nn.Module):
406
- """ Image to Patch Embedding
407
-
408
- Args:
409
- patch_size (int): Patch token size. Default: 4.
410
- in_chans (int): Number of input image channels. Default: 3.
411
- embed_dim (int): Number of linear projection output channels. Default: 96.
412
- norm_layer (nn.Module, optional): Normalization layer. Default: None
413
- """
414
-
415
- def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
416
- super().__init__()
417
- patch_size = to_2tuple(patch_size)
418
- self.patch_size = patch_size
419
-
420
- self.in_chans = in_chans
421
- self.embed_dim = embed_dim
422
-
423
- self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
424
- if norm_layer is not None:
425
- self.norm = norm_layer(embed_dim)
426
- else:
427
- self.norm = None
428
-
429
- def forward(self, x):
430
- """Forward function."""
431
- # padding
432
- _, _, H, W = x.size()
433
- if W % self.patch_size[1] != 0:
434
- x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
435
- if H % self.patch_size[0] != 0:
436
- x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
437
-
438
- x = self.proj(x) # B C Wh Ww
439
- if self.norm is not None:
440
- Wh, Ww = x.size(2), x.size(3)
441
- x = x.flatten(2).transpose(1, 2)
442
- x = self.norm(x)
443
- x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
444
-
445
- return x
446
-
447
-
448
- @BACKBONES.register_module()
449
- class SwinTransformer(nn.Module):
450
- """ Swin Transformer backbone.
451
- A PyTorch implementation of: `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
452
- https://arxiv.org/pdf/2103.14030
453
-
454
- Args:
455
- pretrain_img_size (int): Input image size for training the pretrained model,
456
- used in absolute position embedding. Default: 224.
457
- patch_size (int | tuple(int)): Patch size. Default: 4.
458
- in_chans (int): Number of input image channels. Default: 3.
459
- embed_dim (int): Number of linear projection output channels. Default: 96.
460
- depths (tuple[int]): Depths of each Swin Transformer stage.
461
- num_heads (tuple[int]): Number of attention head of each stage.
462
- window_size (int): Window size. Default: 7.
463
- mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
464
- qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
465
- qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
466
- drop_rate (float): Dropout rate.
467
- attn_drop_rate (float): Attention dropout rate. Default: 0.
468
- drop_path_rate (float): Stochastic depth rate. Default: 0.2.
469
- norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
470
- ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
471
- patch_norm (bool): If True, add normalization after patch embedding. Default: True.
472
- out_indices (Sequence[int]): Output from which stages.
473
- frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
474
- -1 means not freezing any parameters.
475
- use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
476
- """
477
-
478
- def __init__(self,
479
- pretrain_img_size=224,
480
- patch_size=4,
481
- in_chans=3,
482
- embed_dim=96,
483
- depths=[2, 2, 6, 2],
484
- num_heads=[3, 6, 12, 24],
485
- window_size=7,
486
- mlp_ratio=4.,
487
- qkv_bias=True,
488
- qk_scale=None,
489
- drop_rate=0.,
490
- attn_drop_rate=0.,
491
- drop_path_rate=0.2,
492
- norm_layer=nn.LayerNorm,
493
- ape=False,
494
- patch_norm=True,
495
- out_indices=(0, 1, 2, 3),
496
- frozen_stages=-1,
497
- use_checkpoint=False):
498
- super().__init__()
499
-
500
- self.pretrain_img_size = pretrain_img_size
501
- self.num_layers = len(depths)
502
- self.embed_dim = embed_dim
503
- self.ape = ape
504
- self.patch_norm = patch_norm
505
- self.out_indices = out_indices
506
- self.frozen_stages = frozen_stages
507
-
508
- # split image into non-overlapping patches
509
- self.patch_embed = PatchEmbed(
510
- patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
511
- norm_layer=norm_layer if self.patch_norm else None)
512
-
513
- # absolute position embedding
514
- if self.ape:
515
- pretrain_img_size = to_2tuple(pretrain_img_size)
516
- patch_size = to_2tuple(patch_size)
517
- patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]]
518
-
519
- self.absolute_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]))
520
- trunc_normal_(self.absolute_pos_embed, std=.02)
521
-
522
- self.pos_drop = nn.Dropout(p=drop_rate)
523
-
524
- # stochastic depth
525
- dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
526
-
527
- # build layers
528
- self.layers = nn.ModuleList()
529
- for i_layer in range(self.num_layers):
530
- layer = BasicLayer(
531
- dim=int(embed_dim * 2 ** i_layer),
532
- depth=depths[i_layer],
533
- num_heads=num_heads[i_layer],
534
- window_size=window_size,
535
- mlp_ratio=mlp_ratio,
536
- qkv_bias=qkv_bias,
537
- qk_scale=qk_scale,
538
- drop=drop_rate,
539
- attn_drop=attn_drop_rate,
540
- drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
541
- norm_layer=norm_layer,
542
- downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
543
- use_checkpoint=use_checkpoint)
544
- self.layers.append(layer)
545
-
546
- num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
547
- self.num_features = num_features
548
-
549
- # add a norm layer for each output
550
- for i_layer in out_indices:
551
- layer = norm_layer(num_features[i_layer])
552
- layer_name = f'norm{i_layer}'
553
- self.add_module(layer_name, layer)
554
-
555
- self._freeze_stages()
556
-
557
- def _freeze_stages(self):
558
- if self.frozen_stages >= 0:
559
- self.patch_embed.eval()
560
- for param in self.patch_embed.parameters():
561
- param.requires_grad = False
562
-
563
- if self.frozen_stages >= 1 and self.ape:
564
- self.absolute_pos_embed.requires_grad = False
565
-
566
- if self.frozen_stages >= 2:
567
- self.pos_drop.eval()
568
- for i in range(0, self.frozen_stages - 1):
569
- m = self.layers[i]
570
- m.eval()
571
- for param in m.parameters():
572
- param.requires_grad = False
573
-
574
- def init_weights(self, pretrained=None):
575
- """Initialize the weights in backbone.
576
-
577
- Args:
578
- pretrained (str, optional): Path to pre-trained weights.
579
- Defaults to None.
580
- """
581
-
582
- def _init_weights(m):
583
- if isinstance(m, nn.Linear):
584
- trunc_normal_(m.weight, std=.02)
585
- if isinstance(m, nn.Linear) and m.bias is not None:
586
- nn.init.constant_(m.bias, 0)
587
- elif isinstance(m, nn.LayerNorm):
588
- nn.init.constant_(m.bias, 0)
589
- nn.init.constant_(m.weight, 1.0)
590
-
591
- if isinstance(pretrained, str):
592
- self.apply(_init_weights)
593
- logger = get_root_logger()
594
- load_checkpoint(self, pretrained, strict=False, logger=logger)
595
- elif pretrained is None:
596
- self.apply(_init_weights)
597
- else:
598
- raise TypeError('pretrained must be a str or None')
599
-
600
- def forward(self, x):
601
- """Forward function."""
602
- x = self.patch_embed(x)
603
-
604
- Wh, Ww = x.size(2), x.size(3)
605
- if self.ape:
606
- # interpolate the position embedding to the corresponding size
607
- absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic')
608
- x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C
609
- else:
610
- x = x.flatten(2).transpose(1, 2)
611
- x = self.pos_drop(x)
612
-
613
- outs = []
614
- for i in range(self.num_layers):
615
- layer = self.layers[i]
616
- x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
617
-
618
- if i in self.out_indices:
619
- norm_layer = getattr(self, f'norm{i}')
620
- x_out = norm_layer(x_out)
621
-
622
- out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
623
- outs.append(out)
624
-
625
- return tuple(outs)
626
-
627
- def train(self, mode=True):
628
- """Convert the model into training mode while keep layers freezed."""
629
- super(SwinTransformer, self).train(mode)
630
- self._freeze_stages()
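# Editor's note: a hedged end-to-end sketch (not part of the original file) showing how the
# backbone is driven, assuming torch is available; the arguments simply repeat the Swin-T
# defaults of the constructor.
def _demo_swin_backbone():
    backbone = SwinTransformer(embed_dim=96, depths=[2, 2, 6, 2],
                               num_heads=[3, 6, 12, 24], out_indices=(0, 1, 2, 3))
    backbone.init_weights()                    # random init; pass a checkpoint path to load weights
    feats = backbone(torch.randn(1, 3, 224, 224))
    return [f.shape for f in feats]            # (1, 96, 56, 56) down to (1, 768, 7, 7)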
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/ga_retina_head.py DELETED
@@ -1,109 +0,0 @@
1
- import torch.nn as nn
2
- from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
3
- from mmcv.ops import MaskedConv2d
4
-
5
- from ..builder import HEADS
6
- from .guided_anchor_head import FeatureAdaption, GuidedAnchorHead
7
-
8
-
9
- @HEADS.register_module()
10
- class GARetinaHead(GuidedAnchorHead):
11
- """Guided-Anchor-based RetinaNet head."""
12
-
13
- def __init__(self,
14
- num_classes,
15
- in_channels,
16
- stacked_convs=4,
17
- conv_cfg=None,
18
- norm_cfg=None,
19
- **kwargs):
20
- self.stacked_convs = stacked_convs
21
- self.conv_cfg = conv_cfg
22
- self.norm_cfg = norm_cfg
23
- super(GARetinaHead, self).__init__(num_classes, in_channels, **kwargs)
24
-
25
- def _init_layers(self):
26
- """Initialize layers of the head."""
27
- self.relu = nn.ReLU(inplace=True)
28
- self.cls_convs = nn.ModuleList()
29
- self.reg_convs = nn.ModuleList()
30
- for i in range(self.stacked_convs):
31
- chn = self.in_channels if i == 0 else self.feat_channels
32
- self.cls_convs.append(
33
- ConvModule(
34
- chn,
35
- self.feat_channels,
36
- 3,
37
- stride=1,
38
- padding=1,
39
- conv_cfg=self.conv_cfg,
40
- norm_cfg=self.norm_cfg))
41
- self.reg_convs.append(
42
- ConvModule(
43
- chn,
44
- self.feat_channels,
45
- 3,
46
- stride=1,
47
- padding=1,
48
- conv_cfg=self.conv_cfg,
49
- norm_cfg=self.norm_cfg))
50
-
51
- self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
52
- self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2,
53
- 1)
54
- self.feature_adaption_cls = FeatureAdaption(
55
- self.feat_channels,
56
- self.feat_channels,
57
- kernel_size=3,
58
- deform_groups=self.deform_groups)
59
- self.feature_adaption_reg = FeatureAdaption(
60
- self.feat_channels,
61
- self.feat_channels,
62
- kernel_size=3,
63
- deform_groups=self.deform_groups)
64
- self.retina_cls = MaskedConv2d(
65
- self.feat_channels,
66
- self.num_anchors * self.cls_out_channels,
67
- 3,
68
- padding=1)
69
- self.retina_reg = MaskedConv2d(
70
- self.feat_channels, self.num_anchors * 4, 3, padding=1)
71
-
72
- def init_weights(self):
73
- """Initialize weights of the layer."""
74
- for m in self.cls_convs:
75
- normal_init(m.conv, std=0.01)
76
- for m in self.reg_convs:
77
- normal_init(m.conv, std=0.01)
78
-
79
- self.feature_adaption_cls.init_weights()
80
- self.feature_adaption_reg.init_weights()
81
-
82
- bias_cls = bias_init_with_prob(0.01)
83
- normal_init(self.conv_loc, std=0.01, bias=bias_cls)
84
- normal_init(self.conv_shape, std=0.01)
85
- normal_init(self.retina_cls, std=0.01, bias=bias_cls)
86
- normal_init(self.retina_reg, std=0.01)
87
-
88
- def forward_single(self, x):
89
- """Forward feature map of a single scale level."""
90
- cls_feat = x
91
- reg_feat = x
92
- for cls_conv in self.cls_convs:
93
- cls_feat = cls_conv(cls_feat)
94
- for reg_conv in self.reg_convs:
95
- reg_feat = reg_conv(reg_feat)
96
-
97
- loc_pred = self.conv_loc(cls_feat)
98
- shape_pred = self.conv_shape(reg_feat)
99
-
100
- cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)
101
- reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)
102
-
103
- if not self.training:
104
- mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
105
- else:
106
- mask = None
107
- cls_score = self.retina_cls(cls_feat, mask)
108
- bbox_pred = self.retina_reg(reg_feat, mask)
109
- return cls_score, bbox_pred, shape_pred, loc_pred
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/api/streaming_api.py DELETED
@@ -1,124 +0,0 @@
1
- import asyncio
2
- import json
3
- from threading import Thread
4
-
5
- from extensions.api.util import (
6
- build_parameters,
7
- try_start_cloudflared,
8
- with_api_lock
9
- )
10
- from modules import shared
11
- from modules.chat import generate_chat_reply
12
- from modules.text_generation import generate_reply
13
- from websockets.server import serve
14
-
15
- PATH = '/api/v1/stream'
16
-
17
-
18
- @with_api_lock
19
- async def _handle_stream_message(websocket, message):
20
- message = json.loads(message)
21
-
22
- prompt = message['prompt']
23
- generate_params = build_parameters(message)
24
- stopping_strings = generate_params.pop('stopping_strings')
25
- generate_params['stream'] = True
26
-
27
- generator = generate_reply(
28
- prompt, generate_params, stopping_strings=stopping_strings, is_chat=False)
29
-
30
- # As we stream, only send the new bytes.
31
- skip_index = 0
32
- message_num = 0
33
-
34
- for a in generator:
35
- to_send = a[skip_index:]
36
- if to_send is None or chr(0xfffd) in to_send: # partial unicode character, don't send it yet.
37
- continue
38
-
39
- await websocket.send(json.dumps({
40
- 'event': 'text_stream',
41
- 'message_num': message_num,
42
- 'text': to_send
43
- }))
44
-
45
- await asyncio.sleep(0)
46
- skip_index += len(to_send)
47
- message_num += 1
48
-
49
- await websocket.send(json.dumps({
50
- 'event': 'stream_end',
51
- 'message_num': message_num
52
- }))
53
-
54
-
55
- @with_api_lock
56
- async def _handle_chat_stream_message(websocket, message):
57
- body = json.loads(message)
58
-
59
- user_input = body['user_input']
60
- generate_params = build_parameters(body, chat=True)
61
- generate_params['stream'] = True
62
- regenerate = body.get('regenerate', False)
63
- _continue = body.get('_continue', False)
64
-
65
- generator = generate_chat_reply(
66
- user_input, generate_params, regenerate=regenerate, _continue=_continue, loading_message=False)
67
-
68
- message_num = 0
69
- for a in generator:
70
- await websocket.send(json.dumps({
71
- 'event': 'text_stream',
72
- 'message_num': message_num,
73
- 'history': a
74
- }))
75
-
76
- await asyncio.sleep(0)
77
- message_num += 1
78
-
79
- await websocket.send(json.dumps({
80
- 'event': 'stream_end',
81
- 'message_num': message_num
82
- }))
83
-
84
-
85
- async def _handle_connection(websocket, path):
86
-
87
- if path == '/api/v1/stream':
88
- async for message in websocket:
89
- await _handle_stream_message(websocket, message)
90
-
91
- elif path == '/api/v1/chat-stream':
92
- async for message in websocket:
93
- await _handle_chat_stream_message(websocket, message)
94
-
95
- else:
96
- print(f'Streaming api: unknown path: {path}')
97
- return
98
-
99
-
100
- async def _run(host: str, port: int):
101
- async with serve(_handle_connection, host, port, ping_interval=None):
102
- await asyncio.Future() # run forever
103
-
104
-
105
- def _run_server(port: int, share: bool = False, tunnel_id: str = None):
106
- address = '0.0.0.0' if shared.args.listen else '127.0.0.1'
107
-
108
- def on_start(public_url: str):
109
- public_url = public_url.replace('https://', 'wss://')
110
- print(f'Starting streaming server at public url {public_url}{PATH}')
111
-
112
- if share:
113
- try:
114
- try_start_cloudflared(port, tunnel_id, max_attempts=3, on_start=on_start)
115
- except Exception as e:
116
- print(e)
117
- else:
118
- print(f'Starting streaming server at ws://{address}:{port}{PATH}')
119
-
120
- asyncio.run(_run(host=address, port=port))
121
-
122
-
123
- def start_server(port: int, share: bool = False, tunnel_id: str = None):
124
- Thread(target=_run_server, args=[port, share, tunnel_id], daemon=True).start()
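# Editor's note: a hedged client-side sketch for the endpoint above, not part of the original
# extension. It assumes the server is listening locally on port 5005 (the project's usual
# streaming port) and that 'max_new_tokens' is one of the keys understood by build_parameters();
# both are assumptions, not guarantees.
import asyncio
import json
import websockets

async def stream_prompt(prompt, uri='ws://127.0.0.1:5005/api/v1/stream'):
    async with websockets.connect(uri) as ws:
        await ws.send(json.dumps({'prompt': prompt, 'max_new_tokens': 200}))
        async for raw in ws:
            event = json.loads(raw)
            if event['event'] == 'text_stream':
                print(event['text'], end='', flush=True)   # print each new chunk as it arrives
            elif event['event'] == 'stream_end':
                break

# asyncio.run(stream_prompt('Hello'))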
spaces/Arafath10/chatcode/cleaner.py DELETED
@@ -1,57 +0,0 @@
1
- import re
2
-
3
-
4
- def clean_corpus(chat_export_file):
5
- """Prepare a WhatsApp chat export for training with chatterbot."""
6
- message_corpus = remove_chat_metadata(chat_export_file)
7
- cleaned_corpus = remove_non_message_text(message_corpus)
8
- return cleaned_corpus
9
-
10
-
11
- def remove_chat_metadata(chat_export_file):
12
- """Remove WhatsApp chat metadata.
13
-
14
- WhatsApp chat exports come with metadata about each message:
15
-
16
- date time username message
17
- ---------------------------------------
18
- 8/26/22, 17:47 - Jane Doe: Message text
19
-
20
- This function removes all the metadata up to the text of each message.
21
-
22
- Args:
23
- chat_export_file (str): The name of the chat export file
24
-
25
- Returns:
26
- tuple: The text of each message in the conversation
27
- """
28
- date_time = r"(\d+\/\d+\/\d+,\s\d+:\d+)" # e.g. "8/26/22, 17:47"
29
- dash_whitespace = r"\s-\s" # " - "
30
- username = r"([\w\s]+)" # e.g. "Jane Doe"
31
- metadata_end = r":\s" # ": "
32
- pattern = date_time + dash_whitespace + username + metadata_end
33
-
34
- with open(chat_export_file, "r") as corpus_file:
35
- content = corpus_file.read()
36
- cleaned_corpus = re.sub(pattern, "", content)
37
- return tuple(cleaned_corpus.split("\n"))
38
-
39
-
40
- def remove_non_message_text(export_text_lines):
41
- """Remove conversation-irrelevant text from chat export.
42
-
43
- WhatsApp chat exports come with a standardized intro line,
44
- and an empty line at the end of the file.
45
- Text exports also replace media messages with text that isn't
46
- relevant for the conversation. This function removes all that.
47
-
48
- Args:
49
- export_text_lines (tuple): All lines from the export file
50
-
51
- Returns:
52
- tuple: Messages that are a relevant part of the conversation
53
- """
54
- messages = export_text_lines[1:-1]
55
-
56
- filter_out_msgs = ("<Media omitted>",)
57
- return tuple((msg for msg in messages if msg not in filter_out_msgs))
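# Editor's note: a short hedged usage sketch; 'chat.txt' is a placeholder for an actual
# WhatsApp chat export in the format described in the docstrings above.
if __name__ == "__main__":
    cleaned = clean_corpus("chat.txt")
    print(f"{len(cleaned)} messages kept")
    print(cleaned[:3])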
spaces/Arthur678/vits-uma-genshin-honkai/text/__init__.py DELETED
@@ -1,57 +0,0 @@
1
- """ from https://github.com/keithito/tacotron """
2
- from text import cleaners
3
- from text.symbols import symbols
4
-
5
-
6
- # Mappings from symbol to numeric ID and vice versa:
7
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
8
- _id_to_symbol = {i: s for i, s in enumerate(symbols)}
9
-
10
-
11
- def text_to_sequence(text, symbols, cleaner_names):
12
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
13
- Args:
14
- text: string to convert to a sequence
15
- symbols: list of symbols used to build the symbol-to-ID mapping
- cleaner_names: names of the cleaner functions to run the text through
16
- Returns:
17
- List of integers corresponding to the symbols in the text
18
- '''
19
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
20
- sequence = []
21
-
22
- clean_text = _clean_text(text, cleaner_names)
23
- for symbol in clean_text:
24
- if symbol not in _symbol_to_id.keys():
25
- continue
26
- symbol_id = _symbol_to_id[symbol]
27
- sequence += [symbol_id]
28
- return sequence, clean_text
29
-
30
-
31
- def cleaned_text_to_sequence(cleaned_text):
32
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
33
- Args:
34
- text: string to convert to a sequence
35
- Returns:
36
- List of integers corresponding to the symbols in the text
37
- '''
38
- sequence = [_symbol_to_id[symbol] for symbol in cleaned_text if symbol in _symbol_to_id.keys()]
39
- return sequence
40
-
41
-
42
- def sequence_to_text(sequence):
43
- '''Converts a sequence of IDs back to a string'''
44
- result = ''
45
- for symbol_id in sequence:
46
- s = _id_to_symbol[symbol_id]
47
- result += s
48
- return result
49
-
50
-
51
- def _clean_text(text, cleaner_names):
52
- for name in cleaner_names:
53
- cleaner = getattr(cleaners, name)
54
- if not cleaner:
55
- raise Exception('Unknown cleaner: %s' % name)
56
- text = cleaner(text)
57
- return text
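# Editor's note: a hedged round-trip sketch, not part of the original module. It assumes the
# package also ships a cleaner named 'english_cleaners' in text/cleaners.py; if this VITS
# variant only provides language-specific cleaners, substitute one of those names.
def _demo_round_trip(sample="Hello world."):
    seq, cleaned = text_to_sequence(sample, symbols, ["english_cleaners"])
    return cleaned, sequence_to_text(seq)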
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/urllib3/util/ssltransport.py DELETED
@@ -1,221 +0,0 @@
1
- import io
2
- import socket
3
- import ssl
4
-
5
- from ..exceptions import ProxySchemeUnsupported
6
- from ..packages import six
7
-
8
- SSL_BLOCKSIZE = 16384
9
-
10
-
11
- class SSLTransport:
12
- """
13
- The SSLTransport wraps an existing socket and establishes an SSL connection.
14
-
15
- Contrary to Python's implementation of SSLSocket, it allows you to chain
16
- multiple TLS connections together. It's particularly useful if you need to
17
- implement TLS within TLS.
18
-
19
- The class supports most of the socket API operations.
20
- """
21
-
22
- @staticmethod
23
- def _validate_ssl_context_for_tls_in_tls(ssl_context):
24
- """
25
- Raises a ProxySchemeUnsupported if the provided ssl_context can't be used
26
- for TLS in TLS.
27
-
28
- The only requirement is that the ssl_context provides the 'wrap_bio'
29
- methods.
30
- """
31
-
32
- if not hasattr(ssl_context, "wrap_bio"):
33
- if six.PY2:
34
- raise ProxySchemeUnsupported(
35
- "TLS in TLS requires SSLContext.wrap_bio() which isn't "
36
- "supported on Python 2"
37
- )
38
- else:
39
- raise ProxySchemeUnsupported(
40
- "TLS in TLS requires SSLContext.wrap_bio() which isn't "
41
- "available on non-native SSLContext"
42
- )
43
-
44
- def __init__(
45
- self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True
46
- ):
47
- """
48
- Create an SSLTransport around socket using the provided ssl_context.
49
- """
50
- self.incoming = ssl.MemoryBIO()
51
- self.outgoing = ssl.MemoryBIO()
52
-
53
- self.suppress_ragged_eofs = suppress_ragged_eofs
54
- self.socket = socket
55
-
56
- self.sslobj = ssl_context.wrap_bio(
57
- self.incoming, self.outgoing, server_hostname=server_hostname
58
- )
59
-
60
- # Perform initial handshake.
61
- self._ssl_io_loop(self.sslobj.do_handshake)
62
-
63
- def __enter__(self):
64
- return self
65
-
66
- def __exit__(self, *_):
67
- self.close()
68
-
69
- def fileno(self):
70
- return self.socket.fileno()
71
-
72
- def read(self, len=1024, buffer=None):
73
- return self._wrap_ssl_read(len, buffer)
74
-
75
- def recv(self, len=1024, flags=0):
76
- if flags != 0:
77
- raise ValueError("non-zero flags not allowed in calls to recv")
78
- return self._wrap_ssl_read(len)
79
-
80
- def recv_into(self, buffer, nbytes=None, flags=0):
81
- if flags != 0:
82
- raise ValueError("non-zero flags not allowed in calls to recv_into")
83
- if buffer and (nbytes is None):
84
- nbytes = len(buffer)
85
- elif nbytes is None:
86
- nbytes = 1024
87
- return self.read(nbytes, buffer)
88
-
89
- def sendall(self, data, flags=0):
90
- if flags != 0:
91
- raise ValueError("non-zero flags not allowed in calls to sendall")
92
- count = 0
93
- with memoryview(data) as view, view.cast("B") as byte_view:
94
- amount = len(byte_view)
95
- while count < amount:
96
- v = self.send(byte_view[count:])
97
- count += v
98
-
99
- def send(self, data, flags=0):
100
- if flags != 0:
101
- raise ValueError("non-zero flags not allowed in calls to send")
102
- response = self._ssl_io_loop(self.sslobj.write, data)
103
- return response
104
-
105
- def makefile(
106
- self, mode="r", buffering=None, encoding=None, errors=None, newline=None
107
- ):
108
- """
109
- Python's httpclient uses makefile and buffered io when reading HTTP
110
- messages and we need to support it.
111
-
112
- This is unfortunately a copy and paste of socket.py makefile with small
113
- changes to point to the socket directly.
114
- """
115
- if not set(mode) <= {"r", "w", "b"}:
116
- raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
117
-
118
- writing = "w" in mode
119
- reading = "r" in mode or not writing
120
- assert reading or writing
121
- binary = "b" in mode
122
- rawmode = ""
123
- if reading:
124
- rawmode += "r"
125
- if writing:
126
- rawmode += "w"
127
- raw = socket.SocketIO(self, rawmode)
128
- self.socket._io_refs += 1
129
- if buffering is None:
130
- buffering = -1
131
- if buffering < 0:
132
- buffering = io.DEFAULT_BUFFER_SIZE
133
- if buffering == 0:
134
- if not binary:
135
- raise ValueError("unbuffered streams must be binary")
136
- return raw
137
- if reading and writing:
138
- buffer = io.BufferedRWPair(raw, raw, buffering)
139
- elif reading:
140
- buffer = io.BufferedReader(raw, buffering)
141
- else:
142
- assert writing
143
- buffer = io.BufferedWriter(raw, buffering)
144
- if binary:
145
- return buffer
146
- text = io.TextIOWrapper(buffer, encoding, errors, newline)
147
- text.mode = mode
148
- return text
149
-
150
- def unwrap(self):
151
- self._ssl_io_loop(self.sslobj.unwrap)
152
-
153
- def close(self):
154
- self.socket.close()
155
-
156
- def getpeercert(self, binary_form=False):
157
- return self.sslobj.getpeercert(binary_form)
158
-
159
- def version(self):
160
- return self.sslobj.version()
161
-
162
- def cipher(self):
163
- return self.sslobj.cipher()
164
-
165
- def selected_alpn_protocol(self):
166
- return self.sslobj.selected_alpn_protocol()
167
-
168
- def selected_npn_protocol(self):
169
- return self.sslobj.selected_npn_protocol()
170
-
171
- def shared_ciphers(self):
172
- return self.sslobj.shared_ciphers()
173
-
174
- def compression(self):
175
- return self.sslobj.compression()
176
-
177
- def settimeout(self, value):
178
- self.socket.settimeout(value)
179
-
180
- def gettimeout(self):
181
- return self.socket.gettimeout()
182
-
183
- def _decref_socketios(self):
184
- self.socket._decref_socketios()
185
-
186
- def _wrap_ssl_read(self, len, buffer=None):
187
- try:
188
- return self._ssl_io_loop(self.sslobj.read, len, buffer)
189
- except ssl.SSLError as e:
190
- if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:
191
- return 0 # eof, return 0.
192
- else:
193
- raise
194
-
195
- def _ssl_io_loop(self, func, *args):
196
- """Performs an I/O loop between incoming/outgoing and the socket."""
197
- should_loop = True
198
- ret = None
199
-
200
- while should_loop:
201
- errno = None
202
- try:
203
- ret = func(*args)
204
- except ssl.SSLError as e:
205
- if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
206
- # WANT_READ, and WANT_WRITE are expected, others are not.
207
- raise e
208
- errno = e.errno
209
-
210
- buf = self.outgoing.read()
211
- self.socket.sendall(buf)
212
-
213
- if errno is None:
214
- should_loop = False
215
- elif errno == ssl.SSL_ERROR_WANT_READ:
216
- buf = self.socket.recv(SSL_BLOCKSIZE)
217
- if buf:
218
- self.incoming.write(buf)
219
- else:
220
- self.incoming.write_eof()
221
- return ret
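# Editor's note: a hedged usage sketch, not part of the vendored module. It wraps a plain TCP
# connection in a single TLS layer and issues a minimal HTTPS request; 'example.com' is a
# placeholder host. socket and ssl are already imported at the top of this file.
if __name__ == "__main__":
    ctx = ssl.create_default_context()
    sock = socket.create_connection(("example.com", 443))
    with SSLTransport(sock, ctx, server_hostname="example.com") as tls:
        tls.sendall(b"HEAD / HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n")
        print(tls.recv(4096).decode("ascii", "replace").splitlines()[0])   # status line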
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/py39compat.py DELETED
@@ -1,22 +0,0 @@
1
- import sys
2
- import platform
3
-
4
-
5
- def add_ext_suffix_39(vars):
6
- """
7
- Ensure vars contains 'EXT_SUFFIX'. pypa/distutils#130
8
- """
9
- import _imp
10
-
11
- ext_suffix = _imp.extension_suffixes()[0]
12
- vars.update(
13
- EXT_SUFFIX=ext_suffix,
14
- # sysconfig sets SO to match EXT_SUFFIX, so maintain
15
- # that expectation.
16
- # https://github.com/python/cpython/blob/785cc6770588de087d09e89a69110af2542be208/Lib/sysconfig.py#L671-L673
17
- SO=ext_suffix,
18
- )
19
-
20
-
21
- needs_ext_suffix = sys.version_info < (3, 10) and platform.system() == 'Windows'
22
- add_ext_suffix = add_ext_suffix_39 if needs_ext_suffix else lambda vars: None
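# Editor's note: a brief hedged sketch; on CPython < 3.10 under Windows this fills in
# EXT_SUFFIX and SO, everywhere else add_ext_suffix is the no-op lambda defined above.
if __name__ == "__main__":
    cfg_vars = {}
    add_ext_suffix(cfg_vars)
    print(cfg_vars or "no-op on this interpreter/platform")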
spaces/AvinashRamesh23/AIEditor/stable_whisper.py DELETED
@@ -1,1491 +0,0 @@
1
-
2
- import ffmpeg
3
- import whisper
4
- import warnings
5
- import numpy as np
6
- import torch
7
- from torch import Tensor
8
- from torch.nn import functional as F
9
- from torch.distributions import Categorical
10
- from typing import List, Optional, Tuple, Union
11
- from whisper.audio import SAMPLE_RATE, N_FRAMES, HOP_LENGTH, pad_or_trim, log_mel_spectrogram
12
- from whisper.decoding import DecodingOptions, DecodingResult
13
- from whisper.tokenizer import LANGUAGES
14
- from whisper.utils import exact_div, format_timestamp, compression_ratio
15
- from whisper.model import Whisper
16
- from whisper.decoding import DecodingTask, BeamSearchDecoder, GreedyDecoder
17
- from whisper.tokenizer import Tokenizer, get_tokenizer
18
- from types import MethodType
19
- from itertools import chain, repeat
20
- from copy import deepcopy
21
- import os
22
- import json
23
-
24
-
25
- # no_caption was changed to no_speech in newer commits
26
- def get_new_attrs(obj_, attr: str):
27
- if attr == 'no_caption_probs':
28
- return getattr(obj_, attr) if hasattr(obj_, 'no_caption_probs') else getattr(obj_, 'no_speech_probs')
29
- elif attr == 'no_caption_prob':
30
- return getattr(obj_, attr) if hasattr(obj_, 'no_caption_prob') else getattr(obj_, 'no_speech_prob')
31
- elif attr == 'no_captions':
32
- return getattr(obj_, attr) if hasattr(obj_, 'no_captions') else getattr(obj_, 'no_speech')
33
- else:
34
- raise NotImplementedError(attr)
35
-
36
-
37
- def check_ascending_sequence(seq: Union[List[Union[int, float]], np.ndarray], verbose=True) -> bool:
38
- """
39
- check if a sequence of numbers are in ascending order
40
- """
41
- is_ascending = True
42
- for idx, (i, j) in enumerate(zip(seq[:-1], seq[1:])):
43
- if i > j:
44
- is_ascending = False
45
- if verbose:
46
- print(f'[Index{idx}]:{i} > [Index{idx + 1}]:{j}')
47
- else:
48
- break
49
-
50
- return is_ascending
51
-
52
-
53
- def check_ascending_sentence_ts(res: (dict, list)) -> bool:
54
- segs = res['segments'] if isinstance(res, dict) else res
55
- return check_ascending_sequence(list(chain.from_iterable((float(i['start']), float(i['end']))
56
- for i in segs)))
57
-
58
-
59
- def check_ascending_word_ts(res: (dict, list)) -> bool:
60
- cc = group_word_timestamps(res['segments'] if isinstance(res, dict) else res, ts_key='word_timestamps')
61
- return check_ascending_sequence((list(chain.from_iterable((float(i['start']), float(i['end']))
62
- for i in cc))))
63
-
64
-
65
- def is_equal_ts(a: (float, int, np.ndarray), b: (float, int, np.ndarray), rtol=1e-03):
66
- """
67
- check if timestamp a and timestamp b are equal within the relative tolerance (rtol)
68
- """
69
- return np.isclose(a, b, rtol=rtol)
70
-
71
-
72
- def check_is_same_results(res0: (dict, list), res1: (dict, list), check_unstable=False) -> bool:
73
- """
74
- check if res0 and res1 have the same timestamps
75
- """
76
- if isinstance(res0, dict):
77
- res0 = res0['segments']
78
- if isinstance(res1, dict):
79
- res1 = res1['segments']
80
- ts_key = 'unstable_word_timestamps' if check_unstable else 'word_timestamps'
81
- inner_ts_key = 'timestamps' if check_unstable else 'timestamp'
82
-
83
- def _reduce(x):
84
- if isinstance(x, np.ndarray):
85
- return set(tuple(x)) == {True}
86
- return x
87
-
88
- t = set(set(_reduce(is_equal_ts(a[inner_ts_key], b[inner_ts_key])) for a, b in zip(i[ts_key], j[ts_key])) == {True}
89
- for i, j in zip(res0['segments'], res1['segments']))
90
- return t == {True}
91
-
92
-
93
- def to_srt(lines: List[dict], save_path: str = None, strip=False) -> str:
94
- """
95
- lines: List[dict]
96
- [{start:<start-timestamp-of-text>, end:<end-timestamp-of-text>, text:<str-of-text>}, ...]
97
- """
98
-
99
- def secs_to_hhmmss(secs: (float, int)):
100
- mm, ss = divmod(secs, 60)
101
- hh, mm = divmod(mm, 60)
102
- return f'{hh:0>2.0f}:{mm:0>2.0f}:{ss:0>6.3f}'.replace(".", ",")
103
-
104
- srt_str = '\n'.join(
105
- f'{i}\n'
106
- f'{secs_to_hhmmss(sub["start"])} --> {secs_to_hhmmss(sub["end"])}\n'
107
- f'{sub["text"].strip() if strip else sub["text"]}\n'
108
- for i, sub in enumerate(lines, 1))
109
-
110
- if save_path:
111
- with open(save_path, 'w', encoding='utf-8') as f:
112
- f.write(srt_str)
113
- print(f'Saved: {os.path.abspath(save_path)}')
114
-
115
- return srt_str
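# Editor's note: a hedged sketch of the input shape expected by to_srt(); the timestamps
# below are made-up placeholders, not real transcription output.
def _demo_to_srt():
    demo = [dict(start=0.0, end=1.5, text=' Hello'),
            dict(start=1.5, end=2.4, text=' world.')]
    return to_srt(demo, strip=True)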
116
-
117
-
118
- def group_word_timestamps(res: (dict, list), one_group=True, combine_compound=False,
119
- ts_key='whole_word_timestamps', min_dur: float = None):
120
-
121
- if min_dur is None:
122
- min_dur = 0.02
123
-
124
- def group_ts(ts_: List[dict], start) -> List[dict]:
125
- first_group: List[dict] = []
126
- for w_ts in ts_:
127
- if first_group:
128
- if (not combine_compound or w_ts['word'].startswith(' ')) and \
129
- (w_ts['timestamp'] - first_group[-1]['start']) >= min_dur and \
130
- first_group[-1]['end'] < w_ts['timestamp']:
131
- first_group.append(dict(start=first_group[-1]['end'],
132
- end=w_ts['timestamp'],
133
- text=w_ts['word']))
134
- else:
135
- first_group[-1]['end'] = max(first_group[-1]['end'], w_ts['timestamp'])
136
- first_group[-1]['text'] += w_ts['word']
137
- else:
138
- first_group.append(dict(start=start,
139
- end=w_ts['timestamp'],
140
- text=w_ts['word']))
141
-
142
- return first_group
143
-
144
- def group_zero_duration(first_group: List[dict]) -> List[dict]:
145
- final_group: List[dict] = []
146
- for ts_dict in first_group:
147
- if not final_group or (ts_dict['end'] - ts_dict['start']) > 0:
148
- final_group.append(ts_dict)
149
- else:
150
- final_group[-1]['end'] = ts_dict['end']
151
- final_group[-1]['text'] += ts_dict['text']
152
-
153
- return final_group
154
-
155
- segs: List[dict] = res['segments'] if isinstance(res, dict) else res
156
- assert set(ts_key in seg for seg in segs) == {True}, f'input contains missing {ts_key}'
157
-
158
- grouped = (group_ts(seg[ts_key], seg['start']) for seg in segs)
159
- return group_zero_duration(list(chain.from_iterable(grouped))) if one_group else list(grouped)
160
-
161
-
162
- def tighten_timestamps(res: dict, end_at_last_word=True, end_before_period=False, start_at_first_word=False) -> dict:
163
- res = deepcopy(res)
164
- for i in range(len(res['segments'])):
165
- if start_at_first_word:
166
- res['segments'][i]['start'] = res['segments'][i]['word_timestamps'][0]['timestamp']
167
- if end_before_period and \
168
- res['segments'][i]['word_timestamps'][-1] == '.' and \
169
- len(res['segments'][i]['word_timestamps']) > 1:
170
- res['segments'][i]['end'] = res['segments'][i]['word_timestamps'][-2]['timestamp']
171
- elif end_at_last_word:
172
- res['segments'][i]['end'] = res['segments'][i]['word_timestamps'][-1]['timestamp']
173
-
174
- return res
175
-
176
-
177
- def results_to_srt(res: dict, srt_path, word_level=True, combine_compound=False,
178
- end_at_last_word=False, end_before_period=False, start_at_first_word=False, strip=False):
179
- if word_level:
180
- results_to_word_srt(res, srt_path, combine_compound=combine_compound, strip=strip)
181
- else:
182
- results_to_sentence_srt(res, srt_path,
183
- end_at_last_word=end_at_last_word,
184
- end_before_period=end_before_period,
185
- start_at_first_word=start_at_first_word,
186
- strip=strip)
187
-
188
-
189
- def results_to_sentence_srt(res: dict, srt_path,
190
- end_at_last_word=False,
191
- end_before_period=False,
192
- start_at_first_word=False,
193
- strip=True):
194
- """
195
-
196
- Parameters
197
- ----------
198
- res: dict
199
- results from modified model
200
- srt_path: str
201
- output path of srt
202
- end_at_last_word: bool
203
- set end-of-sentence to timestamp-of-last-token
204
- end_before_period: bool
205
- set end-of-sentence to timestamp-of-last-non-period-token
206
- start_at_first_word: bool
207
- set start-of-sentence to timestamp-of-first-token
208
- strip: bool
209
- perform strip() on each sentence
210
-
211
- """
212
- strict = any((end_at_last_word, end_before_period, start_at_first_word))
213
- segs = tighten_timestamps(res,
214
- end_at_last_word=end_at_last_word,
215
- end_before_period=end_before_period,
216
- start_at_first_word=start_at_first_word)['segments'] \
217
- if strict else res['segments']
218
-
219
- max_idx = len(segs) - 1
220
- i = 1
221
- while i <= max_idx:
222
- if not (segs[i]['end'] - segs[i]['start']):
223
- if segs[i - 1]['end'] == segs[i]['end']:
224
- segs[i - 1]['text'] += (' ' + segs[i]['text'].strip())
225
- del segs[i]
226
- max_idx -= 1
227
- continue
228
- else:
229
- segs[i]['start'] = segs[i - 1]['end']
230
- i += 1
231
-
232
- to_srt(segs, srt_path, strip=strip)
233
-
234
-
235
- def results_to_word_srt(res: dict, srt_path, combine_compound=False, strip=False, min_dur: float = None):
236
- """
237
-
238
- Parameters
239
- ----------
240
- res: dict
241
- results from modified model
242
- srt_path: str
243
- output path of srt
244
- combine_compound: bool
245
- concatenate words without in-between spacing
246
- strip: bool
247
- perform strip() on each word
248
- min_dur: bool
249
- minimum duration for each word (i.e. concat the words if it is less than specified value; Default 0.02)
250
-
251
- """
252
- to_srt(group_word_timestamps(res, combine_compound=combine_compound, min_dur=min_dur),
253
- srt_path, strip=strip)
254
-
255
-
256
- def results_to_token_srt(res: dict, srt_path, combine_compound=False, strip=False, min_dur: float = None):
257
- """
258
-
259
- Parameters
260
- ----------
261
- res: dict
262
- results from modified model
263
- srt_path: str
264
- output path of srt
265
- combine_compound: bool
266
- concatenate words without in-between spacing
267
- strip: bool
268
- perform strip() on each token
269
- min_dur: bool
270
- minimum duration for each token (i.e. concat the tokens if it is less than specified value; Default 0.02)
271
-
272
- """
273
- to_srt(group_word_timestamps(res, combine_compound=combine_compound, ts_key='word_timestamps', min_dur=min_dur),
274
- srt_path, strip=strip)
275
-
276
-
277
- def _get_min_estimation(estimations: List[Union[list, np.ndarray]],
278
- min_: (int, float) = None,
279
- max_: (int, float) = None) -> np.ndarray:
280
- estimations = deepcopy(estimations)
281
- estimations = list(map(lambda est_: np.array(est_) if isinstance(est_, list) else est_, estimations))
282
- prev_min = min_ or 0
283
- curr_max = max_ or np.max(estimations[-1])
284
-
285
- min_est = []
286
- for curr_est in estimations:
287
- curr_min = curr_est[np.logical_and(curr_max > curr_est, curr_est > prev_min)]
288
- curr_min = np.min(curr_min) if curr_min.shape[0] else prev_min
289
- min_est.append(curr_min)
290
- prev_min = curr_min
291
-
292
- return np.array(min_est)
293
-
294
-
295
- def _get_max_estimation(estimations: List[Union[list, np.ndarray]],
296
- max_: (int, float) = None,
297
- min_: (int, float) = None) -> np.ndarray:
298
- estimations = deepcopy(estimations)
299
- estimations = list(map(lambda est_: np.array(est_) if isinstance(est_, list) else est_, estimations))
300
- prev_max = max_ or np.max(estimations[-1])
301
- curr_min = np.min(estimations[0]) if min_ is None else min_
302
-
303
- max_est = []
304
- for curr_est in reversed(estimations):
305
- curr_max = curr_est[np.logical_and(prev_max > curr_est, curr_est > curr_min)]
306
- curr_max = np.max(curr_max) if curr_max.shape[0] else prev_max
307
- max_est.append(curr_max)
308
- prev_max = curr_max
309
-
310
- max_est.reverse()
311
- return np.array(max_est)
312
-
313
-
314
- def _remove_overestimation(x: Union[np.ndarray, List[Union[int, float]]], alt_est: List[Union[list, np.ndarray]] = None,
315
- max_: (int, float) = None, min_: (int, float) = None,
316
- aggressive=False) -> np.ndarray:
317
- x = np.array(x) if isinstance(x, list) else deepcopy(x)
318
- if alt_est is not None:
319
- alt_est = list(map(lambda est_: np.array(est_) if isinstance(est_, list) else est_, alt_est))
320
- assert x.ndim == 1
321
- assert alt_est is None or len(alt_est) == x.shape[0]
322
- max_val = x[-1] if max_ is None else max_
323
- min_val = x[0] if min_ is None else min_
324
-
325
- def curr_max_min(val):
326
- if min_ is None:
327
- return val
328
- return max(min_, val)
329
-
330
- if min_ is not None:
331
- x[x < min_] = min_
332
- reduce_ = np.min if aggressive else np.mean
333
- for i in range(x.shape[-1] - 1, -1, -1):
334
- if x[i] > max_val or (i > 1 and x[i] < reduce_(x[:i])): # spikes or dips
335
- if alt_est is None or alt_est[i] is None:
336
- x[i] = max_val
337
- else:
338
- tmp_min = min_val if i < 2 else curr_max_min(np.mean(x[:i]))
339
- alt_ = alt_est[i][np.logical_and(alt_est[i] < max_val, alt_est[i] > tmp_min)]
340
- x[i] = max_val if alt_.shape[0] == 0 else alt_[0]
341
- max_val = x[i]
342
- return x
343
-
344
-
345
- def _remove_underestimation(x: Union[np.ndarray, List[Union[int, float]]],
346
- alt_est: List[Union[list, np.ndarray]] = None,
347
- min_: (int, float) = None, max_: (int, float) = None,
348
- aggressive=False) -> np.ndarray:
349
- x = np.array(x) if isinstance(x, list) else deepcopy(x)
350
- if alt_est is not None:
351
- alt_est = list(map(lambda est_: np.array(est_) if isinstance(est_, list) else est_, alt_est))
352
- assert x.ndim == 1
353
- assert alt_est is None or len(alt_est) == x.shape[0]
354
- min_val = x[0] if min_ is None else min_
355
- max_val = x[-1] if max_ is None else max_
356
-
357
- def curr_min_max(val):
358
- if max_ is None:
359
- return val
360
- return min(max_, val)
361
-
362
- if max_ is not None:
363
- x[x > max_] = max_
364
- reduce_ = np.max if aggressive else np.mean
365
- max_i_reduce = x.shape[-1] - 2
366
- for i in range(0, x.shape[-1]):
367
- if x[i] < min_val or (i < max_i_reduce and x[i] > reduce_(x[i + 1:])): # dips or spikes
368
- if alt_est is None or alt_est[i] is None:
369
- x[i] = min_val
370
- else:
371
- tmp_max = max_val if i >= max_i_reduce else curr_min_max(np.mean(x[i + 1:]))
372
- alt_ = alt_est[i][np.logical_and(alt_est[i] > min_val, alt_est[i] < tmp_max)]
373
- x[i] = min_val if alt_.shape[0] == 0 else alt_[0]
374
- min_val = x[i]
375
- return x
376
-
377
-
378
- def _merge_max_min_estimation(mx: Union[np.ndarray, List[Union[int, float]]],
379
- mn: Union[np.ndarray, List[Union[int, float]]],
380
- alt_est: List[Union[list, np.ndarray]] = None) -> np.ndarray:
381
- mx = np.array(mx) if isinstance(mx, list) else deepcopy(mx)
382
- mn = np.array(mn) if isinstance(mn, list) else deepcopy(mn)
383
- if alt_est is not None:
384
- alt_est = list(map(lambda est_: np.array(est_) if isinstance(est_, list) else est_, alt_est))
385
- assert mx.ndim == 1 and mn.ndim == 1
386
- assert mx.shape[0] == mn.shape[0]
387
- assert alt_est is None or len(alt_est) == mx.shape[0]
388
-
389
- pref_mx = np.var(mx) > np.var(mn)
390
- if pref_mx:
391
- mn[0] = mx[0]
392
- prev_min = mn[0]
393
- for i in range(1, mn.shape[0]):
394
- if prev_min > mn[i]:
395
- if mn[i] > mx[i]: # prev_min > mn[i] > mx[i]
396
- mn[i] = prev_min
397
- elif mx[i] > mn[i]:
398
- if prev_min > mx[i]: # prev_min > mx[i] > mn[i]
399
- mn[i] = prev_min
400
- else: # mx[i] > prev_min > mn[i]
401
- alt_ = alt_est[i][np.logical_and(alt_est[i] > prev_min, alt_est[i] < mx[i])]
402
- mn[i] = (mx[i] if pref_mx else prev_min) if alt_.shape[0] == 0 else alt_[0]
403
- else: # prev_min > mn[i] == mx[i]
404
- mn[i] = prev_min
405
- elif mn[i] > prev_min:
406
- # if prev_min > mx[i]: # mn[i] > prev_min > mx[i]
407
- # pass
408
- if mx[i] > prev_min:
409
- if mn[i] > mx[i]: # mn[i] > mx[i] > prev_min
410
- pass
411
- elif mx[i] > mn[i]: # mx[i] > mn[i] > prev_min
412
- alt_ = alt_est[i][np.logical_and(alt_est[i] > mn[i], alt_est[i] < mx[i])]
413
- if alt_.shape[0]:
414
- mn[i] = alt_[0]
415
- elif pref_mx:
416
- mn[i] = mx[i]
417
- # else: # mx[i] == mn[i] > prev_min
418
- # pass
419
- # else: # mn[i] > mx[i] == prev_min
420
- # pass
421
- else: # mn[i] == prev_min
422
- if mx[i] > mn[i]: # mx[i] > mn[i] == prev_min
423
- alt_ = alt_est[i][np.logical_and(alt_est[i] > mn[i], alt_est[i] < mx[i])]
424
- if alt_.shape[0]:
425
- mn[i] = alt_[0]
426
- elif pref_mx:
427
- mn[i] = mx[i]
428
- # elif mn[i] > mx[i]: # mn[i] == prev_min > mx[i]
429
- # pass
430
- # else: # mn[i] == prev_min == mx[i]
431
- # pass
432
-
433
- prev_min = mn[i]
434
-
435
- return mn
436
-
437
-
438
- def _avg_merge_min_max(mx: Union[np.ndarray, List[Union[int, float]]],
439
- mn: Union[np.ndarray, List[Union[int, float]]],
440
- alt_timestamps: List[Union[List[Union[int, float]], np.ndarray]] = None,
441
- max_: (int, float) = None, min_: (int, float) = None):
442
- mx = np.array(mx) if isinstance(mx, list) else deepcopy(mx)
443
- mn = np.array(mn) if isinstance(mn, list) else deepcopy(mn)
444
- assert mx.ndim == mn.ndim == 1
445
- assert mx.shape[0] == mn.shape[0]
446
-
447
- avg_ = (mx + mn) / 2
448
-
449
- if check_ascending_sequence(avg_, verbose=False):
450
- return avg_
451
-
452
- if not max_:
453
- max_ = max(mx[-1], mn[-1])
454
- if min_ is None:
455
- min_ = min(mn[0], mx[0])
456
-
457
- return _stabilize_timestamps(avg_, alt_timestamps, max_=max_, min_=min_)
458
-
459
-
460
- def _stabilize_timestamps(timestamps: Union[np.ndarray, List[Union[int, float]]],
461
- alt_timestamps: List[Union[List[Union[int, float]], np.ndarray]] = None,
462
- max_: (int, float) = None, min_: (int, float) = None, aggressive=False) -> np.ndarray:
463
- mx = _remove_overestimation(timestamps, alt_est=alt_timestamps, max_=max_, min_=min_, aggressive=aggressive)
464
- mn = _remove_underestimation(timestamps, alt_est=alt_timestamps, max_=max_, min_=min_, aggressive=aggressive)
465
- return _merge_max_min_estimation(mx, mn, alt_timestamps)
466
-
467
-
468
- def _stabilize_more_timestamps(timestamps: List[Union[list, np.ndarray]],
469
- max_: (int, float) = None, min_: (int, float) = None, average=True) -> np.ndarray:
470
- mx = _get_max_estimation(timestamps, max_=max_, min_=min_)
471
- mn = _get_min_estimation(timestamps, max_=max_, min_=min_)
472
- if average:
473
- return _avg_merge_min_max(mx, mn, timestamps, max_=max_, min_=min_)
474
- return _merge_max_min_estimation(mx, mn, timestamps)
475
-
476
-
477
- def stabilize_timestamps(segments: Union[List[dict], dict],
478
- top_focus=False, aggressive=False, average=True) -> List[dict]:
479
- """
480
-
481
- Parameters
482
- ----------
483
- segments: Union[List[dict], dict]
484
- result['segments'] or result
485
- top_focus: bool
486
- adhere closely to the top predictions for word timestamps
487
- aggressive: bool
488
- only if top_focus=True,
489
- allow greater variation in word_timestamps/whole_word_timestamps
490
- average: bool
491
- only if top_focus=False,
492
- average min and max of unstable_word_timestamps to get word_timestamps/whole_word_timestamps
493
-
494
- """
495
- if isinstance(segments, dict):
496
- segments = segments['segments']
497
- if not segments:
498
- warnings.warn('No Segments Found')
499
- return []
500
- missing_ts_idx = set(map(lambda x: None if x[1].get('unstable_word_timestamps') else x[0], enumerate(segments))) - {
501
- None}
502
- no_word_timestamps = len(missing_ts_idx) == len(segments)
503
- if not no_word_timestamps and missing_ts_idx:
504
- warnings.warn(f'Segments {list(missing_ts_idx)} are missing unstable_word_timestamps. '
505
- f'Word-level timestamp stabilization will be skipped')
506
-
507
- segments = deepcopy(segments)
508
- sectioned_segments: List[List] = [[]]
509
- for i, seg in enumerate(segments, 1):
510
- sectioned_segments[-1].append(seg)
511
- if seg['anchor_point']:
512
- if i < len(segments):
513
- sectioned_segments.append([])
514
-
515
- assert all(set(len(set(s['offset'] for s in segs)) == 1 for segs in sectioned_segments))
516
-
517
- sectioned_segments_timestamps = [dict(min_=segs[-1]['offset'],
518
- max_=segs[-1]['next_offset'],
519
- timestamps=list(chain.from_iterable((s['start'], s['end']) for s in segs)),
520
- alt_timestamps=list(chain.from_iterable((s['alt_start_timestamps'],
521
- s['alt_end_timestamps'])
522
- for s in segs)))
523
- for segs in sectioned_segments]
524
-
525
- sectioned_stab_timestamps = [_stabilize_timestamps(**kwargs).reshape(-1, 2) for kwargs in
526
- sectioned_segments_timestamps]
527
-
528
- for i in range(len(sectioned_segments)):
529
- for j in range(len(sectioned_segments[i])):
530
- sectioned_segments[i][j]['start'], sectioned_segments[i][j]['end'] = sectioned_stab_timestamps[i][j]
531
-
532
- if not missing_ts_idx:
533
- if top_focus:
534
- top_word_ts = [ts_['timestamps'][0] for ts_ in
535
- sectioned_segments[i][j]['unstable_word_timestamps']]
536
- alt_word_ts = [ts_['timestamps'][1:] for ts_ in
537
- sectioned_segments[i][j]['unstable_word_timestamps']]
538
- temp_stab_word_ts = _stabilize_timestamps(top_word_ts, alt_word_ts,
539
- max_=sectioned_segments[i][j]['end'],
540
- min_=sectioned_segments[i][j]['start'],
541
- aggressive=aggressive)
542
- else:
543
- word_ts = [ts_['timestamps'] for ts_ in sectioned_segments[i][j]['unstable_word_timestamps']]
544
- temp_stab_word_ts = _stabilize_more_timestamps(word_ts,
545
- max_=sectioned_segments[i][j]['end'],
546
- min_=sectioned_segments[i][j]['start'],
547
- average=average)
548
-
549
- temp_stab_word_ts = [{'word': sectioned_segments[i][j]['unstable_word_timestamps'][k]['word'],
550
- 'token': sectioned_segments[i][j]['unstable_word_timestamps'][k]['token'],
551
- 'timestamp': temp_stab_word_ts[k]}
552
- for k in range(temp_stab_word_ts.shape[0])]
553
-
554
- sectioned_segments[i][j]['word_timestamps'] = temp_stab_word_ts
555
-
556
- return list(chain.from_iterable(sectioned_segments))
557
-
558
-
559
- def save_as_json(results, path):
560
- with open(path, 'w', encoding='utf-8') as f:
561
- json.dump(results, f)
562
-
563
-
564
- def add_whole_word_ts(tokenizer: Tokenizer, segments: Union[List[dict], dict], merge_non_space: bool = None,
565
- prepend_punctuations: Union[List[str], Tuple[str]] = None,
566
- append_punctuations: Union[List[str], Tuple[str]] = None):
567
- merge_non_space = (tokenizer.language in ['en'] or tokenizer.language is None) \
568
- if merge_non_space is None else merge_non_space
569
- if prepend_punctuations is None:
570
- prepend_punctuations = r'“¿([{'
571
- if append_punctuations is None:
572
- append_punctuations = r'.。,,!!??::”)]}、'
573
- if isinstance(segments, dict):
574
- segments = segments['segments']
575
- if not segments:
576
- print('No segments found, whole-word timestamps cannot be added.')
577
- return
578
-
579
- missing_idx = set(-1 if seg.get('word_timestamps') else i for i, seg in enumerate(segments)) - {-1}
580
-
581
- if missing_idx:
582
- if len(missing_idx) == len(segments):
583
- print('No word_timestamps found, whole-word timestamps cannot be added.')
584
- return
585
- print(f'Some word_timestamps not found, '
586
- f'whole-word timestamps cannot be added to the following segments: {tuple(missing_idx)}')
587
-
588
- failed_idx = []
589
-
590
- for seg_idx, seg in enumerate(segments):
591
- if seg.get('word_timestamps'):
592
- prev_idx = 0
593
- remaining_text = seg['text']
594
- has_prepend = False
595
- whole_word_timestamps: List[dict] = []
596
- for wts_idx in range(1, len(seg['word_timestamps']) + 1):
597
- max_ts = seg['word_timestamps'][wts_idx - 1]['timestamp']
598
- tokens = [wts['token'] for wts in seg['word_timestamps'][prev_idx: wts_idx]]
599
- temp_whole_word = tokenizer.decode(tokens)
600
- if temp_whole_word == remaining_text[:len(temp_whole_word)]:
601
- prev_idx = wts_idx
602
- remaining_text = remaining_text[len(temp_whole_word):]
603
- if (not merge_non_space or temp_whole_word.startswith(' ') or not whole_word_timestamps) and \
604
- temp_whole_word not in append_punctuations and \
605
- not has_prepend:
606
- has_prepend = temp_whole_word.strip() in prepend_punctuations
607
- whole_word_timestamps.append(dict(word=temp_whole_word, timestamp=max_ts))
608
- else:
609
- has_prepend = False
610
- whole_word_timestamps[-1]['word'] += temp_whole_word
611
- whole_word_timestamps[-1]['timestamp'] = max_ts
612
- if remaining_text:
613
- failed_idx.append(seg_idx)
614
- whole_word_timestamps = []
615
- seg['whole_word_timestamps'] = whole_word_timestamps or None
616
- else:
617
- seg['whole_word_timestamps'] = None
618
-
619
- if failed_idx:
620
- print(f'Failed to add whole-word timestamps to the following segments: {tuple(failed_idx)}')
621
-
622
-
623
- def _load_audio_waveform(audio: Union[str, bytes, np.ndarray, torch.Tensor], h: int, w: int) -> np.ndarray:
624
- """
625
-
626
- Parameters
627
- ----------
628
- audio: Union[str, bytes, np.ndarray, torch.Tensor], shape = (*)
629
- The path to the audio file, the bytes of the audio file, or a NumPy array or Tensor containing the audio waveform sampled at 16 kHz
630
- h: int
631
- Height of waveform image
632
- w: int
633
- Width of waveform image
634
-
635
- Returns
636
- -------
637
- Audio waveform image as a NumPy array, in uint8 dtype.
638
- """
639
-
640
- try:
641
- if isinstance(audio, str):
642
- stream = ffmpeg.input(audio, threads=0)
643
- inp = None
644
-
645
- else:
646
- if isinstance(audio, bytes):
647
- stream = ffmpeg.input('pipe:', threads=0)
648
- inp = audio
649
- else:
650
- warnings.warn('A resampled input causes an unexplained temporal shift in the waveform image '
651
- 'that will skew the timestamp suppression and may result in inaccurate timestamps.\n'
652
- 'Use the audio_for_mask option of transcribe() to provide the original audio track '
653
- 'as the path or bytes of the audio file.',
654
- stacklevel=2)
655
- stream = ffmpeg.input('pipe:', threads=0, ac=1, format='s16le')
656
- if isinstance(audio, torch.Tensor):
657
- audio = np.array(audio)
658
- inp = (audio * 32768.0).astype(np.int16).tobytes()
659
-
660
- waveform, err = (
661
- stream.filter('aformat', channel_layouts='mono')
662
- .filter('highpass', f='200').filter('lowpass', f='3000')
663
- .filter('showwavespic', s=f'{w}x{h}')
664
- .output('-', pix_fmt='gray', format='rawvideo')
665
- .run(cmd="ffmpeg", capture_stdout=True, capture_stderr=True, input=inp)
666
- )
667
- except ffmpeg.Error as e:
668
- raise RuntimeError(f"Failed to load audio in waveform: {e.stderr.decode()}") from e
669
- else:
670
- if not waveform:
671
- partial_file = b'partial file' in err and b'Output file is empty' in err
672
- add_msg = '\nMetadata for decoding are likely at end of file, try to use path of audio instead.' \
673
- if partial_file and isinstance(audio, bytes) else ''
674
- raise RuntimeError(f"Failed to load audio in waveform: {err.decode()}" + add_msg)
675
- return np.frombuffer(waveform, dtype=np.uint8).reshape(h, w)
676
-
677
-
678
- def _remove_lower_quantile(waveform: np.ndarray,
679
- upper_quantile: float = None,
680
- lower_quantile: float = None,
681
- lower_threshold: float = None) -> np.ndarray:
682
- """
683
- Removes lower quantile of amplitude from waveform image
684
- """
685
- if upper_quantile is None:
686
- upper_quantile = 0.85
687
- if lower_quantile is None:
688
- lower_quantile = 0.15
689
- if lower_threshold is None:
690
- lower_threshold = 0.15
691
- waveform = deepcopy(waveform)
692
- wave_sums = waveform.sum(0)
693
- mx = np.quantile(wave_sums, upper_quantile, -1)
694
- mn = np.quantile(wave_sums, lower_quantile, -1)
695
- mn_threshold = (mx - mn) * lower_threshold + mn
696
- waveform[:, wave_sums < mn_threshold] = 0
697
- return waveform
698
-
699
-
700
- def _wave_to_ts_filter(waveform: np.ndarray, suppress_middle=True,
701
- max_index: (list, int) = None) -> np.ndarray:
702
- """
703
- Returns a NumPy array mask of sections with zero amplitude
704
- """
705
- assert waveform.ndim <= 2, f'waveform must have at most 2 dims but found {waveform.ndim}'
706
- if waveform.ndim == 1:
707
- wave_sum = waveform
708
- else:
709
- wave_sum = waveform.sum(-2)
710
-
711
- wave_filter = wave_sum.astype(bool)
712
-
713
- if not suppress_middle:
714
- nonzero_indices = wave_filter.nonzero()[0]
715
- wave_filter[nonzero_indices[0]:nonzero_indices[-1] + 1] = True
716
- if max_index is not None:
717
- wave_filter[max_index + 1:] = False
718
-
719
- return ~wave_filter
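The three helpers above form the silence-suppression pipeline used by `transcribe_word_level`. A rough sketch of how they compose (not part of the original file; the path, image width, and max index are placeholders):

```python
import numpy as np

# Render the audio as a grayscale waveform image, one column per timestamp position.
wf = _load_audio_waveform("audio.wav", h=100, w=3000)
# Optionally zero out low-amplitude background columns before masking.
wf = _remove_lower_quantile(wf.astype(np.float32))
# Boolean mask of silent timestamp positions that decoding can suppress.
suppress_mask = _wave_to_ts_filter(wf, suppress_middle=True, max_index=1500)
```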
720
-
721
-
722
- # modified version of whisper.transcribe.transcribe
723
- def transcribe_word_level(
724
- model: "Whisper",
725
- audio: Union[str, np.ndarray, torch.Tensor],
726
- *,
727
- verbose: bool = False,
728
- temperature: Union[float, Tuple[float, ...]] = (0.0, 0.2, 0.4, 0.6, 0.8, 1.0),
729
- compression_ratio_threshold: Optional[float] = 2.4,
730
- logprob_threshold: Optional[float] = -1.0,
731
- no_speech_threshold: Optional[float] = 0.6,
732
- condition_on_previous_text: bool = True,
733
- stab=True, top_focus=False, ts_num: int = 10,
734
- alpha: float = None, print_unstab=False,
735
- suppress_silence: bool = True,
736
- suppress_middle: bool = True,
737
- suppress_word_ts: bool = True,
738
- remove_background: bool = True,
739
- silence_threshold: float = 0.1,
740
- prepend_punctuations: Union[List[str], Tuple[str]] = None,
741
- append_punctuations: Union[List[str], Tuple[str]] = None,
742
- audio_for_mask: (str, bytes) = None,
743
- **decode_options):
744
- """
745
- Transcribe an audio file using Whisper
746
-
747
- Parameters
748
- ----------
749
- model: Whisper
750
- The Whisper model instance
751
-
752
- audio: Union[str, np.ndarray, torch.Tensor]
753
- The path to the audio file to open, or the audio waveform
754
-
755
- verbose: bool
756
- Whether to display the decoded text (with finalized timestamps) to the console
757
-
758
- temperature: Union[float, Tuple[float, ...]]
759
- Temperature for sampling. It can be a tuple of temperatures, which will be successively used
760
- upon failures according to either `compression_ratio_threshold` or `logprob_threshold`.
761
-
762
- compression_ratio_threshold: float
763
- If the gzip compression ratio is above this value, treat as failed
764
-
765
- logprob_threshold: float
766
- If the average log probability over sampled tokens is below this value, treat as failed
767
-
768
- no_speech_threshold: float
769
- If the no_speech probability is higher than this value AND the average log probability
770
- over sampled tokens is below `logprob_threshold`, consider the segment as silent
771
-
772
- condition_on_previous_text: bool
773
- if True, the previous output of the model is provided as a prompt for the next window;
774
- disabling may make the text inconsistent across windows, but the model becomes less prone to
775
- getting stuck in a failure loop, such as repetition looping or timestamps going out of sync.
776
-
777
- stab: bool
778
- Stabilize timestamps by cross-comparing timestamps and using additional top timestamp predictions
779
- to fill in where appropriate, ensuring timestamps are chronological.
780
-
781
- top_focus: bool
782
- Adhere closely to the top predictions for token timestamps stabilization
783
-
784
- ts_num: int
785
- Number of top timestamp predictions to save for each word for postprocessing stabilization (default: 10).
786
-
787
- alpha: float
788
- Amount of noise to add to the audio to produce slightly different results.
789
- audio_features *= torch.rand_like(audio_features) * alpha + 1
790
-
791
- print_unstab: bool
792
- Whether to display the text (without stabilized timestamps) being decoded to the console
793
- (i.e. behaves like verbose before the model was modified)
794
-
795
- suppress_silence: bool
796
- Suppress timestamp tokens that are marked as silent
797
-
798
- suppress_middle: bool
799
- Suppress any silent timestamp tokens in the middle of the segment instead of only the beginning and ending
800
-
801
- suppress_word_ts: bool
802
- Suppress timestamp tokens of words that are marked as silent
803
-
804
- remove_background: bool
805
- Whether to remove background noise from the waveform so that it is marked as silent.
806
- Determined by parameters that are part of decode_options (i.e. specify them like the other options here):
807
- upper_quantile: float
808
- The upper quantile of amplitude to determine a max amplitude, mx (Default: 0.85)
809
- lower_quantile: float
810
- The lower quantile of amplitude to determine a min amplitude, mn (Default: 0.15)
811
- lower_threshold: float
812
- Suppressed sections of waveform where amplitude < lower_threshold*(mx-mn) + mn. (Default: 0.15)
813
-
814
- silence_threshold: float
815
- If the fraction of silence in an audio segment is >= silence_threshold,
816
- that segment will not have its background removed even if remove_background=True.
817
- e.g. 0.5 means the background is only removed when less than half of the audio segment is silent
818
-
819
- prepend_punctuations: Union[List[str], Tuple[str]]
820
- Punctuations to prepend to next word (Default: “¿([{)
821
-
822
- append_punctuations: Union[List[str], Tuple[str]]
823
- Punctuations to append to previous word (Default: .。,,!!??::”)]}、)
824
-
825
- audio_for_mask: (str, bytes)
826
- Original audio track as path or bytes of audio file.
827
- Since resampled audio may shift the waveform image,
828
- this is an alternative to the 'audio' option for generating the suppression mask from the original audio.
829
-
830
- decode_options: dict
831
- Keyword arguments to construct `DecodingOptions` instances
832
-
833
- Returns
834
- -------
835
- A dictionary containing the resulting text ("text") and segment-level details ("segments"), and
836
- the spoken language ("language"), which is detected when `decode_options["language"]` is None.
837
- """
838
-
839
- if 'no_captions_threshold' in decode_options:
840
- warnings.warn('no_captions_threshold is deprecated. '
841
- 'Please use no_speech_threshold instead.', DeprecationWarning, stacklevel=2)
842
- no_speech_threshold = decode_options.pop('no_captions_threshold')
843
-
844
- dtype = torch.float16 if decode_options.get("fp16", True) else torch.float32
845
- if model.device == torch.device("cpu"):
846
- if torch.cuda.is_available():
847
- warnings.warn("Performing inference on CPU when CUDA is available")
848
- if dtype == torch.float16:
849
- warnings.warn("FP16 is not supported on CPU; using FP32 instead")
850
- dtype = torch.float32
851
-
852
- if dtype == torch.float32:
853
- decode_options["fp16"] = False
854
-
855
- if 'max_initial_timestamp' not in decode_options:
856
- decode_options['max_initial_timestamp'] = None
857
-
858
- mel = log_mel_spectrogram(audio)
859
-
860
- if decode_options.get("language", None) is None:
861
- if verbose:
862
- print("Detecting language using up to the first 30 seconds. Use `--language` to specify the language")
863
- segment = pad_or_trim(mel, N_FRAMES).to(model.device).to(dtype)
864
- _, probs = model.detect_language(segment)
865
- decode_options["language"] = max(probs, key=probs.get)
866
- print(f"Detected language: {LANGUAGES[decode_options['language']]}")
867
-
868
- mel = mel.unsqueeze(0)
869
- language = decode_options["language"]
870
- task = decode_options.get("task", "transcribe")
871
- tokenizer = get_tokenizer(model.is_multilingual, language=language, task=task)
872
-
873
- def decode_with_fallback(segment: torch.Tensor, suppress_ts_mask: Tensor = None) \
874
- -> Union[List[DecodingResult], tuple]:
875
- temperatures = [temperature] if isinstance(temperature, (int, float)) else temperature
876
- kwargs = {**decode_options}
877
- t = temperatures[0]
878
- if t == 0:
879
- best_of = kwargs.pop("best_of", None)
880
- else:
881
- best_of = kwargs.get("best_of", None)
882
-
883
- options = DecodingOptions(**kwargs, temperature=t)
884
- results, ts_tokens, ts_logits_ = model.decode(segment, options, ts_num=ts_num, alpha=alpha,
885
- suppress_ts_mask=suppress_ts_mask,
886
- suppress_word_ts=suppress_word_ts)
887
-
888
- kwargs.pop("beam_size", None) # no beam search for t > 0
889
- kwargs.pop("patience", None) # no patience for t > 0
890
- kwargs["best_of"] = best_of # enable best_of for t > 0
891
- for t in temperatures[1:]:
892
- needs_fallback = [
893
- compression_ratio_threshold is not None
894
- and result.compression_ratio > compression_ratio_threshold
895
- or logprob_threshold is not None
896
- and result.avg_logprob < logprob_threshold
897
- for result in results
898
- ]
899
- if any(needs_fallback):
900
- options = DecodingOptions(**kwargs, temperature=t)
901
- retries, r_ts_tokens, r_ts_logits = model.decode(segment[needs_fallback], options,
902
- ts_num=ts_num, alpha=alpha,
903
- suppress_ts_mask=suppress_ts_mask,
904
- suppress_word_ts=suppress_word_ts)
905
- for retry_index, original_index in enumerate(np.nonzero(needs_fallback)[0]):
906
- results[original_index] = retries[retry_index]
907
- ts_tokens[original_index] = r_ts_tokens[retry_index]
908
- ts_logits_[original_index] = r_ts_logits[retry_index]
909
-
910
- return results, ts_tokens, ts_logits_
911
-
912
- seek = 0
913
- input_stride = exact_div(
914
- N_FRAMES, model.dims.n_audio_ctx
915
- ) # mel frames per output token: 2
916
- time_precision = (
917
- input_stride * HOP_LENGTH / SAMPLE_RATE
918
- ) # time per output token: 0.02 (seconds)
919
- all_tokens = []
920
- all_segments = []
921
- prompt_reset_since = 0
922
-
923
- initial_prompt = decode_options.pop("initial_prompt", None) or []
924
- if initial_prompt:
925
- initial_prompt = tokenizer.encode(" " + initial_prompt.strip())
926
- all_tokens.extend(initial_prompt)
927
-
928
- def _to_list(x: (Tensor, None)):
929
- if x is None:
930
- return x
931
- return x.tolist()
932
-
933
- def add_segment(
934
- *, offset: float, start: float, end: float, text_tokens: Tensor, result: DecodingResult,
935
- start_timestamps: list = None, end_timestamps: list = None, word_timestamps: Tensor = None,
936
- start_ts_logits: list = None, end_ts_logits: list = None, word_ts_logits: Tensor = None
937
- ):
938
- no_eot_mask = text_tokens < tokenizer.eot
939
- text_tokens_no_eot = text_tokens[no_eot_mask]
940
- text = tokenizer.decode(text_tokens_no_eot)
941
-
942
- if len(text.strip()) == 0: # skip empty text output
943
- return
944
-
945
- if word_timestamps is not None:
946
- assert word_timestamps.shape[0] == text_tokens.shape[0]
947
- if word_ts_logits is None:
948
- word_ts_fields = zip(text_tokens_no_eot, word_timestamps[no_eot_mask], repeat(None))
949
- else:
950
- assert word_ts_logits.shape[0] == text_tokens.shape[0]
951
- word_ts_fields = zip(text_tokens_no_eot, word_timestamps[no_eot_mask], word_ts_logits[no_eot_mask])
952
-
953
- word_timestamps = [dict(word=tokenizer.decode([token]),
954
- token=token.item(),
955
- timestamps=timestamps_.tolist(),
956
- timestamp_logits=_to_list(ts_logits_))
957
- for token, timestamps_, ts_logits_ in word_ts_fields]
958
-
959
- all_segments.append(
960
- {
961
- "id": len(all_segments),
962
- "seek": seek,
963
- 'offset': offset, # offset = float(seek * HOP_LENGTH / SAMPLE_RATE)
964
- "start": start,
965
- "end": end,
966
- "text": text,
967
- "tokens": result.tokens,
968
- "temperature": result.temperature,
969
- "avg_logprob": result.avg_logprob,
970
- "compression_ratio": result.compression_ratio,
971
- "no_speech_prob": get_new_attrs(result, 'no_caption_prob'),
972
- "alt_start_timestamps": start_timestamps,
973
- "start_ts_logits": start_ts_logits,
974
- "alt_end_timestamps": end_timestamps,
975
- "end_ts_logits": end_ts_logits,
976
- "unstable_word_timestamps": word_timestamps,
977
- 'anchor_point': False
978
- }
979
- )
980
- if print_unstab or (verbose and not stab):
981
- print(f'[{format_timestamp(start)} --> {format_timestamp(end)}] "{text}"')
982
- if word_timestamps is not None:
983
- ts_str = (f' ->[{format_timestamp(ts_["timestamps"][0])}] "{ts_["word"].strip()}"' for ts_ in
984
- word_timestamps)
985
- print('\n'.join(ts_str), end='\n\n')
986
-
987
- if suppress_silence:
988
- ts_scale = HOP_LENGTH / SAMPLE_RATE / time_precision
989
- wf = _load_audio_waveform(audio_for_mask or audio, 100, int(mel.shape[-1] * ts_scale))
990
-
991
- upper_quantile = decode_options.pop('upper_quantile', 0.85)
992
- lower_quantile = decode_options.pop('lower_quantile', 0.15)
993
- lower_threshold = decode_options.pop('lower_threshold', 0.15)
994
-
995
- while seek < mel.shape[-1]:
996
- timestamp_offset = float(seek * HOP_LENGTH / SAMPLE_RATE)
997
- remaining_duration = float((mel.shape[-1] - seek) * HOP_LENGTH / SAMPLE_RATE)
998
- segment = pad_or_trim(mel[:, :, seek:], N_FRAMES).to(model.device).to(dtype)
999
- segment_duration = min(float(segment.shape[-1] * HOP_LENGTH / SAMPLE_RATE), remaining_duration)
1000
- segment_max_ts = segment_duration / time_precision
1001
-
1002
- if suppress_silence:
1003
- wf_seek = int(seek * ts_scale)
1004
- segment_wf = wf[..., wf_seek:wf_seek + 1501]
1005
- if remove_background and \
1006
- (1 - segment_wf.sum(0).clip(max=1).mean()) < silence_threshold:
1007
- segment_wf = _remove_lower_quantile(segment_wf.astype(np.float32),
1008
- upper_quantile=upper_quantile,
1009
- lower_quantile=lower_quantile,
1010
- lower_threshold=lower_threshold)
1011
- segment_wf = pad_or_trim(segment_wf, 1501)
1012
- suppress_ts_mask = torch.from_numpy(_wave_to_ts_filter(segment_wf,
1013
- suppress_middle=suppress_middle,
1014
- max_index=int(segment_max_ts)))
1015
-
1016
- if suppress_ts_mask.all(): # segment is silent
1017
- seek += segment.shape[-1] # fast-forward to the next segment boundary
1018
- continue
1019
- else:
1020
- suppress_ts_mask = None
1021
-
1022
- decode_options["prompt"] = all_tokens[prompt_reset_since:]
1023
- result, finalized_ts_tokens, ts_logits = decode_with_fallback(segment,
1024
- suppress_ts_mask=suppress_ts_mask)
1025
-
1026
- result = result[0]
1027
- tokens = torch.tensor(result.tokens)
1028
- finalized_ts_tokens = torch.tensor(finalized_ts_tokens[0])
1029
- ts_logits = torch.tensor(ts_logits[0])
1030
-
1031
- if no_speech_threshold is not None:
1032
- # no voice activity check
1033
- should_skip = get_new_attrs(result, 'no_caption_prob') > no_speech_threshold
1034
- if logprob_threshold is not None and result.avg_logprob > logprob_threshold:
1035
- # don't skip if the logprob is high enough, despite the no_speech_prob
1036
- should_skip = False
1037
-
1038
- if should_skip:
1039
- seek += segment.shape[-1] # fast-forward to the next segment boundary
1040
- continue
1041
-
1042
- timestamp_tokens: torch.Tensor = tokens.ge(tokenizer.timestamp_begin)
1043
- consecutive = torch.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0].add_(1)
1044
- if len(consecutive) > 0: # if the output contains two consecutive timestamp tokens
1045
- last_slice = 0
1046
- for current_slice in consecutive:
1047
- sliced_tokens = tokens[last_slice:current_slice]
1048
- sliced_ts_tokens = finalized_ts_tokens[last_slice:current_slice]
1049
- sliced_ts_logits = ts_logits[last_slice:current_slice]
1050
- start_timestamp_position = (
1051
- sliced_tokens[0].item() - tokenizer.timestamp_begin
1052
- )
1053
- end_timestamp_position = (
1054
- sliced_tokens[-1].item() - tokenizer.timestamp_begin
1055
- )
1056
-
1057
- word_ts = timestamp_offset + sliced_ts_tokens * time_precision
1058
-
1059
- add_segment(
1060
- offset=timestamp_offset,
1061
- start=timestamp_offset + start_timestamp_position * time_precision,
1062
- end=min(timestamp_offset + end_timestamp_position * time_precision,
1063
- timestamp_offset + segment_duration),
1064
- text_tokens=sliced_tokens[1:-1],
1065
- result=result,
1066
- start_timestamps=word_ts[0].tolist(),
1067
- end_timestamps=word_ts[-1].tolist(),
1068
- word_timestamps=word_ts[1:-1],
1069
- start_ts_logits=sliced_ts_logits[0].tolist(),
1070
- end_ts_logits=sliced_ts_logits[-1].tolist(),
1071
- word_ts_logits=sliced_ts_logits[1:-1]
1072
- )
1073
- last_slice = current_slice
1074
- last_timestamp_position = (
1075
- min(tokens[last_slice - 1].item() - tokenizer.timestamp_begin, segment_max_ts)
1076
- )
1077
- seek += last_timestamp_position * input_stride
1078
- all_tokens.extend(tokens[: last_slice + 1].tolist())
1079
- else:
1080
- duration = segment_duration
1081
- timestamps = tokens[timestamp_tokens.nonzero().flatten()]
1082
- if len(timestamps) > 0:
1083
- # no consecutive timestamps but it has a timestamp; use the last one.
1084
- # single timestamp at the end means no speech after the last timestamp.
1085
- last_timestamp_position = min(timestamps[-1].item() - tokenizer.timestamp_begin, segment_max_ts)
1086
- duration = last_timestamp_position * time_precision
1087
-
1088
- word_ts = timestamp_offset + finalized_ts_tokens * time_precision
1089
-
1090
- add_segment(
1091
- offset=timestamp_offset,
1092
- start=timestamp_offset,
1093
- end=timestamp_offset + duration,
1094
- text_tokens=tokens,
1095
- result=result,
1096
- word_timestamps=word_ts,
1097
- word_ts_logits=ts_logits
1098
- )
1099
-
1100
- seek += segment.shape[-1]
1101
- all_tokens.extend(tokens.tolist())
1102
-
1103
- if all_segments:
1104
- all_segments[-1]['anchor_point'] = True
1105
- all_segments[-1]['next_offset'] = float(seek * HOP_LENGTH / SAMPLE_RATE)
1106
- if not condition_on_previous_text or result.temperature > 0.5:
1107
- # do not feed the prompt tokens if a high temperature was used
1108
- prompt_reset_since = len(all_tokens)
1109
-
1110
- if len(all_segments) > 1 and all_segments[-1]['alt_start_timestamps'] is None:
1111
- all_segments[-1]['alt_start_timestamps'] = all_segments[-2]['alt_end_timestamps']
1112
-
1113
- if stab:
1114
- all_segments = stabilize_timestamps(all_segments, top_focus=top_focus)
1115
- add_whole_word_ts(tokenizer, all_segments,
1116
- prepend_punctuations=prepend_punctuations,
1117
- append_punctuations=append_punctuations)
1118
- if verbose:
1119
- print('\nSTABILIZED\n')
1120
- for seg_ in all_segments:
1121
- print(f'[{format_timestamp(seg_["start"])} --> {format_timestamp(seg_["end"])}] "{seg_["text"]}"')
1122
- if seg_['word_timestamps']:
1123
- ts_str = (f' ->[{format_timestamp(ts_["timestamp"])}] "{ts_["word"].strip()}"' for ts_ in
1124
- seg_['word_timestamps'])
1125
- print('\n'.join(ts_str), end='\n\n')
1126
-
1127
- return dict(text=tokenizer.decode(all_tokens[len(initial_prompt):]), segments=all_segments, language=language)
1128
-
1129
-
1130
- def _suppress_ts(ts_logits: Tensor, suppress_ts_mask: Tensor = None):
1131
- if suppress_ts_mask is not None:
1132
- ts_logits[:, suppress_ts_mask] = -np.inf
1133
-
1134
-
1135
- def _ts_topk(ts_logits: Tensor, k: int, prev_ts: Tensor = None) -> Tensor:
1136
- temp_ts = torch.stack(torch.topk(ts_logits, k, dim=-1), 0).unsqueeze(-2)
1137
- return temp_ts if prev_ts is None else torch.cat([prev_ts, temp_ts], dim=-2)
1138
-
1139
-
1140
- # modified version of whisper.GreedyDecoder
1141
- class GreedyDecoderWordLevel(GreedyDecoder):
1142
- def __init__(self, *args, **kwargs):
1143
- self.ts_num: int = kwargs.pop('ts_num', 10)
1144
- self.suppress_ts_mask: Tensor = kwargs.pop('suppress_ts_mask', None)
1145
- self.timestamp_begin = kwargs.pop('timestamp_begin', 50364)
1146
- super(GreedyDecoderWordLevel, self).__init__(*args, **kwargs)
1147
- self.ts = None
1148
-
1149
- def _suppress_ts(self, logits: Tensor):
1150
- _suppress_ts(logits[:, self.timestamp_begin:],
1151
- suppress_ts_mask=self.suppress_ts_mask)
1152
-
1153
- def update_with_ts(self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor, ts: Tensor) -> Tuple[Tensor, bool]:
1154
- self.ts = ts
1155
-
1156
- self._suppress_ts(logits)
1157
-
1158
- if self.temperature == 0:
1159
- next_tokens = logits.argmax(dim=-1)
1160
- else:
1161
- next_tokens = Categorical(logits=logits / self.temperature).sample()
1162
-
1163
- logprobs = F.log_softmax(logits.float(), dim=-1)
1164
- current_logprobs = logprobs[torch.arange(logprobs.shape[0]), next_tokens]
1165
- sum_logprobs += current_logprobs * (tokens[:, -1] != self.eot)
1166
-
1167
- next_tokens[tokens[:, -1] == self.eot] = self.eot
1168
- tokens = torch.cat([tokens, next_tokens[:, None]], dim=-1)
1169
-
1170
- completed = (tokens[:, -1] == self.eot).all()
1171
- return tokens, completed
1172
-
1173
- def finalize(self, tokens: Tensor, sum_logprobs: Tensor):
1174
- # make sure each sequence has at least one EOT token at the end
1175
- tokens = F.pad(tokens, (0, 1), value=self.eot)
1176
- return tokens, sum_logprobs.tolist(), self.ts.transpose(1, 0)[None]
1177
-
1178
-
1179
- # modified version of whisper.BeamSearchDecoder
1180
- class BeamSearchDecoderWordLevel(BeamSearchDecoder):
1181
-
1182
- def __init__(self, *args, **kwargs):
1183
- self.ts_num: int = kwargs.pop('ts_num', 10)
1184
- self.suppress_ts_mask: Tensor = kwargs.pop('suppress_ts_mask', None)
1185
- self.timestamp_begin = kwargs.pop('timestamp_begin', 50364)
1186
- super(BeamSearchDecoderWordLevel, self).__init__(*args, **kwargs)
1187
- self.ts = None
1188
- self.finished_ts_ls = None
1189
-
1190
- def reset(self):
1191
- self.finished_sequences = None
1192
- self.finished_ts_ls = None
1193
-
1194
- def _suppress_ts(self, logits: Tensor):
1195
- _suppress_ts(logits[:, self.timestamp_begin:],
1196
- suppress_ts_mask=self.suppress_ts_mask)
1197
-
1198
- def update_with_ts(self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor, ts: Tensor) -> Tuple[Tensor, bool]:
1199
- if tokens.shape[0] % self.beam_size != 0:
1200
- raise ValueError(f"{tokens.shape}[0] % {self.beam_size} != 0")
1201
-
1202
- self.ts = ts
1203
-
1204
- n_audio = tokens.shape[0] // self.beam_size
1205
- if self.finished_sequences is None: # for the first update
1206
- self.finished_sequences = [{} for _ in range(n_audio)]
1207
- self.finished_ts_ls = [{} for _ in range(n_audio)]
1208
-
1209
- logprobs = F.log_softmax(logits.float(), dim=-1)
1210
- next_tokens, source_indices, finished_sequences, finished_ts_ls = [], [], [], []
1211
-
1212
- self._suppress_ts(logprobs)
1213
-
1214
- for i in range(n_audio):
1215
- scores, sources, finished, finished_ts = {}, {}, {}, {}
1216
-
1217
- # STEP 1: calculate the cumulative log probabilities for possible candidates
1218
- for j in range(self.beam_size):
1219
- idx = i * self.beam_size + j
1220
- prefix = tokens[idx].tolist()
1221
- for logprob, token in zip(*logprobs[idx].topk(self.beam_size + 1)):
1222
- new_logprob = (sum_logprobs[idx] + logprob).item()
1223
- sequence = tuple(prefix + [token.item()])
1224
- scores[sequence] = new_logprob
1225
- sources[sequence] = idx
1226
-
1227
- # STEP 2: rank the candidates and keep the top beam_size sequences for each audio
1228
- saved = 0
1229
- for sequence in sorted(scores, key=scores.get, reverse=True):
1230
- if sequence[-1] == self.eot:
1231
- finished[sequence] = scores[sequence]
1232
- finished_ts[sequence] = self.ts[:, sources[sequence]]
1233
- else:
1234
- sum_logprobs[len(next_tokens)] = scores[sequence]
1235
- next_tokens.append(sequence)
1236
- source_indices.append(sources[sequence])
1237
-
1238
- saved += 1
1239
- if saved == self.beam_size:
1240
- break
1241
-
1242
- finished_sequences.append(finished)
1243
- finished_ts_ls.append(finished_ts)
1244
-
1245
- tokens = torch.tensor(next_tokens, device=tokens.device)
1246
- self.inference.rearrange_kv_cache(source_indices)
1247
- self.ts = self.ts[:, source_indices]
1248
-
1249
- # add newly finished sequences to self.finished_sequences
1250
- assert len(self.finished_sequences) == len(finished_sequences)
1251
- for previously_finished, newly_finished, \
1252
- prev_ts_ls, new_ts_ls in \
1253
- zip(self.finished_sequences, finished_sequences,
1254
- self.finished_ts_ls, finished_ts_ls):
1255
- for seq in sorted(newly_finished, key=newly_finished.get, reverse=True):
1256
- if len(previously_finished) >= self.max_candidates:
1257
- break # the candidate list is full
1258
- previously_finished[seq] = newly_finished[seq]
1259
- prev_ts_ls[seq] = new_ts_ls[seq]
1260
-
1261
- # mark as completed if all audio has enough number of samples
1262
- completed = all(
1263
- len(sequences) >= self.max_candidates for sequences in self.finished_sequences
1264
- )
1265
- return tokens, completed
1266
-
1267
- def finalize(self, preceding_tokens: Tensor, sum_logprobs: Tensor):
1268
- # collect all finished sequences, including patience, and add unfinished ones if not enough
1269
- self.ts = self.ts.reshape(self.ts.shape[0], *preceding_tokens.shape[:2], *self.ts.shape[2:])
1270
- sum_logprobs = sum_logprobs.cpu()
1271
- for i, (sequences, ts_) in \
1272
- enumerate(zip(self.finished_sequences, self.finished_ts_ls)):
1273
- if len(sequences) < self.beam_size: # when not enough sequences are finished
1274
- for j in list(np.argsort(sum_logprobs[i]))[::-1]:
1275
- sequence = preceding_tokens[i, j].tolist() + [self.eot]
1276
- seq_tuple = tuple(sequence)
1277
- sequences[seq_tuple] = sum_logprobs[i][j].item()
1278
- ts_[seq_tuple] = self.ts[:, i, j]
1279
- if len(sequences) >= self.beam_size:
1280
- break
1281
-
1282
- tokens: List[List[Tensor]] = [
1283
- [torch.tensor(seq) for seq in sequences.keys()] for sequences in self.finished_sequences
1284
- ]
1285
- sum_logprobs: List[List[float]] = [
1286
- list(sequences.values()) for sequences in self.finished_sequences
1287
- ]
1288
- final_ts: List[List[Tensor]] = [
1289
- list(sequences.values()) for sequences in self.finished_ts_ls
1290
- ]
1291
- return tokens, sum_logprobs, final_ts
1292
-
1293
-
1294
- class DecodingTaskWordLevel(DecodingTask):
1295
-
1296
- def __init__(self, *args, **kwargs):
1297
- self.ts_num: int = kwargs.pop('ts_num', 10)
1298
- self.alpha: float = kwargs.pop('alpha', None) # experimental
1299
- self.suppress_ts_mask: Tensor = kwargs.pop('suppress_ts_mask', None)
1300
- self.suppress_word_ts: bool = kwargs.pop('suppress_word_ts', True)
1301
- super(DecodingTaskWordLevel, self).__init__(*args, **kwargs)
1302
- if hasattr(self.decoder, 'beam_size'):
1303
- self.decoder = BeamSearchDecoderWordLevel(self.decoder.beam_size,
1304
- self.decoder.eot,
1305
- self.inference,
1306
- self.decoder.patience,
1307
- ts_num=self.ts_num,
1308
- suppress_ts_mask=self.suppress_ts_mask,
1309
- timestamp_begin=self.tokenizer.timestamp_begin)
1310
- else:
1311
- self.decoder = GreedyDecoderWordLevel(self.decoder.temperature,
1312
- self.decoder.eot,
1313
- ts_num=self.ts_num,
1314
- suppress_ts_mask=self.suppress_ts_mask,
1315
- timestamp_begin=self.tokenizer.timestamp_begin)
1316
-
1317
- # modified version of whisper.DecodingTask._main_loop
1318
- def _main_loop(self, audio_features: Tensor, tokens: Tensor):
1319
- assert audio_features.shape[0] == tokens.shape[0]
1320
- n_batch = tokens.shape[0]
1321
- sum_logprobs: Tensor = torch.zeros(n_batch, device=audio_features.device)
1322
- no_speech_probs = [np.nan] * n_batch
1323
-
1324
- # ts = None
1325
-
1326
- try:
1327
- for i in range(self.sample_len):
1328
- if self.alpha:
1329
- logits = self.inference.logits(tokens,
1330
- audio_features * (torch.rand_like(audio_features) * self.alpha + 1))
1331
- else:
1332
- logits = self.inference.logits(tokens, audio_features)
1333
-
1334
- if i == 0 and get_new_attrs(self.tokenizer, 'no_captions') is not None: # save no_speech_probs
1335
- probs_at_sot = logits[:, self.sot_index].float().softmax(dim=-1)
1336
- no_speech_probs = probs_at_sot[:, get_new_attrs(self.tokenizer, 'no_captions')].tolist()
1337
-
1338
- # now we need to consider the logits at the last token only
1339
- logits = logits[:, -1]
1340
-
1341
- ts_logits = torch.clone(logits[:, self.tokenizer.timestamp_begin:])
1342
- if self.suppress_word_ts:
1343
- _suppress_ts(ts_logits, self.suppress_ts_mask)
1344
- ts = _ts_topk(ts_logits, k=self.ts_num, prev_ts=self.decoder.ts)
1345
-
1346
- # apply the logit filters, e.g. for suppressing or applying penalty to
1347
- for logit_filter in self.logit_filters:
1348
- logit_filter.apply(logits, tokens)
1349
-
1350
- # expand the tokens tensor with the selected next tokens
1351
- tokens, completed = self.decoder.update_with_ts(tokens, logits, sum_logprobs, ts)
1352
-
1353
- if completed or tokens.shape[-1] > self.n_ctx:
1354
- break
1355
- finally:
1356
- self.inference.cleanup_caching()
1357
-
1358
- return tokens, sum_logprobs, no_speech_probs
1359
-
1360
- # modified version of whisper.DecodingTask.run
1361
- @torch.no_grad()
1362
- def run(self, mel: Tensor) \
1363
- -> Union[List[DecodingResult], Tuple[List[DecodingResult], List[List[int]]]]:
1364
- self.decoder.reset()
1365
- tokenizer: Tokenizer = self.tokenizer
1366
- n_audio: int = mel.shape[0]
1367
-
1368
- audio_features: Tensor = self._get_audio_features(mel) # encoder forward pass
1369
- tokens: Tensor = torch.tensor([self.initial_tokens]).expand(n_audio, -1)
1370
-
1371
- # detect language if requested, overwriting the language token
1372
- languages, language_probs = self._detect_language(audio_features, tokens)
1373
- if self.options.task == "lang_id":
1374
- return [
1375
- DecodingResult(audio_features=features, language=language, language_probs=probs)
1376
- for features, language, probs in zip(audio_features, languages, language_probs)
1377
- ]
1378
-
1379
- # repeat the audio & text tensors by the group size, for beam search or best-of-n sampling
1380
- audio_features = audio_features.repeat_interleave(self.n_group, dim=0)
1381
- tokens = tokens.repeat_interleave(self.n_group, dim=0).to(audio_features.device)
1382
-
1383
- # call the main sampling loop
1384
- tokens, sum_logprobs, no_speech_probs = self._main_loop(audio_features, tokens)
1385
-
1386
- # reshape the tensors to have (n_audio, n_group) as the first two dimensions
1387
- audio_features = audio_features[:: self.n_group]
1388
- no_speech_probs = no_speech_probs[:: self.n_group]
1389
- assert audio_features.shape[0] == len(no_speech_probs) == n_audio
1390
-
1391
- tokens = tokens.reshape(n_audio, self.n_group, -1)
1392
- sum_logprobs = sum_logprobs.reshape(n_audio, self.n_group)
1393
-
1394
- # get the final candidates for each group, and slice between the first sampled token and EOT
1395
- tokens, sum_logprobs, ts = self.decoder.finalize(tokens, sum_logprobs)
1396
- tokens: List[List[Tensor]] = [
1397
- [t[self.sample_begin: (t == tokenizer.eot).nonzero()[0, 0]] for t in s] for s in tokens
1398
- ]
1399
- ts: List[List[Tensor]] = [[t[:, :tokens[i][j].shape[-1]] for j, t in enumerate(s)] for i, s in enumerate(ts)]
1400
-
1401
- # select the top-ranked sample in each group
1402
- selected = self.sequence_ranker.rank(tokens, sum_logprobs)
1403
- tokens: List[List[int]] = [t[i].tolist() for i, t in zip(selected, tokens)]
1404
- ts: List[List[int]] = [t[i].tolist() for i, t in zip(selected, ts)]
1405
- texts: List[str] = [tokenizer.decode(t).strip() for t in tokens]
1406
-
1407
- sum_logprobs: List[float] = [lp[i] for i, lp in zip(selected, sum_logprobs)]
1408
- avg_logprobs: List[float] = [lp / (len(t) + 1) for t, lp in zip(tokens, sum_logprobs)]
1409
-
1410
- fields = (texts, languages, tokens, audio_features, avg_logprobs, no_speech_probs)
1411
- if len(set(map(len, fields))) != 1:
1412
- raise RuntimeError(f"inconsistent result lengths: {list(map(len, fields))}")
1413
-
1414
- return [
1415
- DecodingResult(
1416
- audio_features=features,
1417
- language=language,
1418
- tokens=tokens,
1419
- text=text,
1420
- avg_logprob=avg_logprob,
1421
- **(dict(no_caption_prob=no_speech_prob) if hasattr(DecodingResult, 'no_caption_prob') else dict(
1422
- no_speech_prob=no_speech_prob)),
1423
- temperature=self.options.temperature,
1424
- compression_ratio=compression_ratio(text),
1425
- )
1426
- for text, language, tokens, features, avg_logprob, no_speech_prob in zip(*fields)
1427
- ], ts
1428
-
1429
-
1430
- # modified version of whisper.decoding.decode
1431
- @torch.no_grad()
1432
- def decode_word_level(model: "Whisper", mel: Tensor, options: DecodingOptions = DecodingOptions(),
1433
- ts_num: int = None, alpha: float = None, suppress_ts_mask: Tensor = None,
1434
- suppress_word_ts=False) -> \
1435
- Union[DecodingResult, List[DecodingResult], tuple]:
1436
- """
1437
- Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s).
1438
-
1439
- Parameters
1440
- ----------
1441
- model: Whisper
1442
- the Whisper model instance
1443
-
1444
- mel: torch.Tensor, shape = (80, 3000) or (*, 80, 3000)
1445
- A tensor containing the Mel spectrogram(s)
1446
-
1447
- options: DecodingOptions
1448
- A dataclass that contains all necessary options for decoding 30-second segments
1449
-
1450
- ts_num: int
1451
- Number of additional top timestamp predictions to save for each word for postprocessing stabilization (default: 10).
1452
-
1453
- alpha: float
1454
- Amount of noise to add to the audio to produce slightly different results.
1455
- audio_features *= torch.rand_like(audio_features) * alpha + 1
1456
-
1457
- suppress_ts_mask: (list, Tensor)
1458
- Mask to suppress timestamp token(s) during decoding
1459
-
1460
- suppress_word_ts: bool
1461
- Use suppress_ts_mask to suppress timestamp tokens of words
1462
-
1463
- Returns
1464
- -------
1465
- result: Union[DecodingResult, List[DecodingResult]]
1466
- The result(s) of decoding contained in `DecodingResult` dataclass instance(s)
1467
- """
1468
- single = mel.ndim == 2
1469
- if single:
1470
- mel = mel.unsqueeze(0)
1471
-
1472
- result, ts = DecodingTaskWordLevel(model, options,
1473
- ts_num=ts_num,
1474
- alpha=alpha,
1475
- suppress_ts_mask=suppress_ts_mask,
1476
- suppress_word_ts=suppress_word_ts).run(mel)
1477
-
1478
- if single:
1479
- result = result[0]
1480
- ts_tokens = ts[0][1]
1481
- ts_logits = ts[0][0]
1482
- else:
1483
- ts_tokens = [ts_[1] for ts_ in ts]
1484
- ts_logits = [ts_[0] for ts_ in ts]
1485
-
1486
- return result, ts_tokens, ts_logits
1487
-
1488
-
1489
- def modify_model(model: whisper.model.Whisper):
1490
- model.decode = MethodType(decode_word_level, model)
1491
- model.transcribe = MethodType(transcribe_word_level, model)
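A minimal usage sketch for the functions above, assuming this file is importable as `stable_whisper` and that `openai-whisper` is installed; the audio path is a placeholder:

```python
import whisper
from stable_whisper import modify_model, save_as_json

model = whisper.load_model("base")
modify_model(model)  # patches model.decode / model.transcribe with the word-level versions

# stab=True (default) runs stabilize_timestamps and add_whole_word_ts on the segments
result = model.transcribe("audio.mp3", stab=True, suppress_silence=True)
for seg in result["segments"]:
    print(seg["start"], seg["end"], seg["text"])
    print(seg.get("whole_word_timestamps"))

save_as_json(result, "audio.json")
```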
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/training.md DELETED
@@ -1,67 +0,0 @@
1
- # Training
2
-
3
- From the previous tutorials, you may now have a custom model and a data loader.
4
- To run training, users typically have a preference in one of the following two styles:
5
-
6
- ### Custom Training Loop
7
-
8
- With a model and a data loader ready, everything else needed to write a training loop can
9
- be found in PyTorch, and you are free to write the training loop yourself.
10
- This style allows researchers to manage the entire training logic more clearly and have full control.
11
- One such example is provided in [tools/plain_train_net.py](../../tools/plain_train_net.py).
12
-
13
- Any customization on the training logic is then easily controlled by the user.
14
-
15
- ### Trainer Abstraction
16
-
17
- We also provide a standardized "trainer" abstraction with a
18
- hook system that helps simplify the standard training behavior.
19
- It includes the following two instantiations:
20
-
21
- * [SimpleTrainer](../modules/engine.html#detectron2.engine.SimpleTrainer)
22
- provides a minimal training loop for single-cost single-optimizer single-data-source training, with nothing else.
23
- Other tasks (checkpointing, logging, etc) can be implemented using
24
- [the hook system](../modules/engine.html#detectron2.engine.HookBase).
25
- * [DefaultTrainer](../modules/engine.html#detectron2.engine.defaults.DefaultTrainer) is a `SimpleTrainer` initialized from a
26
- yacs config, used by
27
- [tools/train_net.py](../../tools/train_net.py) and many scripts.
28
- It includes more standard default behaviors that one might want to opt in,
29
- including default configurations for optimizer, learning rate schedule,
30
- logging, evaluation, checkpointing etc.
31
-
32
- To customize a `DefaultTrainer`:
33
-
34
- 1. For simple customizations (e.g. change optimizer, evaluator, LR scheduler, data loader, etc.), overwrite [its methods](../modules/engine.html#detectron2.engine.defaults.DefaultTrainer) in a subclass, just like [tools/train_net.py](../../tools/train_net.py).
35
- 2. For extra tasks during training, check the
36
- [hook system](../modules/engine.html#detectron2.engine.HookBase) to see if it's supported.
37
-
38
- As an example, to print hello during training:
39
- ```python
40
- class HelloHook(HookBase):
41
- def after_step(self):
42
- if self.trainer.iter % 100 == 0:
43
- print(f"Hello at iteration {self.trainer.iter}!")
44
- ```
45
- 3. Using a trainer+hook system means there will always be some non-standard behaviors that cannot be supported, especially in research.
46
- For this reason, we intentionally keep the trainer & hook system minimal, rather than powerful.
47
- If anything cannot be achieved by such a system, it's easier to start from [tools/plain_train_net.py](../../tools/plain_train_net.py) to implement custom training logic manually.
48
-
49
- ### Logging of Metrics
50
-
51
- During training, detectron2 models and trainer put metrics to a centralized [EventStorage](../modules/utils.html#detectron2.utils.events.EventStorage).
52
- You can use the following code to access it and log metrics to it:
53
- ```
54
- from detectron2.utils.events import get_event_storage
55
-
56
- # inside the model:
57
- if self.training:
58
- value = # compute the value from inputs
59
- storage = get_event_storage()
60
- storage.put_scalar("some_accuracy", value)
61
- ```
62
-
63
- Refer to its documentation for more details.
64
-
65
- Metrics are then written to various destinations with [EventWriter](../modules/utils.html#module-detectron2.utils.events).
66
- DefaultTrainer enables a few `EventWriter` with default configurations.
67
- See above for how to customize them.
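As a rough end-to-end sketch of the trainer abstraction and hook system described above (a version-dependent example; the config path is a placeholder):

```python
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, HookBase

class HelloHook(HookBase):
    def after_step(self):
        if self.trainer.iter % 100 == 0:
            print(f"Hello at iteration {self.trainer.iter}!")

cfg = get_cfg()
cfg.merge_from_file("configs/my_config.yaml")  # placeholder yacs config

trainer = DefaultTrainer(cfg)           # builds model, optimizer, data loader from the config
trainer.register_hooks([HelloHook()])   # extra behavior via the hook system
trainer.resume_or_load(resume=False)
trainer.train()
```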
spaces/BAAI/vid2vid-zero/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Vid2vid Zero
3
- emoji: 📊
4
- colorFrom: indigo
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.24.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Bart92/RVC_HF/lib/infer_pack/modules/F0Predictor/HarvestF0Predictor.py DELETED
@@ -1,86 +0,0 @@
1
- from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
2
- import pyworld
3
- import numpy as np
4
-
5
-
6
- class HarvestF0Predictor(F0Predictor):
7
- def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
8
- self.hop_length = hop_length
9
- self.f0_min = f0_min
10
- self.f0_max = f0_max
11
- self.sampling_rate = sampling_rate
12
-
13
- def interpolate_f0(self, f0):
14
- """
15
- 对F0进行插值处理
16
- """
17
-
18
- data = np.reshape(f0, (f0.size, 1))
19
-
20
- vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
21
- vuv_vector[data > 0.0] = 1.0
22
- vuv_vector[data <= 0.0] = 0.0
23
-
24
- ip_data = data
25
-
26
- frame_number = data.size
27
- last_value = 0.0
28
- for i in range(frame_number):
29
- if data[i] <= 0.0:
30
- j = i + 1
31
- for j in range(i + 1, frame_number):
32
- if data[j] > 0.0:
33
- break
34
- if j < frame_number - 1:
35
- if last_value > 0.0:
36
- step = (data[j] - data[i - 1]) / float(j - i)
37
- for k in range(i, j):
38
- ip_data[k] = data[i - 1] + step * (k - i + 1)
39
- else:
40
- for k in range(i, j):
41
- ip_data[k] = data[j]
42
- else:
43
- for k in range(i, frame_number):
44
- ip_data[k] = last_value
45
- else:
46
- ip_data[i] = data[i]  # note: this may be an unnecessary copy
47
- last_value = data[i]
48
-
49
- return ip_data[:, 0], vuv_vector[:, 0]
50
-
51
- def resize_f0(self, x, target_len):
52
- source = np.array(x)
53
- source[source < 0.001] = np.nan
54
- target = np.interp(
55
- np.arange(0, len(source) * target_len, len(source)) / target_len,
56
- np.arange(0, len(source)),
57
- source,
58
- )
59
- res = np.nan_to_num(target)
60
- return res
61
-
62
- def compute_f0(self, wav, p_len=None):
63
- if p_len is None:
64
- p_len = wav.shape[0] // self.hop_length
65
- f0, t = pyworld.harvest(
66
- wav.astype(np.double),
67
- fs=self.hop_length,
68
- f0_ceil=self.f0_max,
69
- f0_floor=self.f0_min,
70
- frame_period=1000 * self.hop_length / self.sampling_rate,
71
- )
72
- f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.fs)
73
- return self.interpolate_f0(self.resize_f0(f0, p_len))[0]
74
-
75
- def compute_f0_uv(self, wav, p_len=None):
76
- if p_len is None:
77
- p_len = wav.shape[0] // self.hop_length
78
- f0, t = pyworld.harvest(
79
- wav.astype(np.double),
80
- fs=self.sampling_rate,
81
- f0_floor=self.f0_min,
82
- f0_ceil=self.f0_max,
83
- frame_period=1000 * self.hop_length / self.sampling_rate,
84
- )
85
- f0 = pyworld.stonemask(wav.astype(np.double), f0, t, self.sampling_rate)
86
- return self.interpolate_f0(self.resize_f0(f0, p_len))
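A brief usage sketch for the predictor above (the waveform here is random placeholder audio; real input should be a mono float array at the configured sampling rate):

```python
import numpy as np

predictor = HarvestF0Predictor(hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100)
wav = np.random.randn(44100)               # 1 second of placeholder audio
f0 = predictor.compute_f0(wav)             # interpolated F0 contour, one value per hop
f0_uv, vuv = predictor.compute_f0_uv(wav)  # F0 plus a voiced/unvoiced flag per frame
```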
spaces/Benson/text-generation/Examples/Descargar Aplikasi True Skate.md DELETED
@@ -1,51 +0,0 @@
1
-
2
- <h1>Descargar Aplikasi True Skate: El Último Skateboarding Sim</h1>
3
- <p>Si te gusta el skateboarding, te encantará True Skate. True Skate es un juego móvil que simula la experiencia del skateboarding en el mundo real. Puedes realizar trucos, explorar parques de skate, personalizar tu skater y tabla, editar y compartir tus repeticiones, y más. En este artículo, te diremos qué es True Skate, cómo descargarlo y por qué deberías jugarlo. </p>
4
- <h2>¿Qué es True Skate? </h2>
5
- <p>True Skate es un juego desarrollado por True Axis, una compañía australiana que se especializa en juegos basados en la física. True Skate fue lanzado en 2012 y desde entonces se ha convertido en uno de los juegos de skateboarding más populares y aclamados en dispositivos móviles. Es el juego oficial de Street League Skateboarding, la primera serie de skateboarding competitivo del mundo. </p>
6
- <h2>descargar aplikasi true skate</h2><br /><p><b><b>Download File</b> &#9734;&#9734;&#9734; <a href="https://bltlly.com/2v6MUO">https://bltlly.com/2v6MUO</a></b></p><br /><br />
7
- <h3>Características de True Skate</h3>
8
- <p>True Skate tiene muchas características que lo hacen destacar de otros juegos de skateboarding. Aquí están algunas de ellas:</p>
9
- <h4>Controles basados en la física</h4>
10
- <p>True Skate usa los dedos como los pies en el tablero. Puedes mover, arrastrar, tocar y deslizar el dedo para hacer que el tablero reaccione exactamente como lo esperarías. También puede utilizar un gamepad para un control más preciso. El juego tiene un sistema de física realista que tiene en cuenta la posición, la dirección y la fuerza de su entrada. Esto significa que cada truco es posible con verdadero control del tablero. </p>
11
- <h4>Parques de skate realistas y spots</h4>
12
- <p>True Skate viene con un solo skatepark, el Underpass, que tiene repisas, escaleras, rieles, cuencos, medias tuberías y cuartos de tubería. También puedes desbloquear 10 parques de fantasía con pernos, la moneda del juego. Además, puede comprar más de 20 spots del mundo real como compras en la aplicación. Estos incluyen lugares famosos como los cursos de The Berrics, SPoT, Love Park, MACBA y Street League. </p>
13
- <h4>Skater personalizable y configuración</h4>
14
-
15
- <h4>Editor de reproducción y uso compartido</h4>
16
- <p>True Skate se trata de clavar la línea perfecta. Puedes grabar tus carreras y editarlas con diferentes ángulos y efectos de cámara. También puede insertar fotogramas clave para mezclar entre levas. Puede elegir entre levas preestablecidas o crear su propia leva personalizada con opciones como FOV, distorsión, distancia, altura, tono, pan, guiñada y órbita. También puede usar una cámara de trípode con modos automático, fijo o de seguimiento. Una vez que estés satisfecho con tu repetición, puedes compartirla con otros jugadores o en las redes sociales. </p>
17
- <h4>Modo de bricolaje y comunidad</h4>
18
- <p>True Skate también tiene un modo de bricolaje que te permite crear tu propio skatepark con objetos que puedes generar y multiplicar. También puedes desbloquear nuevos objetos jugando o comprándolos en la tienda. Puedes jugar en tu propio parque o compartirlo con otros jugadores. También puedes competir en tablas de clasificación globales o desafiar a tus amigos en juegos de S.K.A.T.E o modo SANDBOX. </p>
19
- <p></p>
20
- <h3>¿Cómo descargar aplikasi true skate? </h3>
21
- <p>True Skate está disponible para dispositivos Android e iOS. Aquí está cómo descargarlo:</p>
22
- <h4>Para dispositivos Android</h4>
23
- <ol>
24
- <li>Ir a la aplicación Google Play Store en su dispositivo. </li>
- <li>Buscar "True Skate" en la barra de búsqueda. </li>
25
- <li>Seleccione la aplicación de la lista de resultados y toque en "Instalar". </li>
26
- <li>Espere a que la aplicación se descargue e instale en su dispositivo. </li>
27
- <li>Abre la aplicación y disfruta jugando True Skate.</li>
28
- </ol>
29
- <h4>Para dispositivos iOS</h4>
30
- <ol>
31
- <li>Ir a la aplicación App Store en su dispositivo. </li>
32
- <li>Buscar "True Skate" en la barra de búsqueda. </li>
33
- <li>Seleccione la aplicación de la lista de resultados y toque en "Obtener". </li>
34
- <li>Introduzca su ID de Apple y contraseña si se le solicita. </li>
35
- <li>Espere a que la aplicación se descargue e instale en su dispositivo. </li>
36
- <li>Abre la aplicación y disfruta jugando True Skate.</li>
37
- </ol>
38
- <h2>¿Por qué descargar aplikasi true skate? </h2>
39
-
40
- <h3>Beneficios de jugar True Skate</h3>
41
- <p>Jugar True Skate puede ayudarte de muchas maneras, como:</p>
42
- <h4>Mejora tus habilidades de skateboarding</h4>
43
- <p>True Skate puede ayudarte a aprender nuevos trucos, mejorar tu técnica y dominar tu equilibrio. Puedes practicar en diferentes entornos, con diferentes obstáculos y a diferentes velocidades. También puedes ver las repeticiones de otros jugadores o profesionales y aprender de sus movimientos. También puedes usar True Skate como herramienta para visualizar tus trucos antes de probarlos en la vida real. </p>
44
- <h4>Expresa tu creatividad y estilo</h4>
45
- <p>True Skate te permite personalizar tu skater y tabla con varias opciones. También puedes crear tu propio skatepark con el modo DIY y compartirlo con otros. También puedes editar y compartir tus repeticiones con diferentes cams y efectos. Puedes mostrar tus habilidades, creatividad y estilo al mundo. </p>
46
- <h4>Diviértete y ponte a prueba</h4>
47
- <p>True Skate es un juego divertido y adictivo que te mantendrá entretenido durante horas. Puedes explorar diferentes parques de skate y lugares, completar misiones y logros, competir en tablas de clasificación y desafíos, y jugar con tus amigos. También puedes establecer tus propios objetivos y desafiarte a mejorar tu rendimiento. </p>
48
- <h2>Conclusión</h2>
49
- <p>True Skate es un juego que todo amante del skateboarding debería probar. Es una simulación realista, inmersiva y agradable del skateboarding en el mundo real. Puedes descargar aplikasi true skate para dispositivos Android o iOS y empezar a jugar de inmediato. No te arrepentirás. </p>
spaces/Betacuckgpt/ehartford-Wizard-Vicuna-30B-Uncensored123/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Ehartford Wizard Vicuna 30B Uncensored123
3
- emoji: 🏆
4
- colorFrom: indigo
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.47.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/poolers.py DELETED
@@ -1,235 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
2
- import math
3
- import sys
4
- import torch
5
- from torch import nn
6
- from torchvision.ops import RoIPool
7
-
8
- from detectron2.layers import ROIAlign, ROIAlignRotated, cat
9
-
10
- __all__ = ["ROIPooler"]
11
-
12
-
13
- def assign_boxes_to_levels(box_lists, min_level, max_level, canonical_box_size, canonical_level):
14
- """
15
- Map each box in `box_lists` to a feature map level index and return the assignment
16
- vector.
17
-
18
- Args:
19
- box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes,
20
- where N is the number of images in the batch.
21
- min_level (int): Smallest feature map level index. The input is considered index 0,
22
- the output of stage 1 is index 1, and so on.
23
- max_level (int): Largest feature map level index.
24
- canonical_box_size (int): A canonical box size in pixels (sqrt(box area)).
25
- canonical_level (int): The feature map level index on which a canonically-sized box
26
- should be placed.
27
-
28
- Returns:
29
- A tensor of length M, where M is the total number of boxes aggregated over all
30
- N batch images. The memory layout corresponds to the concatenation of boxes
31
- from all images. Each element is the feature map index, as an offset from
32
- `self.min_level`, for the corresponding box (so value i means the box is at
33
- `self.min_level + i`).
34
- """
35
- eps = sys.float_info.epsilon
36
- box_sizes = torch.sqrt(cat([boxes.area() for boxes in box_lists]))
37
- # Eqn.(1) in FPN paper
38
- level_assignments = torch.floor(
39
- canonical_level + torch.log2(box_sizes / canonical_box_size + eps)
40
- )
41
- # clamp level to (min, max), in case the box size is too large or too small
42
- # for the available feature maps
43
- level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level)
44
- return level_assignments.to(torch.int64) - min_level
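
As a quick sanity check of the level-assignment rule above (Eqn. (1) of the FPN paper), the same arithmetic can be reproduced with plain tensors; the box sizes and level range below are made up for illustration:

```python
import sys

import torch

# Hypothetical setup: FPN levels P2-P5, canonical 224px box mapped to level 4.
min_level, max_level = 2, 5
canonical_box_size, canonical_level = 224, 4

# Box sizes = sqrt(area) for three hypothetical boxes.
box_sizes = torch.sqrt(torch.tensor([224.0 * 224, 112.0 * 112, 448.0 * 448]))
eps = sys.float_info.epsilon

levels = torch.floor(canonical_level + torch.log2(box_sizes / canonical_box_size + eps))
levels = torch.clamp(levels, min=min_level, max=max_level).to(torch.int64) - min_level
print(levels)  # tensor([2, 1, 3]) -> levels 4, 3 and 5, as offsets from min_level
```
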
45
-
46
-
47
- def convert_boxes_to_pooler_format(box_lists):
48
- """
49
- Convert all boxes in `box_lists` to the low-level format used by ROI pooling ops
50
- (see description under Returns).
51
-
52
- Args:
53
- box_lists (list[Boxes] | list[RotatedBoxes]):
54
- A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch.
55
-
56
- Returns:
57
- When input is list[Boxes]:
58
- A tensor of shape (M, 5), where M is the total number of boxes aggregated over all
59
- N batch images.
60
- The 5 columns are (batch index, x0, y0, x1, y1), where batch index
61
- is the index in [0, N) identifying which batch image the box with corners at
62
- (x0, y0, x1, y1) comes from.
63
- When input is list[RotatedBoxes]:
64
- A tensor of shape (M, 6), where M is the total number of boxes aggregated over all
65
- N batch images.
66
- The 6 columns are (batch index, x_ctr, y_ctr, width, height, angle_degrees),
67
- where batch index is the index in [0, N) identifying which batch image the
68
- rotated box (x_ctr, y_ctr, width, height, angle_degrees) comes from.
69
- """
70
-
71
- def fmt_box_list(box_tensor, batch_index):
72
- repeated_index = torch.full(
73
- (len(box_tensor), 1), batch_index, dtype=box_tensor.dtype, device=box_tensor.device
74
- )
75
- return cat((repeated_index, box_tensor), dim=1)
76
-
77
- pooler_fmt_boxes = cat(
78
- [fmt_box_list(box_list.tensor, i) for i, box_list in enumerate(box_lists)], dim=0
79
- )
80
-
81
- return pooler_fmt_boxes
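
The (batch index, x0, y0, x1, y1) layout produced above can be reproduced with plain tensors; a small sketch with hypothetical box values:

```python
import torch

# Two hypothetical images with 2 and 1 boxes in (x0, y0, x1, y1) format.
boxes_per_image = [
    torch.tensor([[0.0, 0.0, 10.0, 10.0], [5.0, 5.0, 20.0, 30.0]]),
    torch.tensor([[2.0, 3.0, 8.0, 9.0]]),
]

rows = []
for batch_index, box_tensor in enumerate(boxes_per_image):
    # Prepend a column holding the image (batch) index for each box.
    index_col = torch.full((len(box_tensor), 1), batch_index, dtype=box_tensor.dtype)
    rows.append(torch.cat((index_col, box_tensor), dim=1))

pooler_fmt_boxes = torch.cat(rows, dim=0)
print(pooler_fmt_boxes.shape)  # torch.Size([3, 5]); column 0 is the batch index
```
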
82
-
83
-
84
- class ROIPooler(nn.Module):
85
- """
86
- Region of interest feature map pooler that supports pooling from one or more
87
- feature maps.
88
- """
89
-
90
- def __init__(
91
- self,
92
- output_size,
93
- scales,
94
- sampling_ratio,
95
- pooler_type,
96
- canonical_box_size=224,
97
- canonical_level=4,
98
- ):
99
- """
100
- Args:
101
- output_size (int, tuple[int] or list[int]): output size of the pooled region,
102
- e.g., 14 x 14. If tuple or list is given, the length must be 2.
103
- scales (list[float]): The scale for each low-level pooling op relative to
104
- the input image. For a feature map with stride s relative to the input
105
- image, scale is defined as a 1 / s. The stride must be power of 2.
106
- When there are multiple scales, they must form a pyramid, i.e. they must be
107
- a monotonically decreasing geometric sequence with a factor of 1/2.
108
- sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op.
109
- pooler_type (string): Name of the type of pooling operation that should be applied.
110
- For instance, "ROIPool" or "ROIAlignV2".
111
- canonical_box_size (int): A canonical box size in pixels (sqrt(box area)). The default
112
- is heuristically defined as 224 pixels in the FPN paper (based on ImageNet
113
- pre-training).
114
- canonical_level (int): The feature map level index on which a canonically-sized box
115
- should be placed. The default is defined as level 4 (stride=16) in the FPN paper,
116
- i.e., a box of size 224x224 will be placed on the feature with stride=16.
117
- The box placement for all boxes will be determined from their sizes w.r.t
118
- canonical_box_size. For example, a box whose area is 4x that of a canonical box
119
- should be used to pool features from feature level ``canonical_level+1``.
120
-
121
- Note that the actual input feature maps given to this module may not have
122
- sufficiently many levels for the input boxes. If the boxes are too large or too
123
- small for the input feature maps, the closest level will be used.
124
- """
125
- super().__init__()
126
-
127
- if isinstance(output_size, int):
128
- output_size = (output_size, output_size)
129
- assert len(output_size) == 2
130
- assert isinstance(output_size[0], int) and isinstance(output_size[1], int)
131
- self.output_size = output_size
132
-
133
- if pooler_type == "ROIAlign":
134
- self.level_poolers = nn.ModuleList(
135
- ROIAlign(
136
- output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False
137
- )
138
- for scale in scales
139
- )
140
- elif pooler_type == "ROIAlignV2":
141
- self.level_poolers = nn.ModuleList(
142
- ROIAlign(
143
- output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True
144
- )
145
- for scale in scales
146
- )
147
- elif pooler_type == "ROIPool":
148
- self.level_poolers = nn.ModuleList(
149
- RoIPool(output_size, spatial_scale=scale) for scale in scales
150
- )
151
- elif pooler_type == "ROIAlignRotated":
152
- self.level_poolers = nn.ModuleList(
153
- ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio)
154
- for scale in scales
155
- )
156
- else:
157
- raise ValueError("Unknown pooler type: {}".format(pooler_type))
158
-
159
- # Map scale (defined as 1 / stride) to its feature map level under the
160
- # assumption that stride is a power of 2.
161
- min_level = -math.log2(scales[0])
162
- max_level = -math.log2(scales[-1])
163
- assert math.isclose(min_level, int(min_level)) and math.isclose(
164
- max_level, int(max_level)
165
- ), "Featuremap stride is not power of 2!"
166
- self.min_level = int(min_level)
167
- self.max_level = int(max_level)
168
- assert (
169
- len(scales) == self.max_level - self.min_level + 1
170
- ), "[ROIPooler] Sizes of input featuremaps do not form a pyramid!"
171
- assert 0 < self.min_level and self.min_level <= self.max_level
172
- if len(scales) > 1:
173
- # When there is only one feature map, canonical_level is redundant and we should not
174
- # require it to be a sensible value. Therefore we skip this assertion
175
- assert self.min_level <= canonical_level and canonical_level <= self.max_level
176
- self.canonical_level = canonical_level
177
- assert canonical_box_size > 0
178
- self.canonical_box_size = canonical_box_size
179
-
180
- def forward(self, x, box_lists):
181
- """
182
- Args:
183
- x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those
184
- used to construct this module.
185
- box_lists (list[Boxes] | list[RotatedBoxes]):
186
- A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch.
187
- The box coordinates are defined on the original image and
188
- will be scaled by the `scales` argument of :class:`ROIPooler`.
189
-
190
- Returns:
191
- Tensor:
192
- A tensor of shape (M, C, output_size, output_size) where M is the total number of
193
- boxes aggregated over all N batch images and C is the number of channels in `x`.
194
- """
195
- num_level_assignments = len(self.level_poolers)
196
-
197
- assert isinstance(x, list) and isinstance(
198
- box_lists, list
199
- ), "Arguments to pooler must be lists"
200
- assert (
201
- len(x) == num_level_assignments
202
- ), "unequal value, num_level_assignments={}, but x is list of {} Tensors".format(
203
- num_level_assignments, len(x)
204
- )
205
-
206
- assert len(box_lists) == x[0].size(
207
- 0
208
- ), "unequal value, x[0] batch dim 0 is {}, but box_list has length {}".format(
209
- x[0].size(0), len(box_lists)
210
- )
211
-
212
- pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)
213
-
214
- if num_level_assignments == 1:
215
- return self.level_poolers[0](x[0], pooler_fmt_boxes)
216
-
217
- level_assignments = assign_boxes_to_levels(
218
- box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level
219
- )
220
-
221
- num_boxes = len(pooler_fmt_boxes)
222
- num_channels = x[0].shape[1]
223
- output_size = self.output_size[0]
224
-
225
- dtype, device = x[0].dtype, x[0].device
226
- output = torch.zeros(
227
- (num_boxes, num_channels, output_size, output_size), dtype=dtype, device=device
228
- )
229
-
230
- for level, (x_level, pooler) in enumerate(zip(x, self.level_poolers)):
231
- inds = torch.nonzero(level_assignments == level).squeeze(1)
232
- pooler_fmt_boxes_level = pooler_fmt_boxes[inds]
233
- output[inds] = pooler(x_level, pooler_fmt_boxes_level)
234
-
235
- return output
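
A minimal usage sketch of the pooler defined above, assuming detectron2 is installed; the feature shapes, scales, and boxes are hypothetical:

```python
import torch
from detectron2.modeling.poolers import ROIPooler
from detectron2.structures import Boxes

# Hypothetical P2-P5 feature maps for one 256x256 image, 256 channels each.
features = [torch.randn(1, 256, 256 // s, 256 // s) for s in (4, 8, 16, 32)]
boxes = [Boxes(torch.tensor([[10.0, 10.0, 100.0, 120.0], [30.0, 40.0, 60.0, 80.0]]))]

pooler = ROIPooler(
    output_size=7,
    scales=(1 / 4, 1 / 8, 1 / 16, 1 / 32),
    sampling_ratio=0,
    pooler_type="ROIAlignV2",
)
print(pooler(features, boxes).shape)  # torch.Size([2, 256, 7, 7])
```
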
 
spaces/CVPR/WALT/mmdet/models/detectors/yolact.py DELETED
@@ -1,146 +0,0 @@
1
- import torch
2
-
3
- from mmdet.core import bbox2result
4
- from ..builder import DETECTORS, build_head
5
- from .single_stage import SingleStageDetector
6
-
7
-
8
- @DETECTORS.register_module()
9
- class YOLACT(SingleStageDetector):
10
- """Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
11
-
12
- def __init__(self,
13
- backbone,
14
- neck,
15
- bbox_head,
16
- segm_head,
17
- mask_head,
18
- train_cfg=None,
19
- test_cfg=None,
20
- pretrained=None):
21
- super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
22
- test_cfg, pretrained)
23
- self.segm_head = build_head(segm_head)
24
- self.mask_head = build_head(mask_head)
25
- self.init_segm_mask_weights()
26
-
27
- def init_segm_mask_weights(self):
28
- """Initialize weights of the YOLACT segm head and YOLACT mask head."""
29
- self.segm_head.init_weights()
30
- self.mask_head.init_weights()
31
-
32
- def forward_dummy(self, img):
33
- """Used for computing network flops.
34
-
35
- See `mmdetection/tools/analysis_tools/get_flops.py`
36
- """
37
- raise NotImplementedError
38
-
39
- def forward_train(self,
40
- img,
41
- img_metas,
42
- gt_bboxes,
43
- gt_labels,
44
- gt_bboxes_ignore=None,
45
- gt_masks=None):
46
- """
47
- Args:
48
- img (Tensor): of shape (N, C, H, W) encoding input images.
49
- Typically these should be mean centered and std scaled.
50
- img_metas (list[dict]): list of image info dict where each dict
51
- has: 'img_shape', 'scale_factor', 'flip', and may also contain
52
- 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
53
- For details on the values of these keys see
54
- `mmdet/datasets/pipelines/formatting.py:Collect`.
55
- gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
56
- shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
57
- gt_labels (list[Tensor]): class indices corresponding to each box
58
- gt_bboxes_ignore (None | list[Tensor]): specify which bounding
59
- boxes can be ignored when computing the loss.
60
- gt_masks (None | Tensor): true segmentation masks for each box
61
- used if the architecture supports a segmentation task.
62
-
63
- Returns:
64
- dict[str, Tensor]: a dictionary of loss components
65
- """
66
- # convert Bitmap mask or Polygon Mask to Tensor here
67
- gt_masks = [
68
- gt_mask.to_tensor(dtype=torch.uint8, device=img.device)
69
- for gt_mask in gt_masks
70
- ]
71
-
72
- x = self.extract_feat(img)
73
-
74
- cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
75
- bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,
76
- img_metas)
77
- losses, sampling_results = self.bbox_head.loss(
78
- *bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
79
-
80
- segm_head_outs = self.segm_head(x[0])
81
- loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)
82
- losses.update(loss_segm)
83
-
84
- mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,
85
- sampling_results)
86
- loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,
87
- img_metas, sampling_results)
88
- losses.update(loss_mask)
89
-
90
- # check NaN and Inf
91
- for loss_name in losses.keys():
92
- assert torch.isfinite(torch.stack(losses[loss_name]))\
93
- .all().item(), '{} becomes infinite or NaN!'\
94
- .format(loss_name)
95
-
96
- return losses
97
-
98
- def simple_test(self, img, img_metas, rescale=False):
99
- """Test function without test time augmentation."""
100
- x = self.extract_feat(img)
101
-
102
- cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
103
-
104
- bbox_inputs = (cls_score, bbox_pred,
105
- coeff_pred) + (img_metas, self.test_cfg, rescale)
106
- det_bboxes, det_labels, det_coeffs = self.bbox_head.get_bboxes(
107
- *bbox_inputs)
108
- bbox_results = [
109
- bbox2result(det_bbox, det_label, self.bbox_head.num_classes)
110
- for det_bbox, det_label in zip(det_bboxes, det_labels)
111
- ]
112
-
113
- num_imgs = len(img_metas)
114
- scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
115
- if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
116
- segm_results = [[[] for _ in range(self.mask_head.num_classes)]
117
- for _ in range(num_imgs)]
118
- else:
119
- # if det_bboxes is rescaled to the original image size, we need to
120
- # rescale it back to the testing scale to obtain RoIs.
121
- if rescale and not isinstance(scale_factors[0], float):
122
- scale_factors = [
123
- torch.from_numpy(scale_factor).to(det_bboxes[0].device)
124
- for scale_factor in scale_factors
125
- ]
126
- _bboxes = [
127
- det_bboxes[i][:, :4] *
128
- scale_factors[i] if rescale else det_bboxes[i][:, :4]
129
- for i in range(len(det_bboxes))
130
- ]
131
- mask_preds = self.mask_head(x[0], det_coeffs, _bboxes, img_metas)
132
- # apply mask post-processing to each image individually
133
- segm_results = []
134
- for i in range(num_imgs):
135
- if det_bboxes[i].shape[0] == 0:
136
- segm_results.append(
137
- [[] for _ in range(self.mask_head.num_classes)])
138
- else:
139
- segm_result = self.mask_head.get_seg_masks(
140
- mask_preds[i], det_labels[i], img_metas[i], rescale)
141
- segm_results.append(segm_result)
142
- return list(zip(bbox_results, segm_results))
143
-
144
- def aug_test(self, imgs, img_metas, rescale=False):
145
- """Test with augmentations."""
146
- raise NotImplementedError
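
The `bbox2result` call in `simple_test` groups the `(N, 5)` detections into one array per class. A rough, self-contained sketch of that grouping (not the actual mmdet implementation; the detections are hypothetical):

```python
import torch

# Hypothetical detections: rows are (x0, y0, x1, y1, score); one class label per row.
det_bboxes = torch.tensor([[10.0, 10.0, 50.0, 60.0, 0.9],
                           [20.0, 30.0, 80.0, 90.0, 0.7]])
det_labels = torch.tensor([0, 2])
num_classes = 3

bboxes_np = det_bboxes.cpu().numpy()
labels_np = det_labels.cpu().numpy()
# One (k, 5) array per class, where k is the number of boxes assigned to that class.
bbox_results = [bboxes_np[labels_np == cls, :] for cls in range(num_classes)]

for cls, res in enumerate(bbox_results):
    print(cls, res.shape)  # (1, 5) for class 0, (0, 5) for class 1, (1, 5) for class 2
```
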
 
spaces/ChandraMohanNayal/AutoGPT/CONTRIBUTING.md DELETED
@@ -1,105 +0,0 @@
1
- # Contributing to Auto-GPT
2
-
3
- First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request.
4
-
5
- This document provides guidelines and best practices to help you contribute effectively.
6
-
7
- ## Table of Contents
8
-
9
- - [Code of Conduct](#code-of-conduct)
10
- - [Getting Started](#getting-started)
11
- - [How to Contribute](#how-to-contribute)
12
- - [Reporting Bugs](#reporting-bugs)
13
- - [Suggesting Enhancements](#suggesting-enhancements)
14
- - [Submitting Pull Requests](#submitting-pull-requests)
15
- - [Style Guidelines](#style-guidelines)
16
- - [Code Formatting](#code-formatting)
17
- - [Pre-Commit Hooks](#pre-commit-hooks)
18
-
19
- ## Code of Conduct
20
-
21
- By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md). Please read it to understand the expectations we have for everyone who contributes to this project.
22
-
23
- ## 📢 A Quick Word
24
- Right now we will not be accepting any Contributions that add non-essential commands to Auto-GPT.
25
-
26
- However, you absolutely can still add these commands to Auto-GPT in the form of plugins. Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template).
27
- > ⚠️ Plugin support is expected to ship within the week. You can follow PR #757 for more updates!
28
-
29
- ## Getting Started
30
-
31
- To start contributing, follow these steps:
32
-
33
- 1. Fork the repository and clone your fork.
34
- 2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`).
35
- 3. Make your changes in the new branch.
36
- 4. Test your changes thoroughly.
37
- 5. Commit and push your changes to your fork.
38
- 6. Create a pull request following the guidelines in the [Submitting Pull Requests](#submitting-pull-requests) section.
39
-
40
- ## How to Contribute
41
-
42
- ### Reporting Bugs
43
-
44
- If you find a bug in the project, please create an issue on GitHub with the following information:
45
-
46
- - A clear, descriptive title for the issue.
47
- - A description of the problem, including steps to reproduce the issue.
48
- - Any relevant logs, screenshots, or other supporting information.
49
-
50
- ### Suggesting Enhancements
51
-
52
- If you have an idea for a new feature or improvement, please create an issue on GitHub with the following information:
53
-
54
- - A clear, descriptive title for the issue.
55
- - A detailed description of the proposed enhancement, including any benefits and potential drawbacks.
56
- - Any relevant examples, mockups, or supporting information.
57
-
58
- ### Submitting Pull Requests
59
-
60
- When submitting a pull request, please ensure that your changes meet the following criteria:
61
-
62
- - Your pull request should be atomic and focus on a single change.
63
- - Your pull request should include tests for your change.
64
- - You should have thoroughly tested your changes with multiple different prompts.
65
- - You should have considered potential risks and mitigations for your changes.
66
- - You should have documented your changes clearly and comprehensively.
67
- - You should not include any unrelated or "extra" small tweaks or changes.
68
-
69
- ## Style Guidelines
70
-
71
- ### Code Formatting
72
-
73
- We use the `black` code formatter to maintain a consistent coding style across the project. Please ensure that your code is formatted using `black` before submitting a pull request. You can install `black` using `pip`:
74
-
75
- ```bash
76
- pip install black
77
- ```
78
-
79
- To format your code, run the following command in the project's root directory:
80
-
81
- ```bash
82
- black .
83
- ```
84
- ### Pre-Commit Hooks
85
- We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps:
86
-
87
- Install the pre-commit package using pip:
88
- ```bash
89
- pip install pre-commit
90
- ```
91
-
92
- Run the following command in the project's root directory to install the pre-commit hooks:
93
- ```bash
94
- pre-commit install
95
- ```
96
-
97
- Now, the pre-commit hooks will run automatically before each commit, checking your code formatting and other requirements.
98
-
99
- If you encounter any issues or have questions, feel free to reach out to the maintainers or open a new issue on GitHub. We're here to help and appreciate your efforts to contribute to the project.
100
-
101
- Happy coding, and once again, thank you for your contributions!
102
-
103
- Maintainers will look at PRs that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here:
104
-
105
- https://github.com/Torantulino/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-is%3Aconflict+
 
spaces/Cletrason/Cletrason-toad-in-the-mario-movie/optimization.py DELETED
@@ -1,756 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """PyTorch optimization for BERT model."""
16
-
17
- import math
18
- import warnings
19
- from functools import partial
20
- from typing import Callable, Iterable, Optional, Tuple, Union
21
-
22
- import torch
23
- from torch import nn
24
- from torch.optim import Optimizer
25
- from torch.optim.lr_scheduler import LambdaLR
26
-
27
- from .trainer_utils import SchedulerType
28
- from .utils import logging
29
- from .utils.versions import require_version
30
-
31
-
32
- logger = logging.get_logger(__name__)
33
-
34
-
35
- def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
36
- """
37
- Create a schedule with a constant learning rate, using the learning rate set in optimizer.
38
-
39
- Args:
40
- optimizer ([`~torch.optim.Optimizer`]):
41
- The optimizer for which to schedule the learning rate.
42
- last_epoch (`int`, *optional*, defaults to -1):
43
- The index of the last epoch when resuming training.
44
-
45
- Return:
46
- `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
47
- """
48
-
49
- return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
50
-
51
-
52
- def _get_constant_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int):
53
- if current_step < num_warmup_steps:
54
- return float(current_step) / float(max(1.0, num_warmup_steps))
55
- return 1.0
56
-
57
-
58
- def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
59
- """
60
- Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
61
- increases linearly between 0 and the initial lr set in the optimizer.
62
-
63
- Args:
64
- optimizer ([`~torch.optim.Optimizer`]):
65
- The optimizer for which to schedule the learning rate.
66
- num_warmup_steps (`int`):
67
- The number of steps for the warmup phase.
68
- last_epoch (`int`, *optional*, defaults to -1):
69
- The index of the last epoch when resuming training.
70
-
71
- Return:
72
- `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
73
- """
74
-
75
- lr_lambda = partial(_get_constant_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps)
76
- return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
77
-
78
-
79
- def _get_linear_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int):
80
- if current_step < num_warmup_steps:
81
- return float(current_step) / float(max(1, num_warmup_steps))
82
- return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
83
-
84
-
85
- def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
86
- """
87
- Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after
88
- a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.
89
-
90
- Args:
91
- optimizer ([`~torch.optim.Optimizer`]):
92
- The optimizer for which to schedule the learning rate.
93
- num_warmup_steps (`int`):
94
- The number of steps for the warmup phase.
95
- num_training_steps (`int`):
96
- The total number of training steps.
97
- last_epoch (`int`, *optional*, defaults to -1):
98
- The index of the last epoch when resuming training.
99
-
100
- Return:
101
- `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
102
- """
103
-
104
- lr_lambda = partial(
105
- _get_linear_schedule_with_warmup_lr_lambda,
106
- num_warmup_steps=num_warmup_steps,
107
- num_training_steps=num_training_steps,
108
- )
109
- return LambdaLR(optimizer, lr_lambda, last_epoch)
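
A small sketch of the resulting warmup-then-linear-decay schedule, assuming the `transformers` package that ships this module is installed; the model and step counts are toy values:

```python
import torch
from transformers import get_linear_schedule_with_warmup

model = torch.nn.Linear(4, 2)  # toy model, just to have parameters
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)

lrs = []
for _ in range(100):
    optimizer.step()
    scheduler.step()
    lrs.append(scheduler.get_last_lr()[0])

print(lrs[0], lrs[9], lrs[-1])  # ~1e-4 after step 1, 1e-3 at the end of warmup, 0.0 at the end
```
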
110
-
111
-
112
- def _get_cosine_schedule_with_warmup_lr_lambda(
113
- current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: float
114
- ):
115
- if current_step < num_warmup_steps:
116
- return float(current_step) / float(max(1, num_warmup_steps))
117
- progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
118
- return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
119
-
120
-
121
- def get_cosine_schedule_with_warmup(
122
- optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
123
- ):
124
- """
125
- Create a schedule with a learning rate that decreases following the values of the cosine function between the
126
- initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
127
- initial lr set in the optimizer.
128
-
129
- Args:
130
- optimizer ([`~torch.optim.Optimizer`]):
131
- The optimizer for which to schedule the learning rate.
132
- num_warmup_steps (`int`):
133
- The number of steps for the warmup phase.
134
- num_training_steps (`int`):
135
- The total number of training steps.
136
- num_cycles (`float`, *optional*, defaults to 0.5):
137
- The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0
138
- following a half-cosine).
139
- last_epoch (`int`, *optional*, defaults to -1):
140
- The index of the last epoch when resuming training.
141
-
142
- Return:
143
- `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
144
- """
145
-
146
- lr_lambda = partial(
147
- _get_cosine_schedule_with_warmup_lr_lambda,
148
- num_warmup_steps=num_warmup_steps,
149
- num_training_steps=num_training_steps,
150
- num_cycles=num_cycles,
151
- )
152
- return LambdaLR(optimizer, lr_lambda, last_epoch)
153
-
154
-
155
- def _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda(
156
- current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: int
157
- ):
158
- if current_step < num_warmup_steps:
159
- return float(current_step) / float(max(1, num_warmup_steps))
160
- progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
161
- if progress >= 1.0:
162
- return 0.0
163
- return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
164
-
165
-
166
- def get_cosine_with_hard_restarts_schedule_with_warmup(
167
- optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
168
- ):
169
- """
170
- Create a schedule with a learning rate that decreases following the values of the cosine function between the
171
- initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
172
- linearly between 0 and the initial lr set in the optimizer.
173
-
174
- Args:
175
- optimizer ([`~torch.optim.Optimizer`]):
176
- The optimizer for which to schedule the learning rate.
177
- num_warmup_steps (`int`):
178
- The number of steps for the warmup phase.
179
- num_training_steps (`int`):
180
- The total number of training steps.
181
- num_cycles (`int`, *optional*, defaults to 1):
182
- The number of hard restarts to use.
183
- last_epoch (`int`, *optional*, defaults to -1):
184
- The index of the last epoch when resuming training.
185
-
186
- Return:
187
- `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
188
- """
189
-
190
- lr_lambda = partial(
191
- _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda,
192
- num_warmup_steps=num_warmup_steps,
193
- num_training_steps=num_training_steps,
194
- num_cycles=num_cycles,
195
- )
196
- return LambdaLR(optimizer, lr_lambda, last_epoch)
197
-
198
-
199
- def _get_polynomial_decay_schedule_with_warmup_lr_lambda(
200
- current_step: int,
201
- *,
202
- num_warmup_steps: int,
203
- num_training_steps: int,
204
- lr_end: float,
205
- power: float,
206
- lr_init: int,
207
- ):
208
- if current_step < num_warmup_steps:
209
- return float(current_step) / float(max(1, num_warmup_steps))
210
- elif current_step > num_training_steps:
211
- return lr_end / lr_init # as LambdaLR multiplies by lr_init
212
- else:
213
- lr_range = lr_init - lr_end
214
- decay_steps = num_training_steps - num_warmup_steps
215
- pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
216
- decay = lr_range * pct_remaining**power + lr_end
217
- return decay / lr_init # as LambdaLR multiplies by lr_init
218
-
219
-
220
- def get_polynomial_decay_schedule_with_warmup(
221
- optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
222
- ):
223
- """
224
- Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the
225
- optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the
226
- initial lr set in the optimizer.
227
-
228
- Args:
229
- optimizer ([`~torch.optim.Optimizer`]):
230
- The optimizer for which to schedule the learning rate.
231
- num_warmup_steps (`int`):
232
- The number of steps for the warmup phase.
233
- num_training_steps (`int`):
234
- The total number of training steps.
235
- lr_end (`float`, *optional*, defaults to 1e-7):
236
- The end LR.
237
- power (`float`, *optional*, defaults to 1.0):
238
- Power factor.
239
- last_epoch (`int`, *optional*, defaults to -1):
240
- The index of the last epoch when resuming training.
241
-
242
- Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT
243
- implementation at
244
- https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37
245
-
246
- Return:
247
- `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
248
-
249
- """
250
-
251
- lr_init = optimizer.defaults["lr"]
252
- if not (lr_init > lr_end):
253
- raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")
254
-
255
- lr_lambda = partial(
256
- _get_polynomial_decay_schedule_with_warmup_lr_lambda,
257
- num_warmup_steps=num_warmup_steps,
258
- num_training_steps=num_training_steps,
259
- lr_end=lr_end,
260
- power=power,
261
- lr_init=lr_init,
262
- )
263
- return LambdaLR(optimizer, lr_lambda, last_epoch)
264
-
265
-
266
- def _get_inverse_sqrt_schedule_lr_lambda(current_step: int, *, num_warmup_steps: int, timescale: int = None):
267
- if current_step < num_warmup_steps:
268
- return float(current_step) / float(max(1, num_warmup_steps))
269
- shift = timescale - num_warmup_steps
270
- decay = 1.0 / math.sqrt((current_step + shift) / timescale)
271
- return decay
272
-
273
-
274
- def get_inverse_sqrt_schedule(
275
- optimizer: Optimizer, num_warmup_steps: int, timescale: int = None, last_epoch: int = -1
276
- ):
277
- """
278
- Create a schedule with an inverse square-root learning rate, from the initial lr set in the optimizer, after a
279
- warmup period which increases lr linearly from 0 to the initial lr set in the optimizer.
280
-
281
- Args:
282
- optimizer ([`~torch.optim.Optimizer`]):
283
- The optimizer for which to schedule the learning rate.
284
- num_warmup_steps (`int`):
285
- The number of steps for the warmup phase.
286
- timescale (`int`, *optional*, defaults to `num_warmup_steps`):
287
- Time scale.
288
- last_epoch (`int`, *optional*, defaults to -1):
289
- The index of the last epoch when resuming training.
290
-
291
- Return:
292
- `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
293
- """
294
- # Note: this implementation is adapted from
295
- # https://github.com/google-research/big_vision/blob/f071ce68852d56099437004fd70057597a95f6ef/big_vision/utils.py#L930
296
-
297
- if timescale is None:
298
- timescale = num_warmup_steps
299
-
300
- lr_lambda = partial(_get_inverse_sqrt_schedule_lr_lambda, num_warmup_steps=num_warmup_steps, timescale=timescale)
301
- return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
302
-
303
-
304
- TYPE_TO_SCHEDULER_FUNCTION = {
305
- SchedulerType.LINEAR: get_linear_schedule_with_warmup,
306
- SchedulerType.COSINE: get_cosine_schedule_with_warmup,
307
- SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
308
- SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
309
- SchedulerType.CONSTANT: get_constant_schedule,
310
- SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
311
- SchedulerType.INVERSE_SQRT: get_inverse_sqrt_schedule,
312
- }
313
-
314
-
315
- def get_scheduler(
316
- name: Union[str, SchedulerType],
317
- optimizer: Optimizer,
318
- num_warmup_steps: Optional[int] = None,
319
- num_training_steps: Optional[int] = None,
320
- ):
321
- """
322
- Unified API to get any scheduler from its name.
323
-
324
- Args:
325
- name (`str` or `SchedulerType`):
326
- The name of the scheduler to use.
327
- optimizer (`torch.optim.Optimizer`):
328
- The optimizer that will be used during training.
329
- num_warmup_steps (`int`, *optional*):
330
- The number of warmup steps to do. This is not required by all schedulers (hence the argument being
331
- optional), the function will raise an error if it's unset and the scheduler type requires it.
332
- num_training_steps (`int``, *optional*):
333
- The number of training steps to do. This is not required by all schedulers (hence the argument being
334
- optional), the function will raise an error if it's unset and the scheduler type requires it.
335
- """
336
- name = SchedulerType(name)
337
- schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
338
- if name == SchedulerType.CONSTANT:
339
- return schedule_func(optimizer)
340
-
341
- # All other schedulers require `num_warmup_steps`
342
- if num_warmup_steps is None:
343
- raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
344
-
345
- if name == SchedulerType.CONSTANT_WITH_WARMUP:
346
- return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)
347
-
348
- if name == SchedulerType.INVERSE_SQRT:
349
- return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)
350
-
351
- # All other schedulers require `num_training_steps`
352
- if num_training_steps is None:
353
- raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
354
-
355
- return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
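
A minimal usage sketch of the unified API above, again assuming `transformers` is installed; the scheduler name and step counts are illustrative:

```python
import torch
from transformers import get_scheduler

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)

# "cosine" resolves to get_cosine_schedule_with_warmup via TYPE_TO_SCHEDULER_FUNCTION.
scheduler = get_scheduler(
    "cosine",
    optimizer=optimizer,
    num_warmup_steps=100,
    num_training_steps=1000,
)
print(type(scheduler).__name__)  # LambdaLR
```
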
356
-
357
-
358
- class AdamW(Optimizer):
359
- """
360
- Implements Adam algorithm with weight decay fix as introduced in [Decoupled Weight Decay
361
- Regularization](https://arxiv.org/abs/1711.05101).
362
-
363
- Parameters:
364
- params (`Iterable[nn.parameter.Parameter]`):
365
- Iterable of parameters to optimize or dictionaries defining parameter groups.
366
- lr (`float`, *optional*, defaults to 1e-3):
367
- The learning rate to use.
368
- betas (`Tuple[float,float]`, *optional*, defaults to (0.9, 0.999)):
369
- Adam's betas parameters (b1, b2).
370
- eps (`float`, *optional*, defaults to 1e-6):
371
- Adam's epsilon for numerical stability.
372
- weight_decay (`float`, *optional*, defaults to 0):
373
- Decoupled weight decay to apply.
374
- correct_bias (`bool`, *optional*, defaults to `True`):
375
- Whether or not to correct bias in Adam (for instance, in Bert TF repository they use `False`).
376
- no_deprecation_warning (`bool`, *optional*, defaults to `False`):
377
- A flag used to disable the deprecation warning (set to `True` to disable the warning).
378
- """
379
-
380
- def __init__(
381
- self,
382
- params: Iterable[nn.parameter.Parameter],
383
- lr: float = 1e-3,
384
- betas: Tuple[float, float] = (0.9, 0.999),
385
- eps: float = 1e-6,
386
- weight_decay: float = 0.0,
387
- correct_bias: bool = True,
388
- no_deprecation_warning: bool = False,
389
- ):
390
- if not no_deprecation_warning:
391
- warnings.warn(
392
- "This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch"
393
- " implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this"
394
- " warning",
395
- FutureWarning,
396
- )
397
- require_version("torch>=1.5.0") # add_ with alpha
398
- if lr < 0.0:
399
- raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0")
400
- if not 0.0 <= betas[0] < 1.0:
401
- raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0)")
402
- if not 0.0 <= betas[1] < 1.0:
403
- raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0)")
404
- if not 0.0 <= eps:
405
- raise ValueError(f"Invalid epsilon value: {eps} - should be >= 0.0")
406
- defaults = {"lr": lr, "betas": betas, "eps": eps, "weight_decay": weight_decay, "correct_bias": correct_bias}
407
- super().__init__(params, defaults)
408
-
409
- def step(self, closure: Callable = None):
410
- """
411
- Performs a single optimization step.
412
-
413
- Arguments:
414
- closure (`Callable`, *optional*): A closure that reevaluates the model and returns the loss.
415
- """
416
- loss = None
417
- if closure is not None:
418
- loss = closure()
419
-
420
- for group in self.param_groups:
421
- for p in group["params"]:
422
- if p.grad is None:
423
- continue
424
- grad = p.grad.data
425
- if grad.is_sparse:
426
- raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")
427
-
428
- state = self.state[p]
429
-
430
- # State initialization
431
- if len(state) == 0:
432
- state["step"] = 0
433
- # Exponential moving average of gradient values
434
- state["exp_avg"] = torch.zeros_like(p.data)
435
- # Exponential moving average of squared gradient values
436
- state["exp_avg_sq"] = torch.zeros_like(p.data)
437
-
438
- exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
439
- beta1, beta2 = group["betas"]
440
-
441
- state["step"] += 1
442
-
443
- # Decay the first and second moment running average coefficient
444
- # In-place operations to update the averages at the same time
445
- exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))
446
- exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
447
- denom = exp_avg_sq.sqrt().add_(group["eps"])
448
-
449
- step_size = group["lr"]
450
- if group["correct_bias"]: # No bias correction for Bert
451
- bias_correction1 = 1.0 - beta1 ** state["step"]
452
- bias_correction2 = 1.0 - beta2 ** state["step"]
453
- step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
454
-
455
- p.data.addcdiv_(exp_avg, denom, value=-step_size)
456
-
457
- # Just adding the square of the weights to the loss function is *not*
458
- # the correct way of using L2 regularization/weight decay with Adam,
459
- # since that will interact with the m and v parameters in strange ways.
460
- #
461
- # Instead we want to decay the weights in a manner that doesn't interact
462
- # with the m/v parameters. This is equivalent to adding the square
463
- # of the weights to the loss with plain (non-momentum) SGD.
464
- # Add weight decay at the end (fixed version)
465
- if group["weight_decay"] > 0.0:
466
- p.data.add_(p.data, alpha=(-group["lr"] * group["weight_decay"]))
467
-
468
- return loss
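
A toy training-loop sketch pairing this AdamW with a warmup schedule, assuming a `transformers` version that still ships the deprecated class; the data and hyperparameters are made up:

```python
import torch
from transformers.optimization import AdamW, get_linear_schedule_with_warmup

model = torch.nn.Linear(16, 1)
optimizer = AdamW(model.parameters(), lr=2e-5, weight_decay=0.01, no_deprecation_warning=True)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=5, num_training_steps=50)

x, y = torch.randn(32, 16), torch.randn(32, 1)
for _ in range(50):
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    optimizer.step()   # parameter update
    scheduler.step()   # advance the learning-rate schedule
    optimizer.zero_grad()
```
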
469
-
470
-
471
- class Adafactor(Optimizer):
472
- """
473
- AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code:
474
- https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py
475
-
476
- Paper: *Adafactor: Adaptive Learning Rates with Sublinear Memory Cost* https://arxiv.org/abs/1804.04235 Note that
477
- this optimizer internally adjusts the learning rate depending on the `scale_parameter`, `relative_step` and
478
- `warmup_init` options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and
479
- `relative_step=False`.
480
-
481
- Arguments:
482
- params (`Iterable[nn.parameter.Parameter]`):
483
- Iterable of parameters to optimize or dictionaries defining parameter groups.
484
- lr (`float`, *optional*):
485
- The external learning rate.
486
- eps (`Tuple[float, float]`, *optional*, defaults to (1e-30, 1e-3)):
487
- Regularization constants for square gradient and parameter scale respectively
488
- clip_threshold (`float`, *optional*, defaults to 1.0):
489
- Threshold of root mean square of final gradient update
490
- decay_rate (`float`, *optional*, defaults to -0.8):
491
- Coefficient used to compute running averages of square
492
- beta1 (`float`, *optional*):
493
- Coefficient used for computing running averages of gradient
494
- weight_decay (`float`, *optional*, defaults to 0):
495
- Weight decay (L2 penalty)
496
- scale_parameter (`bool`, *optional*, defaults to `True`):
497
- If True, learning rate is scaled by root mean square
498
- relative_step (`bool`, *optional*, defaults to `True`):
499
- If True, time-dependent learning rate is computed instead of external learning rate
500
- warmup_init (`bool`, *optional*, defaults to `False`):
501
- Time-dependent learning rate computation depends on whether warm-up initialization is being used
502
-
503
- This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested it.
504
-
505
- Recommended T5 finetuning settings (https://discuss.huggingface.co/t/t5-finetuning-tips/684/3):
506
-
507
- - Training without LR warmup or clip_threshold is not recommended.
508
-
509
- - use scheduled LR warm-up to fixed LR
510
- - use clip_threshold=1.0 (https://arxiv.org/abs/1804.04235)
511
- - Disable relative updates
512
- - Use scale_parameter=False
513
- - Additional optimizer operations like gradient clipping should not be used alongside Adafactor
514
-
515
- Example:
516
-
517
- ```python
518
- Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3)
519
- ```
520
-
521
- Others reported the following combination to work well:
522
-
523
- ```python
524
- Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
525
- ```
526
-
527
- When using `lr=None` with [`Trainer`] you will most likely need to use [`~optimization.AdafactorSchedule`]
528
- scheduler as following:
529
-
530
- ```python
531
- from transformers.optimization import Adafactor, AdafactorSchedule
532
-
533
- optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
534
- lr_scheduler = AdafactorSchedule(optimizer)
535
- trainer = Trainer(..., optimizers=(optimizer, lr_scheduler))
536
- ```
537
-
538
- Usage:
539
-
540
- ```python
541
- # replace AdamW with Adafactor
542
- optimizer = Adafactor(
543
- model.parameters(),
544
- lr=1e-3,
545
- eps=(1e-30, 1e-3),
546
- clip_threshold=1.0,
547
- decay_rate=-0.8,
548
- beta1=None,
549
- weight_decay=0.0,
550
- relative_step=False,
551
- scale_parameter=False,
552
- warmup_init=False,
553
- )
554
- ```"""
555
-
556
- def __init__(
557
- self,
558
- params,
559
- lr=None,
560
- eps=(1e-30, 1e-3),
561
- clip_threshold=1.0,
562
- decay_rate=-0.8,
563
- beta1=None,
564
- weight_decay=0.0,
565
- scale_parameter=True,
566
- relative_step=True,
567
- warmup_init=False,
568
- ):
569
- require_version("torch>=1.5.0") # add_ with alpha
570
- if lr is not None and relative_step:
571
- raise ValueError("Cannot combine manual `lr` and `relative_step=True` options")
572
- if warmup_init and not relative_step:
573
- raise ValueError("`warmup_init=True` requires `relative_step=True`")
574
-
575
- defaults = {
576
- "lr": lr,
577
- "eps": eps,
578
- "clip_threshold": clip_threshold,
579
- "decay_rate": decay_rate,
580
- "beta1": beta1,
581
- "weight_decay": weight_decay,
582
- "scale_parameter": scale_parameter,
583
- "relative_step": relative_step,
584
- "warmup_init": warmup_init,
585
- }
586
- super().__init__(params, defaults)
587
-
588
- @staticmethod
589
- def _get_lr(param_group, param_state):
590
- rel_step_sz = param_group["lr"]
591
- if param_group["relative_step"]:
592
- min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2
593
- rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"]))
594
- param_scale = 1.0
595
- if param_group["scale_parameter"]:
596
- param_scale = max(param_group["eps"][1], param_state["RMS"])
597
- return param_scale * rel_step_sz
598
-
599
- @staticmethod
600
- def _get_options(param_group, param_shape):
601
- factored = len(param_shape) >= 2
602
- use_first_moment = param_group["beta1"] is not None
603
- return factored, use_first_moment
604
-
605
- @staticmethod
606
- def _rms(tensor):
607
- return tensor.norm(2) / (tensor.numel() ** 0.5)
608
-
609
- @staticmethod
610
- def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col):
611
- # copy from fairseq's adafactor implementation:
612
- # https://github.com/huggingface/transformers/blob/8395f14de6068012787d83989c3627c3df6a252b/src/transformers/optimization.py#L505
613
- r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
614
- c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
615
- return torch.mul(r_factor, c_factor)
616
-
617
- def step(self, closure=None):
618
- """
619
- Performs a single optimization step
620
-
621
- Arguments:
622
- closure (callable, optional): A closure that reevaluates the model
623
- and returns the loss.
624
- """
625
- loss = None
626
- if closure is not None:
627
- loss = closure()
628
-
629
- for group in self.param_groups:
630
- for p in group["params"]:
631
- if p.grad is None:
632
- continue
633
- grad = p.grad.data
634
- if grad.dtype in {torch.float16, torch.bfloat16}:
635
- grad = grad.float()
636
- if grad.is_sparse:
637
- raise RuntimeError("Adafactor does not support sparse gradients.")
638
-
639
- state = self.state[p]
640
- grad_shape = grad.shape
641
-
642
- factored, use_first_moment = self._get_options(group, grad_shape)
643
- # State Initialization
644
- if len(state) == 0:
645
- state["step"] = 0
646
-
647
- if use_first_moment:
648
- # Exponential moving average of gradient values
649
- state["exp_avg"] = torch.zeros_like(grad)
650
- if factored:
651
- state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad)
652
- state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
653
- else:
654
- state["exp_avg_sq"] = torch.zeros_like(grad)
655
-
656
- state["RMS"] = 0
657
- else:
658
- if use_first_moment:
659
- state["exp_avg"] = state["exp_avg"].to(grad)
660
- if factored:
661
- state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad)
662
- state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad)
663
- else:
664
- state["exp_avg_sq"] = state["exp_avg_sq"].to(grad)
665
-
666
- p_data_fp32 = p.data
667
- if p.data.dtype in {torch.float16, torch.bfloat16}:
668
- p_data_fp32 = p_data_fp32.float()
669
-
670
- state["step"] += 1
671
- state["RMS"] = self._rms(p_data_fp32)
672
- lr = self._get_lr(group, state)
673
-
674
- beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
675
- update = (grad**2) + group["eps"][0]
676
- if factored:
677
- exp_avg_sq_row = state["exp_avg_sq_row"]
678
- exp_avg_sq_col = state["exp_avg_sq_col"]
679
-
680
- exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t))
681
- exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t))
682
-
683
- # Approximation of exponential moving average of square of gradient
684
- update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
685
- update.mul_(grad)
686
- else:
687
- exp_avg_sq = state["exp_avg_sq"]
688
-
689
- exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t))
690
- update = exp_avg_sq.rsqrt().mul_(grad)
691
-
692
- update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0))
693
- update.mul_(lr)
694
-
695
- if use_first_moment:
696
- exp_avg = state["exp_avg"]
697
- exp_avg.mul_(group["beta1"]).add_(update, alpha=(1 - group["beta1"]))
698
- update = exp_avg
699
-
700
- if group["weight_decay"] != 0:
701
- p_data_fp32.add_(p_data_fp32, alpha=(-group["weight_decay"] * lr))
702
-
703
- p_data_fp32.add_(-update)
704
-
705
- if p.data.dtype in {torch.float16, torch.bfloat16}:
706
- p.data.copy_(p_data_fp32)
707
-
708
- return loss
709
-
710
-
711
- class AdafactorSchedule(LambdaLR):
712
- """
713
- Since [`~optimization.Adafactor`] performs its own scheduling, if the training loop relies on a scheduler (e.g.,
714
- for logging), this class creates a proxy object that retrieves the current lr values from the optimizer.
715
-
716
- It returns `initial_lr` during startup and the actual `lr` during stepping.
717
- """
718
-
719
- def __init__(self, optimizer, initial_lr=0.0):
720
- def lr_lambda(_):
721
- return initial_lr
722
-
723
- for group in optimizer.param_groups:
724
- group["initial_lr"] = initial_lr
725
- super().__init__(optimizer, lr_lambda)
726
- for group in optimizer.param_groups:
727
- del group["initial_lr"]
728
-
729
- def get_lr(self):
730
- opt = self.optimizer
731
- lrs = [
732
- opt._get_lr(group, opt.state[group["params"][0]])
733
- for group in opt.param_groups
734
- if group["params"][0].grad is not None
735
- ]
736
- if len(lrs) == 0:
737
- lrs = self.base_lrs # if called before stepping
738
- return lrs
739
-
740
-
741
- def get_adafactor_schedule(optimizer, initial_lr=0.0):
742
- """
743
- Get a proxy schedule for [`~optimization.Adafactor`]
744
-
745
- Args:
746
- optimizer ([`~torch.optim.Optimizer`]):
747
- The optimizer for which to schedule the learning rate.
748
- initial_lr (`float`, *optional*, defaults to 0.0):
749
- Initial lr
750
-
751
- Return:
752
- [`~optimization.Adafactor`] proxy schedule object.
753
-
754
-
755
- """
756
- return AdafactorSchedule(optimizer, initial_lr)
 
spaces/ClueAI/CLUE_AIGC/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Demo
3
- emoji: 👁
4
- colorFrom: pink
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.9.1
8
- app_file: app.py
9
- pinned: false
10
- license: creativeml-openrail-m
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/CofAI/chat.b4/README.md DELETED
@@ -1,16 +0,0 @@
1
- ---
2
- title: Chat.CofAI BETA-4
3
- emoji: 💬♻️🗨️
4
- colorFrom: blue
5
- colorTo: yellow
6
- sdk: docker
7
- sdk_version: 1.24.0
8
- app_file: run.py
9
- pinned: true
10
- app_port: 1338
11
- duplicated_from: TNR-5/freegpt-webui
12
- ---
13
-
14
- 💬 This is a free ChatGPT-4 web UI!
15
-
16
- 🍀 Try it for free!
 
spaces/CoreyMorris/MMLU-by-task-Leaderboard/result_data_processor.py DELETED
@@ -1,226 +0,0 @@
1
- import pandas as pd
2
- import os
3
- import fnmatch
4
- import json
5
- import re
6
- import numpy as np
7
- import logging
8
-
9
- logging.basicConfig(filename='error_log.log', level=logging.ERROR)
10
-
11
- class ResultDataProcessor:
12
-
13
-
14
- def __init__(self, directory='results', pattern='results*.json'):
15
-
16
- self.directory = directory
17
- self.pattern = pattern
18
- self.data = self.process_data()
19
- self.ranked_data = self.rank_data()
20
-
21
- def _find_files(self, directory='results', pattern='results*.json'):
22
- matching_files = {}
23
- for root, dirs, files in os.walk(directory):
24
- for basename in files:
25
- if fnmatch.fnmatch(basename, pattern):
26
- filename = os.path.join(root, basename)
27
- matching_files[root] = filename
28
- # TODO decide on removing this since I am catching the error when processing the file
29
- matching_files = {key: value for key, value in matching_files.items() if 'gpt-j-6b' not in key}
30
- matching_files = list(matching_files.values())
31
- return matching_files
32
-
33
- def _read_and_transform_data(self, filename):
34
- with open(filename) as f:
35
- data = json.load(f)
36
- df = pd.DataFrame(data['results']).T
37
- return df
38
-
39
- def _cleanup_dataframe(self, df, model_name):
40
- df = df.rename(columns={'acc': model_name})
41
- df.index = (df.index.str.replace('hendrycksTest-', 'MMLU_', regex=True)
42
- .str.replace('harness\|', '', regex=True)
43
- .str.replace('\|5', '', regex=True))
44
- return df[[model_name]]
45
-
46
- def _extract_mc1(self, df, model_name):
47
- df = df.rename(columns={'mc1': model_name})
48
- # rename row harness|truthfulqa:mc|0 to truthfulqa:mc1
49
- df.index = (df.index.str.replace('mc\|0', 'mc1', regex=True))
50
- # just return the harness|truthfulqa:mc1 row
51
- df = df.loc[['harness|truthfulqa:mc1']]
52
- return df[[model_name]]
53
-
54
- def _extract_mc2(self, df, model_name):
55
- # rename row harness|truthfulqa:mc|0 to truthfulqa:mc2
56
- df = df.rename(columns={'mc2': model_name})
57
- df.index = (df.index.str.replace('mc\|0', 'mc2', regex=True))
58
- df = df.loc[['harness|truthfulqa:mc2']]
59
- return df[[model_name]]
60
-
61
- # remove extreme outliers from column harness|truthfulqa:mc1
62
- def _remove_mc1_outliers(self, df):
63
- mc1 = df['harness|truthfulqa:mc1']
64
- # Identify the outliers
65
- # outliers_condition = mc1 > mc1.quantile(.95)
66
- outliers_condition = mc1 == 1.0
67
- # Replace the outliers with NaN
68
- df.loc[outliers_condition, 'harness|truthfulqa:mc1'] = np.nan
69
- return df
70
-
71
-
72
-
73
- @staticmethod
74
- def _extract_parameters(model_name):
75
- """
76
- Function to extract parameters from model name.
77
- It handles names with 'b/B' for billions and 'm/M' for millions.
78
- """
79
- # pattern to match a number followed by 'b' (representing billions) or 'm' (representing millions)
80
- pattern = re.compile(r'(\d+\.?\d*)([bBmM])')
81
-
82
- match = pattern.search(model_name)
83
-
84
- if match:
85
- num, magnitude = match.groups()
86
- num = float(num)
87
-
88
- # convert millions to billions
89
- if magnitude.lower() == 'm':
90
- num /= 1000
91
-
92
- return num
93
-
94
- # return NaN if no match
95
- return np.nan
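
A standalone sanity check of the regex logic above; the model names are hypothetical:

```python
import re

import numpy as np

pattern = re.compile(r'(\d+\.?\d*)([bBmM])')

def extract_parameters(model_name):
    # Same logic as _extract_parameters above, pulled out for a quick test.
    match = pattern.search(model_name)
    if match:
        num, magnitude = match.groups()
        num = float(num)
        if magnitude.lower() == 'm':
            num /= 1000  # millions -> billions
        return num
    return np.nan

print(extract_parameters("llama-2-13b"))  # 13.0
print(extract_parameters("pythia-160m"))  # 0.16
print(extract_parameters("some-model"))   # nan
```
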
96
-
97
-
98
- def process_data(self):
99
- full_model_name_count = 0
100
- full_model_names = []
101
- dataframes = []
102
- organization_names = []
103
- for filename in self._find_files(self.directory, self.pattern):
104
- # try:
105
- raw_data = self._read_and_transform_data(filename)
106
- split_path = filename.split('/')
107
- model_name = split_path[2]
108
- organization_name = split_path[1]
109
- full_model_name = f'{organization_name}/{model_name}'
110
- full_model_name_count += 1
111
- # print count every 100 models
112
- if full_model_name_count % 100 == 0:
113
- print(full_model_name_count)
114
-
115
- cleaned_data = self._cleanup_dataframe(raw_data, model_name)
116
- # mc1 = self._extract_mc1(raw_data, full_model_name)
117
- # mc2 = self._extract_mc2(raw_data, full_model_name)
118
- # cleaned_data = pd.concat([cleaned_data, mc1])
119
- # cleaned_data = pd.concat([cleaned_data, mc2])
120
- organization_names.append(organization_name)
121
- full_model_names.append(full_model_name)
122
- dataframes.append(cleaned_data)
123
- # except Exception as e:
124
- # # logging.error(f'Error processing {filename}')
125
- # # logging.error(f'The error is: {e}')
126
- # print(f'Error processing {filename}')
127
- # print(f'The error is: {e}')
128
- # continue
129
-
130
-
131
- data = pd.concat(dataframes, axis=1).transpose()
132
-
133
- # Add organization column
134
- # data['organization'] = organization_names
135
- print("full_model_names")
136
- print(len(full_model_names))
137
- print("organization_names")
138
- print(len(organization_name))
139
- data['full_model_name'] = full_model_names
140
-
141
- # Add Model Name and rearrange columns
142
- data['Model Name'] = data.index
143
- cols = data.columns.tolist()
144
- cols = cols[-1:] + cols[:-1]
145
- data = data[cols]
146
-
147
- # Remove the 'Model Name' column
148
- data = data.drop(columns=['Model Name'])
149
-
150
- # Add average column
151
- data['MMLU_average'] = data.filter(regex='MMLU').mean(axis=1)
152
-
153
- # Reorder columns to move 'MMLU_average' to the third position
154
- cols = data.columns.tolist()
155
- cols = cols[:2] + cols[-1:] + cols[2:-1]
156
- data = data[cols]
157
-
158
-
159
-
160
-
161
-
162
-
163
- # Add parameter count column using extract_parameters function
164
- data['Parameters'] = data.index.to_series().apply(self._extract_parameters)
165
-
166
- # move the parameters column to the front of the dataframe
167
- cols = data.columns.tolist()
168
- cols = cols[-1:] + cols[:-1]
169
- print(cols)
170
- data = data[cols]
171
-
172
-
173
- new_columns = ['full_model_name'] + [col for col in data.columns if col != 'full_model_name']
174
- data = data.reindex(columns=new_columns)
175
-
176
- # # Reorder columns to move 'organization' to the second position
177
- # cols = data.columns.tolist()
178
- # cols = cols[-1:] + cols[:-1]
179
- # data = data[cols]
180
-
181
- # remove extreme outliers from column harness|truthfulqa:mc1
182
- # data = self._remove_mc1_outliers(data)
183
-
184
- data = self.manual_removal_of_models(data)
185
-
186
-
187
- # drop rows if MMLU_abstract_algebra is NaN
188
- data = data.dropna(subset=['MMLU_abstract_algebra'])
189
-
190
- # add a URL column that takes https://huggingface.co/ + full_model_name
191
- data['URL'] = 'https://huggingface.co/' + data['full_model_name']
192
-
193
- new_columns = ['URL'] + [col for col in data.columns if col != 'URL']
194
- data = data.reindex(columns=new_columns)
195
-
196
- # drop columns drop|3 gsm8k and winogrande
197
- data = data.drop(columns=['drop|3', 'gsm8k', 'winogrande'])
198
- # # Drop specific columns
199
- data = data.drop(columns=['all', 'truthfulqa:mc|0'])
200
-
201
- # save to csv with the current date as part of the filename
202
- data.to_csv(f'processed_data_{pd.Timestamp.now().strftime("%Y-%m-%d")}.csv')
203
-
204
- return data
205
-
206
- def manual_removal_of_models(self, df):
207
- # remove models verified to be trained on evaluation data
208
- # load the list of models
209
- with open('contaminated_models.txt') as f:
210
- contaminated_models = f.read().splitlines()
211
- # remove the models from the dataframe
212
- df = df[~df.index.isin(contaminated_models)]
213
- return df
214
-
215
-
216
- def rank_data(self):
217
- # add rank for each column to the dataframe
218
- # copy the data dataframe to avoid modifying the original dataframe
219
- rank_data = self.data.copy()
220
- for col in list(rank_data.columns):
221
- rank_data[col + "_rank"] = rank_data[col].rank(ascending=False, method='min')
222
-
223
- return rank_data
224
-
225
- def get_data(self, selected_models):
226
- return self.data[self.data.index.isin(selected_models)]
 
spaces/CorvaeOboro/gen_ability_icon/torch_utils/__init__.py DELETED
@@ -1,9 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
-
- # empty
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/designspaceLib/__init__.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/transformPen.py DELETED
@@ -1,111 +0,0 @@
- from fontTools.pens.filterPen import FilterPen, FilterPointPen
-
-
- __all__ = ["TransformPen", "TransformPointPen"]
-
-
- class TransformPen(FilterPen):
-
-     """Pen that transforms all coordinates using an Affine transformation,
-     and passes them to another pen.
-     """
-
-     def __init__(self, outPen, transformation):
-         """The 'outPen' argument is another pen object. It will receive the
-         transformed coordinates. The 'transformation' argument can either
-         be a six-tuple, or a fontTools.misc.transform.Transform object.
-         """
-         super(TransformPen, self).__init__(outPen)
-         if not hasattr(transformation, "transformPoint"):
-             from fontTools.misc.transform import Transform
-
-             transformation = Transform(*transformation)
-         self._transformation = transformation
-         self._transformPoint = transformation.transformPoint
-         self._stack = []
-
-     def moveTo(self, pt):
-         self._outPen.moveTo(self._transformPoint(pt))
-
-     def lineTo(self, pt):
-         self._outPen.lineTo(self._transformPoint(pt))
-
-     def curveTo(self, *points):
-         self._outPen.curveTo(*self._transformPoints(points))
-
-     def qCurveTo(self, *points):
-         if points[-1] is None:
-             points = self._transformPoints(points[:-1]) + [None]
-         else:
-             points = self._transformPoints(points)
-         self._outPen.qCurveTo(*points)
-
-     def _transformPoints(self, points):
-         transformPoint = self._transformPoint
-         return [transformPoint(pt) for pt in points]
-
-     def closePath(self):
-         self._outPen.closePath()
-
-     def endPath(self):
-         self._outPen.endPath()
-
-     def addComponent(self, glyphName, transformation):
-         transformation = self._transformation.transform(transformation)
-         self._outPen.addComponent(glyphName, transformation)
-
-
- class TransformPointPen(FilterPointPen):
-     """PointPen that transforms all coordinates using an Affine transformation,
-     and passes them to another PointPen.
-
-     >>> from fontTools.pens.recordingPen import RecordingPointPen
-     >>> rec = RecordingPointPen()
-     >>> pen = TransformPointPen(rec, (2, 0, 0, 2, -10, 5))
-     >>> v = iter(rec.value)
-     >>> pen.beginPath(identifier="contour-0")
-     >>> next(v)
-     ('beginPath', (), {'identifier': 'contour-0'})
-     >>> pen.addPoint((100, 100), "line")
-     >>> next(v)
-     ('addPoint', ((190, 205), 'line', False, None), {})
-     >>> pen.endPath()
-     >>> next(v)
-     ('endPath', (), {})
-     >>> pen.addComponent("a", (1, 0, 0, 1, -10, 5), identifier="component-0")
-     >>> next(v)
-     ('addComponent', ('a', <Transform [2 0 0 2 -30 15]>), {'identifier': 'component-0'})
-     """
-
-     def __init__(self, outPointPen, transformation):
-         """The 'outPointPen' argument is another point pen object.
-         It will receive the transformed coordinates.
-         The 'transformation' argument can either be a six-tuple, or a
-         fontTools.misc.transform.Transform object.
-         """
-         super().__init__(outPointPen)
-         if not hasattr(transformation, "transformPoint"):
-             from fontTools.misc.transform import Transform
-
-             transformation = Transform(*transformation)
-         self._transformation = transformation
-         self._transformPoint = transformation.transformPoint
-
-     def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
-         self._outPen.addPoint(
-             self._transformPoint(pt), segmentType, smooth, name, **kwargs
-         )
-
-     def addComponent(self, baseGlyphName, transformation, **kwargs):
-         transformation = self._transformation.transform(transformation)
-         self._outPen.addComponent(baseGlyphName, transformation, **kwargs)
-
-
- if __name__ == "__main__":
-     from fontTools.pens.basePen import _TestPen
-
-     pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0))
-     pen.moveTo((0, 0))
-     pen.lineTo((0, 100))
-     pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
-     pen.closePath()
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-4ffdbeab.css DELETED
@@ -1 +0,0 @@
- .model3D.svelte-14ct53h{display:flex;position:relative;width:var(--size-full);height:var(--size-full)}canvas.svelte-14ct53h{width:var(--size-full);height:var(--size-full);object-fit:contain}.download.svelte-14ct53h{position:absolute;top:6px;right:6px}.input-model.svelte-wn75i6{display:flex;position:relative;justify-content:center;align-items:center;width:var(--size-full);height:var(--size-64)}canvas.svelte-wn75i6{width:var(--size-full);height:var(--size-full);object-fit:contain}
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-bacb8946.js DELETED
@@ -1,5 +0,0 @@
- import{S as he,e as ge,s as me,O as K,N as T,K as b,U as B,p as O,Q as I,n as te,A as S,a1 as be,F as ce,h as Z,P as ee,R as pe,m as rl,aq as il,j as ne,k as j,M as q,o as G,t as ae,z as M,u as X,v as U,y as x,x as Q,B as Fe,J as $,a7 as P,G as W,H as _e,I as de,am as fl,E as ul,ae as ol,q as cl,r as _l}from"./index-1d65707a.js";import{a as He,B as dl}from"./Button-f155035a.js";import{U as hl}from"./Upload-9bb55fba.js";import"./ModifyUpload.svelte_svelte_type_style_lang-d2acacf0.js";import{d as gl}from"./dsv-576afacd.js";var Ne=Object.prototype.hasOwnProperty;function se(a,e){var l,t;if(a===e)return!0;if(a&&e&&(l=a.constructor)===e.constructor){if(l===Date)return a.getTime()===e.getTime();if(l===RegExp)return a.toString()===e.toString();if(l===Array){if((t=a.length)===e.length)for(;t--&&se(a[t],e[t]););return t===-1}if(!l||typeof a=="object"){t=0;for(l in a)if(Ne.call(a,l)&&++t&&!Ne.call(e,l)||!(l in e)||!se(a[l],e[l]))return!1;return Object.keys(e).length===t}}return a!==a&&e!==e}function Ee(a){let e,l,t;return{c(){e=T("input"),b(e,"tabindex","-1"),e.value=a[0],b(e,"class","svelte-q8uklq"),B(e,"header",a[3])},m(n,r){O(n,e,r),a[7](e),l||(t=[I(e,"keydown",a[6]),I(e,"blur",a[8])],l=!0)},p(n,r){r&1&&e.value!==n[0]&&(e.value=n[0]),r&8&&B(e,"header",n[3])},d(n){n&&S(e),a[7](null),l=!1,be(t)}}}function ml(a){let e;return{c(){e=ee(a[0])},m(l,t){O(l,e,t)},p(l,t){t&1&&pe(e,l[0])},d(l){l&&S(e)}}}function bl(a){let e,l;return{c(){e=new il(!1),l=rl(),e.a=l},m(t,n){e.m(a[0],t,n),O(t,l,n)},p(t,n){n&1&&e.p(t[0])},d(t){t&&(S(l),e.d())}}}function pl(a){let e,l,t,n,r=a[2]&&Ee(a);function c(f,o){return f[4]==="markdown"||f[4]==="html"?bl:ml}let i=c(a),_=i(a);return{c(){r&&r.c(),e=K(),l=T("span"),_.c(),b(l,"tabindex","-1"),b(l,"role","button"),b(l,"class","svelte-q8uklq"),B(l,"edit",a[2])},m(f,o){r&&r.m(f,o),O(f,e,o),O(f,l,o),_.m(l,null),t||(n=I(l,"dblclick",a[5]),t=!0)},p(f,[o]){f[2]?r?r.p(f,o):(r=Ee(f),r.c(),r.m(e.parentNode,e)):r&&(r.d(1),r=null),i===(i=c(f))&&_?_.p(f,o):(_.d(1),_=i(f),_&&(_.c(),_.m(l,null))),o&4&&B(l,"edit",f[2])},i:te,o:te,d(f){f&&(S(e),S(l)),r&&r.d(f),_.d(),t=!1,n()}}}function wl(a,e,l){let{edit:t}=e,{value:n=""}=e,{el:r}=e,{header:c=!1}=e,{datatype:i="str"}=e;function _(p){ce.call(this,a,p)}function f(p){ce.call(this,a,p)}function o(p){Z[p?"unshift":"push"](()=>{r=p,l(1,r)})}const m=({currentTarget:p})=>{l(0,n=p.value),p.setAttribute("tabindex","-1")};return a.$$set=p=>{"edit"in p&&l(2,t=p.edit),"value"in p&&l(0,n=p.value),"el"in p&&l(1,r=p.el),"header"in p&&l(3,c=p.header),"datatype"in p&&l(4,i=p.datatype)},[n,r,t,c,i,_,f,o,m]}class Je extends he{constructor(e){super(),ge(this,e,wl,pl,me,{edit:2,value:0,el:1,header:3,datatype:4})}}function Le(a,e,l){const t=a.slice();return t[53]=e[l],t[55]=l,t}function qe(a,e,l){const t=a.slice();return t[56]=e[l].value,t[57]=e[l].id,t[58]=e,t[59]=l,t}function Be(a,e,l){const t=a.slice();return t[56]=e[l].value,t[57]=e[l].id,t[60]=e,t[55]=l,t}function Me(a){let e,l;return{c(){e=T("p"),l=ee(a[1]),b(e,"class","svelte-1tclfmr")},m(t,n){O(t,e,n),q(e,l)},p(t,n){n[0]&2&&pe(l,t[1])},d(t){t&&S(e)}}}function Oe(a){let e,l;return{c(){e=T("caption"),l=ee(a[1]),b(e,"class","sr-only")},m(t,n){O(t,e,n),q(e,l)},p(t,n){n[0]&2&&pe(l,t[1])},d(t){t&&S(e)}}}function Se(a,e){let l,t,n,r,c,i,_,f,o,m,p,h=e[57],A,d,C;function y(w){e[30](w,e[57])}function g(){return e[31](e[57])}let N={value:e[56],edit:e[13]===e[57],header:!0};e[10][e[57]].input!==void 0&&(N.el=e[10][e[57]].input),n=new 
Je({props:N}),Z.push(()=>ne(n,"el",y)),n.$on("keydown",e[21]),n.$on("dblclick",g);function L(){return e[32](e[55])}const v=()=>e[33](l,h),R=()=>e[33](null,h);return{key:a,first:null,c(){l=T("th"),t=T("div"),j(n.$$.fragment),c=K(),i=T("div"),_=$("svg"),f=$("path"),m=K(),b(f,"d","M4.49999 0L8.3971 6.75H0.602875L4.49999 0Z"),b(_,"width","1em"),b(_,"height","1em"),b(_,"viewBox","0 0 9 7"),b(_,"fill","none"),b(_,"xmlns","http://www.w3.org/2000/svg"),b(_,"class","svelte-1tclfmr"),b(i,"class",o="sort-button "+e[11]+" svelte-1tclfmr"),B(i,"sorted",e[12]===e[55]),B(i,"des",e[12]===e[55]&&e[11]==="des"),b(t,"class","cell-wrap svelte-1tclfmr"),b(l,"aria-sort",p=e[15](e[56],e[12],e[11])),b(l,"class","svelte-1tclfmr"),B(l,"editing",e[13]===e[57]),this.first=l},m(w,z){O(w,l,z),q(l,t),G(n,t,null),q(t,c),q(t,i),q(i,_),q(_,f),q(l,m),v(),A=!0,d||(C=I(i,"click",L),d=!0)},p(w,z){e=w;const J={};z[0]&256&&(J.value=e[56]),z[0]&8448&&(J.edit=e[13]===e[57]),!r&&z[0]&1280&&(r=!0,J.el=e[10][e[57]].input,ae(()=>r=!1)),n.$set(J),(!A||z[0]&2048&&o!==(o="sort-button "+e[11]+" svelte-1tclfmr"))&&b(i,"class",o),(!A||z[0]&6400)&&B(i,"sorted",e[12]===e[55]),(!A||z[0]&6400)&&B(i,"des",e[12]===e[55]&&e[11]==="des"),(!A||z[0]&6400&&p!==(p=e[15](e[56],e[12],e[11])))&&b(l,"aria-sort",p),h!==e[57]&&(R(),h=e[57],v()),(!A||z[0]&8448)&&B(l,"editing",e[13]===e[57])},i(w){A||(M(n.$$.fragment,w),A=!0)},o(w){U(n.$$.fragment,w),A=!1},d(w){w&&S(l),Q(n),R(),d=!1,C()}}}function Te(a,e){let l,t,n,r,c,i=e[57],_,f,o;function m(L){e[34](L,e[56],e[58],e[59])}function p(L){e[35](L,e[57])}let h={edit:e[7]===e[57],datatype:Array.isArray(e[0])?e[0][e[59]]:e[0]};e[56]!==void 0&&(h.value=e[56]),e[10][e[57]].input!==void 0&&(h.el=e[10][e[57]].input),n=new Je({props:h}),Z.push(()=>ne(n,"value",m)),Z.push(()=>ne(n,"el",p));const A=()=>e[36](l,i),d=()=>e[36](null,i);function C(){return e[37](e[57])}function y(){return e[38](e[57])}function g(){return e[39](e[57])}function N(...L){return e[40](e[55],e[59],e[57],...L)}return{key:a,first:null,c(){l=T("td"),t=T("div"),j(n.$$.fragment),b(t,"class","cell-wrap svelte-1tclfmr"),B(t,"border-transparent",e[6]!==e[57]),b(l,"tabindex","0"),b(l,"class","svelte-1tclfmr"),this.first=l},m(L,v){O(L,l,v),q(l,t),G(n,t,null),A(),_=!0,f||(o=[I(l,"touchstart",C,{passive:!0}),I(l,"click",y),I(l,"dblclick",g),I(l,"keydown",N)],f=!0)},p(L,v){e=L;const R={};v[0]&640&&(R.edit=e[7]===e[57]),v[0]&513&&(R.datatype=Array.isArray(e[0])?e[0][e[59]]:e[0]),!r&&v[0]&512&&(r=!0,R.value=e[56],ae(()=>r=!1)),!c&&v[0]&1536&&(c=!0,R.el=e[10][e[57]].input,ae(()=>c=!1)),n.$set(R),(!_||v[0]&576)&&B(t,"border-transparent",e[6]!==e[57]),i!==e[57]&&(d(),i=e[57],A())},i(L){_||(M(n.$$.fragment,L),_=!0)},o(L){U(n.$$.fragment,L),_=!1},d(L){L&&S(l),Q(n),d(),f=!1,be(o)}}}function Ce(a,e){let l,t=[],n=new Map,r,c,i=W(e[53]);const _=f=>f[57];for(let f=0;f<i.length;f+=1){let o=qe(e,i,f),m=_(o);n.set(m,t[f]=Te(m,o))}return{key:a,first:null,c(){l=T("tr");for(let f=0;f<t.length;f+=1)t[f].c();r=K(),b(l,"class","svelte-1tclfmr"),this.first=l},m(f,o){O(f,l,o);for(let m=0;m<t.length;m+=1)t[m]&&t[m].m(l,null);q(l,r),c=!0},p(f,o){e=f,o[0]&460481&&(i=W(e[53]),X(),t=_e(t,o,_,1,e,i,n,l,de,Te,r,qe),x())},i(f){if(!c){for(let o=0;o<i.length;o+=1)M(t[o]);c=!0}},o(f){for(let o=0;o<t.length;o+=1)U(t[o]);c=!1},d(f){f&&S(l);for(let o=0;o<t.length;o+=1)t[o].d()}}}function kl(a){let e,l,t,n,r=[],c=new Map,i,_,f=[],o=new Map,m,p=a[1]&&a[1].length!==0&&Oe(a),h=W(a[8]);const A=y=>y[57];for(let y=0;y<h.length;y+=1){let g=Be(a,h,y),N=A(g);c.set(N,r[y]=Se(N,g))}let d=W(a[9]);const 
C=y=>y[53];for(let y=0;y<d.length;y+=1){let g=Le(a,d,y),N=C(g);o.set(N,f[y]=Ce(N,g))}return{c(){e=T("table"),p&&p.c(),l=K(),t=T("thead"),n=T("tr");for(let y=0;y<r.length;y+=1)r[y].c();i=K(),_=T("tbody");for(let y=0;y<f.length;y+=1)f[y].c();b(n,"class","svelte-1tclfmr"),b(t,"class","svelte-1tclfmr"),b(_,"class","svelte-1tclfmr"),b(e,"class","svelte-1tclfmr"),B(e,"dragging",a[14])},m(y,g){O(y,e,g),p&&p.m(e,null),q(e,l),q(e,t),q(t,n);for(let N=0;N<r.length;N+=1)r[N]&&r[N].m(n,null);q(e,i),q(e,_);for(let N=0;N<f.length;N+=1)f[N]&&f[N].m(_,null);m=!0},p(y,g){y[1]&&y[1].length!==0?p?p.p(y,g):(p=Oe(y),p.c(),p.m(e,l)):p&&(p.d(1),p=null),g[0]&3718400&&(h=W(y[8]),X(),r=_e(r,g,A,1,y,h,c,n,de,Se,null,Be),x()),g[0]&460481&&(d=W(y[9]),X(),f=_e(f,g,C,1,y,d,o,_,de,Ce,null,Le),x()),(!m||g[0]&16384)&&B(e,"dragging",y[14])},i(y){if(!m){for(let g=0;g<h.length;g+=1)M(r[g]);for(let g=0;g<d.length;g+=1)M(f[g]);m=!0}},o(y){for(let g=0;g<r.length;g+=1)U(r[g]);for(let g=0;g<f.length;g+=1)U(f[g]);m=!1},d(y){y&&S(e),p&&p.d();for(let g=0;g<r.length;g+=1)r[g].d();for(let g=0;g<f.length;g+=1)f[g].d()}}}function Re(a){let e,l,t,n=a[3][1]==="dynamic"&&Ue(a),r=a[2][1]==="dynamic"&&ze(a);return{c(){e=T("div"),n&&n.c(),l=K(),r&&r.c(),b(e,"class","controls-wrap svelte-1tclfmr")},m(c,i){O(c,e,i),n&&n.m(e,null),q(e,l),r&&r.m(e,null),t=!0},p(c,i){c[3][1]==="dynamic"?n?(n.p(c,i),i[0]&8&&M(n,1)):(n=Ue(c),n.c(),M(n,1),n.m(e,l)):n&&(X(),U(n,1,1,()=>{n=null}),x()),c[2][1]==="dynamic"?r?(r.p(c,i),i[0]&4&&M(r,1)):(r=ze(c),r.c(),M(r,1),r.m(e,null)):r&&(X(),U(r,1,1,()=>{r=null}),x())},i(c){t||(M(n),M(r),t=!0)},o(c){U(n),U(r),t=!1},d(c){c&&S(e),n&&n.d(),r&&r.d()}}}function Ue(a){let e,l,t;return l=new He({props:{variant:"secondary",size:"sm",$$slots:{default:[yl]},$$scope:{ctx:a}}}),l.$on("click",a[43]),{c(){e=T("span"),j(l.$$.fragment),b(e,"class","button-wrap svelte-1tclfmr")},m(n,r){O(n,e,r),G(l,e,null),t=!0},p(n,r){const c={};r[1]&1073741824&&(c.$$scope={dirty:r,ctx:n}),l.$set(c)},i(n){t||(M(l.$$.fragment,n),t=!0)},o(n){U(l.$$.fragment,n),t=!1},d(n){n&&S(e),Q(l)}}}function yl(a){let e,l,t;return{c(){e=$("svg"),l=$("path"),t=ee(`
- New row`),b(l,"fill","currentColor"),b(l,"d","M24.59 16.59L17 24.17V4h-2v20.17l-7.59-7.58L6 18l10 10l10-10l-1.41-1.41z"),b(e,"xmlns","http://www.w3.org/2000/svg"),b(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),b(e,"aria-hidden","true"),b(e,"role","img"),b(e,"width","1em"),b(e,"height","1em"),b(e,"preserveAspectRatio","xMidYMid meet"),b(e,"viewBox","0 0 32 32"),b(e,"class","svelte-1tclfmr")},m(n,r){O(n,e,r),q(e,l),O(n,t,r)},p:te,d(n){n&&(S(e),S(t))}}}function ze(a){let e,l,t;return l=new He({props:{variant:"secondary",size:"sm",$$slots:{default:[vl]},$$scope:{ctx:a}}}),l.$on("click",a[23]),{c(){e=T("span"),j(l.$$.fragment),b(e,"class","button-wrap svelte-1tclfmr")},m(n,r){O(n,e,r),G(l,e,null),t=!0},p(n,r){const c={};r[1]&1073741824&&(c.$$scope={dirty:r,ctx:n}),l.$set(c)},i(n){t||(M(l.$$.fragment,n),t=!0)},o(n){U(l.$$.fragment,n),t=!1},d(n){n&&S(e),Q(l)}}}function vl(a){let e,l,t;return{c(){e=$("svg"),l=$("path"),t=ee(`
- New column`),b(l,"fill","currentColor"),b(l,"d","m18 6l-1.43 1.393L24.15 15H4v2h20.15l-7.58 7.573L18 26l10-10L18 6z"),b(e,"xmlns","http://www.w3.org/2000/svg"),b(e,"xmlns:xlink","http://www.w3.org/1999/xlink"),b(e,"aria-hidden","true"),b(e,"role","img"),b(e,"width","1em"),b(e,"height","1em"),b(e,"preserveAspectRatio","xMidYMid meet"),b(e,"viewBox","0 0 32 32"),b(e,"class","svelte-1tclfmr")},m(n,r){O(n,e,r),q(e,l),O(n,t,r)},p:te,d(n){n&&(S(e),S(t))}}}function Al(a){let e,l,t,n,r,c,i,_,f,o=a[1]&&a[1].length!==0&&Me(a);function m(A){a[41](A)}let p={flex:!1,center:!1,boundedheight:!1,disable_click:!0,$$slots:{default:[kl]},$$scope:{ctx:a}};a[14]!==void 0&&(p.dragging=a[14]),n=new hl({props:p}),Z.push(()=>ne(n,"dragging",m)),n.$on("load",a[42]);let h=a[4]&&Re(a);return{c(){e=T("div"),o&&o.c(),l=K(),t=T("div"),j(n.$$.fragment),c=K(),h&&h.c(),b(t,"class","table-wrap scroll-hide svelte-1tclfmr"),B(t,"dragging",a[14]),B(t,"no-wrap",!a[5]),b(e,"class","svelte-1tclfmr"),B(e,"label",a[1]&&a[1].length!==0)},m(A,d){O(A,e,d),o&&o.m(e,null),q(e,l),q(e,t),G(n,t,null),q(e,c),h&&h.m(e,null),i=!0,_||(f=[I(window,"click",a[24]),I(window,"touchstart",a[24])],_=!0)},p(A,d){A[1]&&A[1].length!==0?o?o.p(A,d):(o=Me(A),o.c(),o.m(e,l)):o&&(o.d(1),o=null);const C={};d[0]&32707|d[1]&1073741824&&(C.$$scope={dirty:d,ctx:A}),!r&&d[0]&16384&&(r=!0,C.dragging=A[14],ae(()=>r=!1)),n.$set(C),(!i||d[0]&16384)&&B(t,"dragging",A[14]),(!i||d[0]&32)&&B(t,"no-wrap",!A[5]),A[4]?h?(h.p(A,d),d[0]&16&&M(h,1)):(h=Re(A),h.c(),M(h,1),h.m(e,null)):h&&(X(),U(h,1,1,()=>{h=null}),x()),(!i||d[0]&2)&&B(e,"label",A[1]&&A[1].length!==0)},i(A){i||(M(n.$$.fragment,A),M(h),i=!0)},o(A){U(n.$$.fragment,A),U(h),i=!1},d(A){A&&S(e),o&&o.d(),Q(n),h&&h.d(),_=!1,be(f)}}}function Dl(a,e){return e.filter(l);function l(t){var n=-1;return a.split(`
- `).every(r);function r(c){if(!c)return!0;var i=c.split(t).length;return n<0&&(n=i),n===i&&i>1}}}function Nl(a){const e=atob(a.split(",")[1]),l=a.split(",")[0].split(":")[1].split(";")[0],t=new ArrayBuffer(e.length),n=new Uint8Array(t);for(let r=0;r<e.length;r++)n[r]=e.charCodeAt(r);return new Blob([t],{type:l})}function El(a,e,l){let{datatype:t}=e,{label:n=null}=e,{headers:r=[]}=e,{values:c=[[]]}=e,{col_count:i}=e,{row_count:_}=e,{editable:f=!0}=e,{wrap:o=!1}=e,m=!1;const p=Fe();let h=!1;const A=(s,u)=>v[s][u].value;let d={};function C(s){let u=s||[];if(i[1]==="fixed"&&u.length<i[0]){const k=Array(i[0]-u.length).fill("").map((D,E)=>`${E+u.length}`);u=u.concat(k)}return!u||u.length===0?Array(i[0]).fill(0).map((k,D)=>{const E=`h-${D}`;return l(10,d[E]={cell:null,input:null},d),{id:E,value:JSON.stringify(D+1)}}):u.map((k,D)=>{const E=`h-${D}`;return l(10,d[E]={cell:null,input:null},d),{id:E,value:k??""}})}function y(s){const u=s.length>0?s.length:_[0];return Array(_[1]==="fixed"||u<_[0]?_[0]:u).fill(0).map((k,D)=>Array(i[1]==="fixed"?i[0]:s[0].length).fill(0).map((E,H)=>{const Y=`${D}-${H}`;return l(10,d[Y]={input:null,cell:null},d),{value:s?.[D]?.[H]??"",id:Y}}))}let g=C(r),N;async function L(){typeof h=="string"?(await P(),d[h]?.input?.focus()):typeof m=="string"&&(await P(),d[m]?.input?.focus())}let v=[[]],R;function w(s,u,k){if(!u)return"none";if(r[u]===s){if(k==="asc")return"ascending";if(k==="des")return"descending"}}function z(s){return v.reduce((u,k,D)=>{const E=k.reduce((H,Y,ue)=>s===Y.id?ue:H,-1);return E===-1?u:[D,E]},[-1,-1])}async function J(s,u){if(!f||h===s)return;if(u){const[D,E]=z(s);l(9,v[D][E].value="",v)}l(7,h=s),await P();const{input:k}=d[s];k?.focus()}async function we(s,u,k,D){let E;switch(s.key){case"ArrowRight":if(h)break;s.preventDefault(),E=v[u][k+1],l(6,m=E?E.id:m);break;case"ArrowLeft":if(h)break;s.preventDefault(),E=v[u][k-1],l(6,m=E?E.id:m);break;case"ArrowDown":if(h)break;s.preventDefault(),E=v[u+1],l(6,m=E?E[k].id:m);break;case"ArrowUp":if(h)break;s.preventDefault(),E=v[u-1],l(6,m=E?E[k].id:m);break;case"Escape":if(!f)break;s.preventDefault(),l(6,m=h),l(7,h=!1);break;case"Enter":if(!f)break;if(s.preventDefault(),s.shiftKey){ie(u),await P();const[sl]=z(D);l(6,m=v[sl+1][k].id)}else h===D?l(7,h=!1):J(D);break;case"Backspace":if(!f)break;h||(s.preventDefault(),l(9,v[u][k].value="",v));break;case"Delete":if(!f)break;h||(s.preventDefault(),l(9,v[u][k].value="",v));break;case"Tab":let H=s.shiftKey?-1:1,Y=v[u][k+H],ue=v?.[u+H]?.[H>0?0:g.length-1],oe=Y||ue;oe&&(s.preventDefault(),l(6,m=oe?oe.id:m)),l(7,h=!1);break;default:(!h||h&&h!==D)&&s.key.length===1&&J(D,!0);break}}async function ke(s){h!==s&&m!==s&&(l(7,h=!1),l(6,m=s))}async function ye(s,u){if(u==="edit"&&typeof s=="string"&&(await P(),d[s].input?.focus()),u==="edit"&&typeof s=="boolean"&&typeof m=="string"){let k=d[m]?.cell;await P(),k?.focus()}if(u==="select"&&typeof s=="string"){const{cell:k}=d[s];await P(),k?.focus()}}let V,le;function Ie(s,u){u==="asc"?l(9,v=v.sort((k,D)=>k[s].value<D[s].value?-1:1)):u==="des"&&l(9,v=v.sort((k,D)=>k[s].value>D[s].value?-1:1))}function ve(s){typeof le!="number"||le!==s?(l(11,V="asc"),l(12,le=s)):V==="asc"?l(11,V="des"):V==="des"&&l(11,V="asc"),Ie(s,V)}let F;function Ae(){if(typeof m=="string"){const s=d[m].input?.value;if(g.find(u=>u.id===m)){let u=g.find(k=>k.id===m);s&&(u.value=s)}else s&&g.push({id:m,value:s})}}async function re(s,u){!f||i[1]!=="dynamic"||h===s||(l(13,F=s),await P(),d[s].input?.focus(),u&&d[s].input?.select())}function 
Ke(s){if(f)switch(s.key){case"Escape":case"Enter":case"Tab":s.preventDefault(),l(6,m=F),l(13,F=!1),Ae();break}}function ie(s){_[1]==="dynamic"&&(v.splice(s?s+1:v.length,0,Array(v[0].length).fill(0).map((u,k)=>{const D=`${v.length}-${k}`;return l(10,d[D]={cell:null,input:null},d),{id:D,value:""}})),l(9,v),l(27,c),l(29,R),l(26,r))}async function Pe(){if(i[1]!=="dynamic")return;for(let u=0;u<v.length;u++){const k=`${u}-${v[u].length}`;l(10,d[k]={cell:null,input:null},d),v[u].push({id:k,value:""})}const s=`h-${g.length}`;l(10,d[s]={cell:null,input:null},d),g.push({id:s,value:`Header ${g.length+1}`}),l(9,v),l(27,c),l(29,R),l(26,r),l(8,g),l(26,r),l(28,N),l(27,c),await P(),re(s,!0)}function Ye(s){typeof h=="string"&&d[h]&&d[h].cell!==s.target&&!d[h].cell?.contains(s?.target)&&l(7,h=!1),typeof F=="string"&&d[F]&&d[F].cell!==s.target&&!d[F].cell?.contains(s.target)&&(l(6,m=F),l(13,F=!1),Ae(),l(13,F=!1))}function De(s){const u=new FileReader;function k(D){if(!D?.target?.result||typeof D.target.result!="string")return;const[E]=Dl(D.target.result,[","," "]),[H,...Y]=gl(E).parseRows(D.target.result);l(8,g=C(i[1]==="fixed"?H.slice(0,i[0]):H)),l(27,c=Y),u.removeEventListener("loadend",k)}u.addEventListener("loadend",k),u.readAsText(s)}let fe=!1;function je(s,u){a.$$.not_equal(d[u].input,s)&&(d[u].input=s,l(10,d))}const Ge=s=>re(s),Qe=s=>ve(s);function Ve(s,u){Z[s?"unshift":"push"](()=>{d[u].cell=s,l(10,d)})}function Ze(s,u,k,D){k[D].value=s,l(9,v),l(27,c),l(29,R),l(26,r)}function We(s,u){a.$$.not_equal(d[u].input,s)&&(d[u].input=s,l(10,d))}function Xe(s,u){Z[s?"unshift":"push"](()=>{d[u].cell=s,l(10,d)})}const xe=s=>J(s),$e=s=>ke(s),el=s=>J(s),ll=(s,u,k,D)=>we(D,s,u,k);function tl(s){fe=s,l(14,fe)}const nl=s=>De(Nl(s.detail.data)),al=()=>ie();return a.$$set=s=>{"datatype"in s&&l(0,t=s.datatype),"label"in s&&l(1,n=s.label),"headers"in s&&l(26,r=s.headers),"values"in s&&l(27,c=s.values),"col_count"in s&&l(2,i=s.col_count),"row_count"in s&&l(3,_=s.row_count),"editable"in s&&l(4,f=s.editable),"wrap"in s&&l(5,o=s.wrap)},a.$$.update=()=>{if(a.$$.dirty[0]&201326592&&(c&&!Array.isArray(c)?(l(26,r=c.headers),l(27,c=c.data.length===0?[Array(r.length).fill("")]:c.data),l(6,m=!1)):c===null&&(l(27,c=[Array(r.length).fill("")]),l(6,m=!1))),a.$$.dirty[0]&64&&m!==!1){const s=m.split("-"),u=parseInt(s[0]),k=parseInt(s[1]);!isNaN(u)&&!isNaN(k)&&p("select",{index:[u,k],value:A(u,k)})}a.$$.dirty[0]&335544320&&(se(r,N)||(l(8,g=C(r)),l(28,N=r),L())),a.$$.dirty[0]&671088640&&(se(c,R)||(l(9,v=y(c)),l(29,R=c),L())),a.$$.dirty[0]&768&&g&&p("change",{data:v.map(s=>s.map(({value:u})=>u)),headers:g.map(s=>s.value)}),a.$$.dirty[0]&128&&ye(h,"edit"),a.$$.dirty[0]&64&&ye(m,"select")},[t,n,i,_,f,o,m,h,g,v,d,V,le,F,fe,w,J,we,ke,ve,re,Ke,ie,Pe,Ye,De,r,c,N,R,je,Ge,Qe,Ve,Ze,We,Xe,xe,$e,el,ll,tl,nl,al]}class Ll extends he{constructor(e){super(),ge(this,e,El,Al,me,{datatype:0,label:1,headers:26,values:27,col_count:2,row_count:3,editable:4,wrap:5},null,[-1,-1])}}function ql(a){let e,l,t,n;const r=[a[13]];let c={};for(let i=0;i<r.length;i+=1)c=ul(c,r[i]);return e=new ol({props:c}),t=new Ll({props:{label:a[8],row_count:a[7],col_count:a[6],values:a[0],headers:a[1],editable:a[5]==="dynamic",wrap:a[9],datatype:a[10]}}),t.$on("change",a[16]),t.$on("select",a[17]),{c(){j(e.$$.fragment),l=K(),j(t.$$.fragment)},m(i,_){G(e,i,_),O(i,l,_),G(t,i,_),n=!0},p(i,_){const f=_&8192?cl(r,[_l(i[13])]):{};e.$set(f);const 
o={};_&256&&(o.label=i[8]),_&128&&(o.row_count=i[7]),_&64&&(o.col_count=i[6]),_&1&&(o.values=i[0]),_&2&&(o.headers=i[1]),_&32&&(o.editable=i[5]==="dynamic"),_&512&&(o.wrap=i[9]),_&1024&&(o.datatype=i[10]),t.$set(o)},i(i){n||(M(e.$$.fragment,i),M(t.$$.fragment,i),n=!0)},o(i){U(e.$$.fragment,i),U(t.$$.fragment,i),n=!1},d(i){i&&S(l),Q(e,i),Q(t,i)}}}function Bl(a){let e,l;return e=new dl({props:{visible:a[4],padding:!1,elem_id:a[2],elem_classes:a[3],container:!1,scale:a[11],min_width:a[12],allow_overflow:!1,$$slots:{default:[ql]},$$scope:{ctx:a}}}),{c(){j(e.$$.fragment)},m(t,n){G(e,t,n),l=!0},p(t,[n]){const r={};n&16&&(r.visible=t[4]),n&4&&(r.elem_id=t[2]),n&8&&(r.elem_classes=t[3]),n&2048&&(r.scale=t[11]),n&4096&&(r.min_width=t[12]),n&1058787&&(r.$$scope={dirty:n,ctx:t}),e.$set(r)},i(t){l||(M(e.$$.fragment,t),l=!0)},o(t){U(e.$$.fragment,t),l=!1},d(t){Q(e,t)}}}function Ml(a,e,l){let{headers:t=[]}=e,{elem_id:n=""}=e,{elem_classes:r=[]}=e,{visible:c=!0}=e,{value:i={data:[["","",""]],headers:["1","2","3"]}}=e,_=JSON.stringify(i),{value_is_output:f=!1}=e,{mode:o}=e,{col_count:m}=e,{row_count:p}=e,{label:h=null}=e,{wrap:A}=e,{datatype:d}=e,{scale:C=null}=e,{min_width:y=void 0}=e;const g=Fe();let{loading_status:N}=e;function L(){g("change",i),f||g("input")}fl(()=>{l(14,f=!1)});const v=({detail:w})=>{l(0,i=w)};function R(w){ce.call(this,a,w)}return a.$$set=w=>{"headers"in w&&l(1,t=w.headers),"elem_id"in w&&l(2,n=w.elem_id),"elem_classes"in w&&l(3,r=w.elem_classes),"visible"in w&&l(4,c=w.visible),"value"in w&&l(0,i=w.value),"value_is_output"in w&&l(14,f=w.value_is_output),"mode"in w&&l(5,o=w.mode),"col_count"in w&&l(6,m=w.col_count),"row_count"in w&&l(7,p=w.row_count),"label"in w&&l(8,h=w.label),"wrap"in w&&l(9,A=w.wrap),"datatype"in w&&l(10,d=w.datatype),"scale"in w&&l(11,C=w.scale),"min_width"in w&&l(12,y=w.min_width),"loading_status"in w&&l(13,N=w.loading_status)},a.$$.update=()=>{a.$$.dirty&32769&&JSON.stringify(i)!==_&&(l(15,_=JSON.stringify(i)),L())},[i,t,n,r,c,o,m,p,h,A,d,C,y,N,f,_,v,R]}class Ol extends he{constructor(e){super(),ge(this,e,Ml,Bl,me,{headers:1,elem_id:2,elem_classes:3,visible:4,value:0,value_is_output:14,mode:5,col_count:6,row_count:7,label:8,wrap:9,datatype:10,scale:11,min_width:12,loading_status:13})}}const zl=Ol,Fl=["static","dynamic"],Hl=a=>({type:{payload:"{ data: Array<Array<string | number>>; headers: Array<string> }"},description:{payload:"an object with an array of data and an array of headers"},example_data:a.value});export{zl as Component,Hl as document,Fl as modes};
- //# sourceMappingURL=index-bacb8946.js.map
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio_client/__init__.py DELETED
@@ -1,7 +0,0 @@
- from gradio_client.client import Client
- from gradio_client.utils import __version__
-
- __all__ = [
-     "Client",
-     "__version__",
- ]
 
 
 
 
 
 
 
 
spaces/Docfile/open_llm_leaderboard/src/display_models/model_metadata_flags.py DELETED
@@ -1,18 +0,0 @@
- # Models which have been flagged by users as being problematic for a reason or another
- # (Model name to forum discussion link)
- FLAGGED_MODELS = {
-     "Voicelab/trurl-2-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/202",
-     "deepnight-research/llama-2-70B-inst": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/207",
-     "Aspik101/trurl-2-13b-pl-instruct_unload": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/213",
-     "Fredithefish/ReasonixPajama-3B-HF": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/236",
-     "TigerResearch/tigerbot-7b-sft-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/237",
-     "gaodrew/gaodrew-gorgonzola-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/215",
-     "AIDC-ai-business/Marcoroni-70B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
-     "AIDC-ai-business/Marcoroni-13B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
-     "AIDC-ai-business/Marcoroni-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287",
- }
-
- # Models which have been requested by orgs to not be submitted on the leaderboard
- DO_NOT_SUBMIT_MODELS = [
-     "Voicelab/trurl-2-13b",  # trained on MMLU
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DragGan/DragGan-Inversion/PTI/utils/log_utils.py DELETED
@@ -1,79 +0,0 @@
- import numpy as np
- from PIL import Image
- import wandb
- from PTI.configs import global_config
- import torch
- import matplotlib.pyplot as plt
-
-
- def log_image_from_w(w, G, name):
-     img = get_image_from_w(w, G)
-     pillow_image = Image.fromarray(img)
-     wandb.log(
-         {f"{name}": [
-             wandb.Image(pillow_image, caption=f"current inversion {name}")]},
-         step=global_config.training_step)
-
-
- def log_images_from_w(ws, G, names):
-     for name, w in zip(names, ws):
-         w = w.to(global_config.device)
-         log_image_from_w(w, G, name)
-
-
- def plot_image_from_w(w, G):
-     img = get_image_from_w(w, G)
-     pillow_image = Image.fromarray(img)
-     plt.imshow(pillow_image)
-     plt.show()
-
-
- def plot_image(img):
-     img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).detach().cpu().numpy()
-     pillow_image = Image.fromarray(img[0])
-     plt.imshow(pillow_image)
-     plt.show()
-
-
- def save_image(name, method_type, results_dir, image, run_id):
-     image.save(f'{results_dir}/{method_type}_{name}_{run_id}.jpg')
-
-
- def save_w(w, G, name, method_type, results_dir):
-     im = get_image_from_w(w, G)
-     im = Image.fromarray(im, mode='RGB')
-     save_image(name, method_type, results_dir, im)
-
-
- def save_concat_image(base_dir, image_latents, new_inv_image_latent, new_G,
-                       old_G,
-                       file_name,
-                       extra_image=None):
-     images_to_save = []
-     if extra_image is not None:
-         images_to_save.append(extra_image)
-     for latent in image_latents:
-         images_to_save.append(get_image_from_w(latent, old_G))
-     images_to_save.append(get_image_from_w(new_inv_image_latent, new_G))
-     result_image = create_alongside_images(images_to_save)
-     result_image.save(f'{base_dir}/{file_name}.jpg')
-
-
- def save_single_image(base_dir, image_latent, G, file_name):
-     image_to_save = get_image_from_w(image_latent, G)
-     image_to_save = Image.fromarray(image_to_save, mode='RGB')
-     image_to_save.save(f'{base_dir}/{file_name}.jpg')
-
-
- def create_alongside_images(images):
-     res = np.concatenate([np.array(image) for image in images], axis=1)
-     return Image.fromarray(res, mode='RGB')
-
-
- def get_image_from_w(w, G):
-     if len(w.size()) <= 2:
-         w = w.unsqueeze(0)
-     with torch.no_grad():
-         img = G.synthesis(w, noise_mode='const')
-         img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).detach().cpu().numpy()
-     return img[0]
 
spaces/EPFL-VILAB/MultiMAE/utils/checkpoint.py DELETED
@@ -1,152 +0,0 @@
- # --------------------------------------------------------
- # Based on the timm and MAE-priv code base
- # https://github.com/rwightman/pytorch-image-models/tree/master/timm
- # https://github.com/BUPT-PRIV/MAE-priv
- # --------------------------------------------------------
- import io
- import os
- from pathlib import Path
-
- import torch
-
- from .dist import save_on_master
- from .model import get_state_dict
-
-
- def _load_checkpoint_for_ema(model_ema, checkpoint):
-     """
-     Workaround for ModelEma._load_checkpoint to accept an already-loaded object
-     """
-     mem_file = io.BytesIO()
-     torch.save(checkpoint, mem_file)
-     mem_file.seek(0)
-     model_ema._load_checkpoint(mem_file)
-
-
- def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"):
-     missing_keys = []
-     unexpected_keys = []
-     error_msgs = []
-     # copy state_dict so _load_from_state_dict can modify it
-     metadata = getattr(state_dict, '_metadata', None)
-     state_dict = state_dict.copy()
-     if metadata is not None:
-         state_dict._metadata = metadata
-
-     def load(module, prefix=''):
-         local_metadata = {} if metadata is None else metadata.get(
-             prefix[:-1], {})
-         module._load_from_state_dict(
-             state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
-         for name, child in module._modules.items():
-             if child is not None:
-                 load(child, prefix + name + '.')
-
-     load(model, prefix=prefix)
-
-     warn_missing_keys = []
-     ignore_missing_keys = []
-     for key in missing_keys:
-         keep_flag = True
-         for ignore_key in ignore_missing.split('|'):
-             if ignore_key in key:
-                 keep_flag = False
-                 break
-         if keep_flag:
-             warn_missing_keys.append(key)
-         else:
-             ignore_missing_keys.append(key)
-
-     missing_keys = warn_missing_keys
-
-     if len(missing_keys) > 0:
-         print("Weights of {} not initialized from pretrained model: {}".format(
-             model.__class__.__name__, missing_keys))
-     if len(unexpected_keys) > 0:
-         print("Weights from pretrained model not used in {}: {}".format(
-             model.__class__.__name__, unexpected_keys))
-     if len(ignore_missing_keys) > 0:
-         print("Ignored weights of {} not initialized from pretrained model: {}".format(
-             model.__class__.__name__, ignore_missing_keys))
-     if len(error_msgs) > 0:
-         print('\n'.join(error_msgs))
-
-
- def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, loss_balancer=None, model_ema=None):
-     output_dir = Path(args.output_dir)
-     epoch_name = str(epoch)
-     if loss_scaler is not None:
-         checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
-         for checkpoint_path in checkpoint_paths:
-             to_save = {
-                 'model': model_without_ddp.state_dict(),
-                 'optimizer': optimizer.state_dict(),
-                 'epoch': epoch,
-                 'scaler': loss_scaler.state_dict(),
-                 'args': args
-             }
-
-             if loss_balancer is not None:
-                 to_save['loss_balancer'] = loss_balancer.state_dict()
-
-             if model_ema is not None:
-                 to_save['model_ema'] = get_state_dict(model_ema)
-
-             save_on_master(to_save, checkpoint_path)
-     else:
-         client_state = {'epoch': epoch}
-         if model_ema is not None:
-             client_state['model_ema'] = get_state_dict(model_ema)
-         model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state)
-
-
- def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
-     output_dir = Path(args.output_dir)
-     if loss_scaler is not None:
-         # torch.amp
-         if args.auto_resume and len(args.resume) == 0:
-             import glob
-             all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
-             latest_ckpt = -1
-             for ckpt in all_checkpoints:
-                 t = ckpt.split('-')[-1].split('.')[0]
-                 if t.isdigit():
-                     latest_ckpt = max(int(t), latest_ckpt)
-             if latest_ckpt >= 0:
-                 args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt)
-             print("Auto resume checkpoint: %s" % args.resume)
-
-         if args.resume:
-             if args.resume.startswith('https'):
-                 checkpoint = torch.hub.load_state_dict_from_url(
-                     args.resume, map_location='cpu')
-             else:
-                 checkpoint = torch.load(args.resume, map_location='cpu')
-             model_without_ddp.load_state_dict(checkpoint['model'])
-             print("Resume checkpoint %s" % args.resume)
-             if 'optimizer' in checkpoint and 'epoch' in checkpoint:
-                 optimizer.load_state_dict(checkpoint['optimizer'])
-                 args.start_epoch = checkpoint['epoch'] + 1
-                 if hasattr(args, 'model_ema') and args.model_ema:
-                     _load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
-                 if 'scaler' in checkpoint:
-                     loss_scaler.load_state_dict(checkpoint['scaler'])
-                 print("With optim & sched!")
-     else:
-         # deepspeed, only support '--auto_resume'.
-         if args.auto_resume:
-             import glob
-             all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*'))
-             latest_ckpt = -1
-             for ckpt in all_checkpoints:
-                 t = ckpt.split('-')[-1].split('.')[0]
-                 if t.isdigit():
-                     latest_ckpt = max(int(t), latest_ckpt)
-             if latest_ckpt >= 0:
-                 args.resume = os.path.join(output_dir, 'checkpoint-%d' % latest_ckpt)
-                 print("Auto resume checkpoint: %d" % latest_ckpt)
-                 _, client_states = model.load_checkpoint(args.output_dir, tag='checkpoint-%d' % latest_ckpt)
-                 args.start_epoch = client_states['epoch'] + 1
-                 if model_ema is not None:
-                     if args.model_ema:
-                         _load_checkpoint_for_ema(model_ema, client_states['model_ema'])
 
spaces/Flux9665/IMS-Toucan/app.py DELETED
@@ -1,160 +0,0 @@
- import os
-
- import gradio as gr
- import numpy as np
- import torch
-
- from InferenceInterfaces.Meta_FastSpeech2 import Meta_FastSpeech2
-
- def float2pcm(sig, dtype='int16'):
-     """
-     https://gist.github.com/HudsonHuang/fbdf8e9af7993fe2a91620d3fb86a182
-     """
-     sig = np.asarray(sig)
-     if sig.dtype.kind != 'f':
-         raise TypeError("'sig' must be a float array")
-     dtype = np.dtype(dtype)
-     if dtype.kind not in 'iu':
-         raise TypeError("'dtype' must be an integer type")
-     i = np.iinfo(dtype)
-     abs_max = 2 ** (i.bits - 1)
-     offset = i.min + abs_max
-     return (sig * abs_max + offset).clip(i.min, i.max).astype(dtype)
-
-
- class TTS_Interface:
-
-     def __init__(self):
-         self.device = "cuda" if torch.cuda.is_available() else "cpu"
-         self.model = Meta_FastSpeech2(device=self.device)
-         self.current_speaker = "English Speaker's Voice"
-         self.current_language = "English"
-         self.current_accent = "English"
-         self.language_id_lookup = {
-             "English"   : "en",
-             "German"    : "de",
-             "Greek"     : "el",
-             "Spanish"   : "es",
-             "Finnish"   : "fi",
-             "Russian"   : "ru",
-             "Hungarian" : "hu",
-             "Dutch"     : "nl",
-             "French"    : "fr",
-             'Polish'    : "pl",
-             'Portuguese': "pt",
-             'Italian'   : "it",
-         }
-         self.speaker_path_lookup = {
-             "English Speaker's Voice"   : "reference_audios/english.wav",
-             "German Speaker's Voice"    : "reference_audios/german.wav",
-             "Greek Speaker's Voice"     : "reference_audios/greek.wav",
-             "Spanish Speaker's Voice"   : "reference_audios/spanish.wav",
-             "Finnish Speaker's Voice"   : "reference_audios/finnish.wav",
-             "Russian Speaker's Voice"   : "reference_audios/russian.wav",
-             "Hungarian Speaker's Voice" : "reference_audios/hungarian.wav",
-             "Dutch Speaker's Voice"     : "reference_audios/dutch.wav",
-             "French Speaker's Voice"    : "reference_audios/french.wav",
-             "Polish Speaker's Voice"    : "reference_audios/polish.flac",
-             "Portuguese Speaker's Voice": "reference_audios/portuguese.flac",
-             "Italian Speaker's Voice"   : "reference_audios/italian.flac",
-         }
-         self.model.set_utterance_embedding(self.speaker_path_lookup[self.current_speaker])
-
-     def read(self, prompt, language, accent, speaker):
-         language = language.split()[0]
-         accent = accent.split()[0]
-         if self.current_language != language:
-             self.model.set_phonemizer_language(self.language_id_lookup[language])
-             self.current_language = language
-         if self.current_accent != accent:
-             self.model.set_accent_language(self.language_id_lookup[accent])
-             self.current_accent = accent
-         if self.current_speaker != speaker:
-             self.model.set_utterance_embedding(self.speaker_path_lookup[speaker])
-             self.current_speaker = speaker
-
-         phones = self.model.text2phone.get_phone_string(prompt)
-         if len(phones) > 1800:
-             if language == "English":
-                 prompt = "Your input was too long. Please try either a shorter text or split it into several parts."
-             elif language == "German":
-                 prompt = "Deine Eingabe war zu lang. Bitte versuche es entweder mit einem kürzeren Text oder teile ihn in mehrere Teile auf."
-             elif language == "Greek":
-                 prompt = "Η εισήγησή σας ήταν πολύ μεγάλη. Παρακαλώ δοκιμάστε είτε ένα μικρότερο κείμενο είτε χωρίστε το σε διάφορα μέρη."
-             elif language == "Spanish":
-                 prompt = "Su entrada es demasiado larga. Por favor, intente un texto más corto o divídalo en varias partes."
-             elif language == "Finnish":
-                 prompt = "Vastauksesi oli liian pitkä. Kokeile joko lyhyempää tekstiä tai jaa se useampaan osaan."
-             elif language == "Russian":
-                 prompt = "Ваш текст слишком длинный. Пожалуйста, попробуйте либо сократить текст, либо разделить его на несколько частей."
-             elif language == "Hungarian":
-                 prompt = "Túl hosszú volt a bevitele. Kérjük, próbáljon meg rövidebb szöveget írni, vagy ossza több részre."
-             elif language == "Dutch":
-                 prompt = "Uw input was te lang. Probeer een kortere tekst of splits het in verschillende delen."
-             elif language == "French":
-                 prompt = "Votre saisie était trop longue. Veuillez essayer un texte plus court ou le diviser en plusieurs parties."
-             elif language == 'Polish':
-                 prompt = "Twój wpis był zbyt długi. Spróbuj skrócić tekst lub podzielić go na kilka części."
-             elif language == 'Portuguese':
-                 prompt = "O seu contributo foi demasiado longo. Por favor, tente um texto mais curto ou divida-o em várias partes."
-             elif language == 'Italian':
-                 prompt = "Il tuo input era troppo lungo. Per favore, prova un testo più corto o dividilo in più parti."
-             phones = self.model.text2phone.get_phone_string(prompt)
-
-         wav = self.model(phones)
-         return 48000, float2pcm(wav.cpu().numpy())
-
-
- meta_model = TTS_Interface()
- article = "<p style='text-align: left'>This is still a work in progress, models will be exchanged for better ones as soon as they are done. All of those languages are spoken by a single model. Speakers can be transferred across languages. More languages will be added soon. If you just want to listen to some pregenerated audios <a href='https://multilingualtoucan.github.io/' target='_blank'>click here.</a></p><p style='text-align: center'><a href='https://github.com/DigitalPhonetics/IMS-Toucan' target='_blank'>Click here to learn more about the IMS Toucan Speech Synthesis Toolkit</a></p>"
-
- iface = gr.Interface(fn=meta_model.read,
-                      inputs=[gr.inputs.Textbox(lines=2,
-                                                placeholder="write what you want the synthesis to read here... \n(to prevent out of memory errors, too long inputs get replaced with a placeholder)",
-                                                label="Text input"),
-                              gr.inputs.Dropdown(['English Text',
-                                                  'German Text',
-                                                  'Greek Text',
-                                                  'Spanish Text',
-                                                  'Finnish Text',
-                                                  'Russian Text',
-                                                  'Hungarian Text',
-                                                  'Dutch Text',
-                                                  'French Text',
-                                                  'Polish Text',
-                                                  'Portuguese Text',
-                                                  'Italian Text'], type="value", default='English Text', label="Select the Language of the Text"),
-                              gr.inputs.Dropdown(['English Accent',
-                                                  'German Accent',
-                                                  'Greek Accent',
-                                                  'Spanish Accent',
-                                                  'Finnish Accent',
-                                                  'Russian Accent',
-                                                  'Hungarian Accent',
-                                                  'Dutch Accent',
-                                                  'French Accent',
-                                                  'Polish Accent',
-                                                  'Portuguese Accent',
-                                                  'Italian Accent'], type="value", default='English Accent', label="Select the Accent of the Speaker"),
-                              gr.inputs.Dropdown(["English Speaker's Voice",
-                                                  "German Speaker's Voice",
-                                                  "Greek Speaker's Voice",
-                                                  "Spanish Speaker's Voice",
-                                                  "Finnish Speaker's Voice",
-                                                  "Russian Speaker's Voice",
-                                                  "Hungarian Speaker's Voice",
-                                                  "Dutch Speaker's Voice",
-                                                  "French Speaker's Voice",
-                                                  "Polish Speaker's Voice",
-                                                  "Portuguese Speaker's Voice",
-                                                  "Italian Speaker's Voice"], type="value", default="English Speaker's Voice", label="Select the Voice of the Speaker")],
-                      outputs=gr.outputs.Audio(type="numpy", label=None),
-                      layout="vertical",
-                      title="IMS Toucan - Multilingual Multispeaker",
-                      thumbnail="Utility/toucan.png",
-                      theme="default",
-                      allow_flagging="never",
-                      allow_screenshot=False,
-                      article=article)
- iface.launch(enable_queue=True)
 
spaces/FrankZxShen/so-vits-svc-models-pcr/diffusion/unit2mel.py DELETED
@@ -1,100 +0,0 @@
- import os
- import yaml
- import torch
- import torch.nn as nn
- import numpy as np
- from .diffusion import GaussianDiffusion
- from .wavenet import WaveNet
- from .vocoder import Vocoder
-
- class DotDict(dict):
-     def __getattr__(*args):
-         val = dict.get(*args)
-         return DotDict(val) if type(val) is dict else val
-
-     __setattr__ = dict.__setitem__
-     __delattr__ = dict.__delitem__
-
-
- def load_model_vocoder(
-         model_path,
-         device='cpu',
-         config_path = None
-         ):
-     if config_path is None: config_file = os.path.join(os.path.split(model_path)[0], 'config.yaml')
-     else: config_file = config_path
-
-     with open(config_file, "r") as config:
-         args = yaml.safe_load(config)
-     args = DotDict(args)
-
-     # load vocoder
-     vocoder = Vocoder(args.vocoder.type, args.vocoder.ckpt, device=device)
-
-     # load model
-     model = Unit2Mel(
-                 args.data.encoder_out_channels,
-                 args.model.n_spk,
-                 args.model.use_pitch_aug,
-                 vocoder.dimension,
-                 args.model.n_layers,
-                 args.model.n_chans,
-                 args.model.n_hidden)
-
-     print(' [Loading] ' + model_path)
-     ckpt = torch.load(model_path, map_location=torch.device(device))
-     model.to(device)
-     model.load_state_dict(ckpt['model'])
-     model.eval()
-     return model, vocoder, args
-
-
- class Unit2Mel(nn.Module):
-     def __init__(
-             self,
-             input_channel,
-             n_spk,
-             use_pitch_aug=False,
-             out_dims=128,
-             n_layers=20,
-             n_chans=384,
-             n_hidden=256):
-         super().__init__()
-         self.unit_embed = nn.Linear(input_channel, n_hidden)
-         self.f0_embed = nn.Linear(1, n_hidden)
-         self.volume_embed = nn.Linear(1, n_hidden)
-         if use_pitch_aug:
-             self.aug_shift_embed = nn.Linear(1, n_hidden, bias=False)
-         else:
-             self.aug_shift_embed = None
-         self.n_spk = n_spk
-         if n_spk is not None and n_spk > 1:
-             self.spk_embed = nn.Embedding(n_spk, n_hidden)
-
-         # diffusion
-         self.decoder = GaussianDiffusion(WaveNet(out_dims, n_layers, n_chans, n_hidden), out_dims=out_dims)
-
-     def forward(self, units, f0, volume, spk_id = None, spk_mix_dict = None, aug_shift = None,
-                 gt_spec=None, infer=True, infer_speedup=10, method='dpm-solver', k_step=300, use_tqdm=True):
-
-         '''
-         input:
-             B x n_frames x n_unit
-         return:
-             dict of B x n_frames x feat
-         '''
-
-         x = self.unit_embed(units) + self.f0_embed((1+ f0 / 700).log()) + self.volume_embed(volume)
-         if self.n_spk is not None and self.n_spk > 1:
-             if spk_mix_dict is not None:
-                 for k, v in spk_mix_dict.items():
-                     spk_id_torch = torch.LongTensor(np.array([[k]])).to(units.device)
-                     x = x + v * self.spk_embed(spk_id_torch)
-             else:
-                 x = x + self.spk_embed(spk_id)
-         if self.aug_shift_embed is not None and aug_shift is not None:
-             x = x + self.aug_shift_embed(aug_shift / 5)
-         x = self.decoder(x, gt_spec=gt_spec, infer=infer, infer_speedup=infer_speedup, method=method, k_step=k_step, use_tqdm=use_tqdm)
-
-         return x
-