parquet-converter committed commit 0d0245c (1 parent: 12904ea)

Update parquet files (step 13 of 296)

This view is limited to 50 files because it contains too many changes; see the raw diff for the full set.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Corel Draw X5 Download For Pc 64 Bit With Crack.md +0 -23
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Counter Strike 1.6 Orange Box Download The Ultimate Collection of Valve Games.md +0 -92
  3. spaces/1gistliPinn/ChatGPT4/Examples/AUTODATA 8.89 Crack FULL 2018 64 Bit TOP.md +0 -8
  4. spaces/1gistliPinn/ChatGPT4/Examples/Ayurved Sar Sangrah Book Zip.md +0 -6
  5. spaces/1phancelerku/anime-remove-background/Dream Live APK Mod - The Best App for Live Streaming Fans (No Top Up Required).md +0 -103
  6. spaces/1toTree/lora_test/ppdiffusers/pipelines/ddim/pipeline_ddim.py +0 -116
  7. spaces/801artistry/RVC801/Applio-RVC-Fork/utils/backups.py +0 -141
  8. spaces/A00001/bingothoo/src/components/chat-panel.tsx +0 -153
  9. spaces/AIConsultant/MusicGen/CHANGELOG.md +0 -28
  10. spaces/AIFILMS/generate_human_motion/VQ-Trans/dataset/prepare/download_extractor.sh +0 -15
  11. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/diffusionmodules/model.py +0 -835
  12. spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/models/autoencoder_multi.py +0 -201
  13. spaces/AP123/dreamgaussian/grid_put.py +0 -300
  14. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/README.md +0 -140
  15. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/tester.py +0 -63
  16. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Liaobots.py +0 -106
  17. spaces/Aditya757864/SentimentAnalysis/app.py +0 -14
  18. spaces/Aditya9790/yolo7-object-tracking/train.py +0 -705
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/methods/Methods.js +0 -9
  20. spaces/AlexWang/lama/bin/paper_runfiles/generate_test_paris.sh +0 -17
  21. spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/tests/test_numeric_batchnorm.py +0 -56
  22. spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/unittest.py +0 -29
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/unidiffuser.md +0 -194
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/using-diffusers/using_safetensors.md +0 -14
  25. spaces/Andy1621/IAT_enhancement/model/IAT.py +0 -126
  26. spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py +0 -5
  27. spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py +0 -55
  28. spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/pspnet_r50-d8.py +0 -44
  29. spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py +0 -2
  30. spaces/AndyCer/TehVenom-MPT-7b-Chat-Instruct-LongCTX-Merge/app.py +0 -3
  31. spaces/AnishKumbhar/DogDiseasePredictor/Dockerfile +0 -27
  32. spaces/AnjaneyuluChinni/AnjiChinniGenAIAvatar/README.md +0 -12
  33. spaces/Artrajz/vits-simple-api/config.py +0 -109
  34. spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/util/logger.py +0 -93
  35. spaces/Asahi402/White-box-Cartoonization/README.md +0 -15
  36. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/gb2312freq.py +0 -284
  37. spaces/Atualli/yoloxTeste/configs/__init__.py +0 -0
  38. spaces/Benson/text-generation/Examples/Arena Breakout Beta Global Descargar.md +0 -73
  39. spaces/Benson/text-generation/Examples/Caso Penal Pacfico Baha Mod Men Apk.md +0 -116
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/utils.py +0 -71
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/version.py +0 -6
  42. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/dev/linter.sh +0 -46
  43. spaces/CVPR/SPOTER_Sign_Language_Recognition/spoter_mod/utils.py +0 -41
  44. spaces/CVPR/WALT/mmdet/core/bbox/assigners/max_iou_assigner.py +0 -212
  45. spaces/CarlDennis/HYTTS/README.md +0 -13
  46. spaces/CjangCjengh/Sanskrit-TTS/README.md +0 -13
  47. spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/midas_net.py +0 -76
  48. spaces/DaFujaTyping/hf-Chat-ui/src/lib/server/database.ts +0 -31
  49. spaces/DaFujaTyping/hf-Chat-ui/src/lib/utils/sha256.ts +0 -7
  50. spaces/DaleChen/AutoGPT/autogpt/token_counter.py +0 -73
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Corel Draw X5 Download For Pc 64 Bit With Crack.md DELETED
@@ -1,23 +0,0 @@
- <br />
- <h1>How to Download and Install Corel Draw X5 for PC 64 Bit</h1>
- <p>Corel Draw X5 is a powerful vector graphics software that can help you create stunning designs for logos, flyers, posters, banners, and more. If you want to download and install Corel Draw X5 for PC 64 bit, here are the steps you need to follow:</p>
- <ol>
- <li>Visit the official website of Corel Draw and click on the "Download Trial" button.</li>
- <li>Select the "CorelDRAW Graphics Suite X5" option and enter your email address to get the download link.</li>
- <li>Open the link in your email and click on the "Download Now" button to start downloading the setup file.</li>
- <li>Once the download is complete, run the setup file and follow the instructions on the screen to install Corel Draw X5 on your PC.</li>
- <li>You can use Corel Draw X5 for free for 15 days with full features. After that, you need to purchase a license key to activate the software.</li>
- </ol>
- <p>Congratulations! You have successfully downloaded and installed Corel Draw X5 for PC 64 bit. Now you can enjoy creating amazing graphics with this software.</p>
- <h2>corel draw x5 download for pc 64 bit with crack</h2><br /><p><b><b>Download Zip</b> &ndash;&ndash;&ndash; <a href="https://byltly.com/2uKyOS">https://byltly.com/2uKyOS</a></b></p><br /><br /><p>Corel Draw X5 has many features and tools that can help you create professional-looking graphics. Some of the features include:</p>
- <ul>
- <li>A redesigned user interface that is more intuitive and customizable.</li>
- <li>A new Corel Connect tool that lets you access online content and resources from within the software.</li>
- <li>A new Corel PowerTRACE tool that lets you convert bitmap images into vector graphics with ease.</li>
- <li>A new Corel Photo-Paint tool that lets you edit and enhance photos with filters, effects, and adjustments.</li>
- <li>A new Corel Website Creator tool that lets you design and publish websites with drag-and-drop functionality.</li>
- </ul>
- <p>With Corel Draw X5, you can also export your graphics to various formats, such as PDF, JPG, PNG, SVG, EPS, and more. You can also optimize your graphics for web or print by adjusting the resolution, color mode, and compression settings. You can also use the built-in templates and clipart to get started quickly.</p><p>If you want to learn more about Corel Draw X5, you can visit the official website or watch the tutorials and videos available online. You can also join the Corel community and get tips and feedback from other users. Corel Draw X5 is a versatile and powerful software that can help you unleash your creativity and express your ideas visually.</p><p>Are you ready to try Corel Draw X5 for yourself? If so, don't wait any longer and download the free trial today. You will be amazed by what you can create with this software. Whether you are a beginner or a professional, Corel Draw X5 has something for everyone. Download Corel Draw X5 for PC 64 bit now and start creating stunning graphics.</p>
- <p></p> ddb901b051<br />
- <br />
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Counter Strike 1.6 Orange Box Download The Ultimate Collection of Valve Games.md DELETED
@@ -1,92 +0,0 @@
-
- <h1>Counter Strike 1.6 Orange Box Download</h1>
- <p>If you are a fan of first-person shooter games, you have probably heard of <b>Counter Strike 1.6</b>, one of the most popular and influential games of all time. Counter Strike 1.6 is a multiplayer game that pits two teams of terrorists and counter-terrorists against each other in various scenarios and maps. The game is known for its fast-paced action, realistic physics, tactical gameplay, and competitive community.</p>
- <p>But did you know that you can also play Counter Strike 1.6 with some extra features and benefits? That's right, there is a version of the game called <b>Counter Strike 1.6 Orange Box</b>, which is based on the famous <b>Orange Box</b> bundle released by Valve in 2007. The Orange Box is a set of five games that use the Source engine: Half-Life 2, Half-Life 2 Episode One, Half-Life 2 Episode Two, Portal, and Team Fortress 2.</p>
- <h2>Counter Strike 1.6 Orange Box Download</h2><br /><p><b><b>DOWNLOAD</b> &#9913;&#9913;&#9913; <a href="https://byltly.com/2uKwio">https://byltly.com/2uKwio</a></b></p><br /><br />
- <p>In this article, we will show you how to download Counter Strike 1.6 Orange Box for free, what are its features, and how to install and play it on your computer. So, if you are ready to experience one of the best versions of Counter Strike 1.6 ever made, read on!</p>
- <h2>Features of Counter Strike 1.6 Orange Box</h2>
- <p>Counter Strike 1.6 Orange Box is not just a regular version of the game. It has some unique features that make it stand out from other versions. Here are some of them:</p>
- <ul>
- <li><b>Original design and models</b>: The game has the original graphics, sounds, weapons, maps, and characters from Counter Strike 1.6, which give it a classic and nostalgic feel.</li>
- <li><b>English language and standard config</b>: The game is fully translated into English and has a standard configuration file (cfg) that optimizes the game settings for better performance.</li>
- <li><b>Bots and server search</b>: The game has built-in bots (zbots) that you can control with the "H" button. You can also use the online server search function to find servers that suit your preferences.</li>
- <li><b>Protection and performance</b>: The game has a strong protection mechanism that prevents hacking, cheating, or modifying the game files. The game also runs smoothly on any Windows operating system (XP, Vista, 7, 8, or 10).</li>
- </ul>
- <h2>How to install and play Counter Strike 1.6 Orange Box</h2>
- <p>Installing and playing Counter Strike 1.6 Orange Box is very easy and fast. Just follow these simple steps:</p>
- <ol>
- <li><b>Download the setup or torrent file</b>: You can download the game from our website using either a direct link or a torrent link. The file size is about 184 MB.</li>
- <li><b>Run the installer and choose the destination folder</b>: After downloading the file, run the installer and follow the instructions on the screen. You can choose any folder where you want to install the game.</li>
- <li><b>Launch the game and adjust the settings</b>: After installing the game, launch it from your desktop or start menu shortcut. You can adjust your video, audio, keyboard, mouse, and other settings from the options menu.</li>
- <li><b>Join a server or create your own</b>: To play online with other players, you can join any server from the server list or use the "Find servers" button to search for servers by name, map, ping, or players. You can also create your own server by using the "Create server" button and choosing your desired map and game mode.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>Counter Strike 1.6 Orange Box is a great way to enjoy one of the best games ever made with some extra features and benefits. It has original design and models, English language and standard config, bots and server search, protection and performance, and more.</p>
- <p>If you want to download Counter Strike 1.6 Orange Box for free, you can do so from our website using either a direct link or a torrent link. The installation process is very simple and fast.</p>
- <p>How to download Counter Strike 1.6 Orange Box for free<br />
- Counter Strike 1.6 Orange Box full version download link<br />
- Counter Strike 1.6 Orange Box torrent download with crack<br />
- Counter Strike 1.6 Orange Box gameplay and features<br />
- Counter Strike 1.6 Orange Box system requirements and compatibility<br />
- Counter Strike 1.6 Orange Box mods and maps download<br />
- Counter Strike 1.6 Orange Box online multiplayer servers<br />
- Counter Strike 1.6 Orange Box cheats and hacks download<br />
- Counter Strike 1.6 Orange Box update and patch download<br />
- Counter Strike 1.6 Orange Box review and rating<br />
- Best sites to download Counter Strike 1.6 Orange Box<br />
- Counter Strike 1.6 Orange Box vs Counter Strike Source comparison<br />
- Counter Strike 1.6 Orange Box steam key generator download<br />
- Counter Strike 1.6 Orange Box custom skins and models download<br />
- Counter Strike 1.6 Orange Box tips and tricks for beginners<br />
- How to install Counter Strike 1.6 Orange Box on Windows 10<br />
- How to fix Counter Strike 1.6 Orange Box errors and bugs<br />
- How to play Counter Strike 1.6 Orange Box offline mode<br />
- How to create a Counter Strike 1.6 Orange Box server<br />
- How to join a Counter Strike 1.6 Orange Box server<br />
- How to change Counter Strike 1.6 Orange Box language and settings<br />
- How to improve Counter Strike 1.6 Orange Box performance and graphics<br />
- How to uninstall Counter Strike 1.6 Orange Box completely<br />
- How to backup and restore Counter Strike 1.6 Orange Box files<br />
- How to record and edit Counter Strike 1.6 Orange Box videos<br />
- How to stream and watch Counter Strike 1.6 Orange Box on Twitch<br />
- How to earn money by playing Counter Strike 1.6 Orange Box online<br />
- How to rank up and unlock achievements in Counter Strike 1.6 Orange Box<br />
- How to customize and optimize Counter Strike 1.6 Orange Box config file<br />
- How to use console commands and cheats in Counter Strike 1.6 Orange Box<br />
- How to enable and disable bots in Counter Strike 1.6 Orange Box<br />
- How to change crosshair and mouse sensitivity in Counter Strike 1.6 Orange Box<br />
- How to bind keys and macros in Counter Strike 1.6 Orange Box<br />
- How to use voice chat and communicate with teammates in Counter Strike 1.6 Orange Box<br />
- How to mute and report toxic players in Counter Strike 1.6 Orange Box<br />
- How to host a LAN party with friends using Counter Strike 1.6 Orange Box<br />
- How to transfer and share Counter Strike 1.6 Orange Box files with others<br />
- How to download and play custom maps in Counter Strike 1.6 Orange Box<br />
- How to create and submit your own maps for Counter Strike 1.6 Orange Box<br />
- How to find and join the best communities for Counter Strike 1.6 Orange Box players<br />
- How to learn from the pros and watch replays of Counter Strike 1.6 Orange Box matches<br />
- How to master the weapons and strategies in Counter Strike 1.6 Orange Box <br />
- How to train your aim and reflexes in Counter Strike 1.6 Orange Box <br />
- How to deal with hackers and cheaters in Counter Strike 1.6 Orange Box <br />
- How to avoid scams and malware when downloading Counter Strike 1.6 Orange Box <br />
- How to get support and help for any issues with Counter Strike 1.6 Orange Box <br />
- How to access the developer console and debug mode in Counter Strike 1.6 Orange Box <br />
- How to make your own mods and plugins for Counter Strike 1.6 Orange Box <br />
- How to enjoy the nostalgia and fun of playing Counter Strike 1.6 Orange Box </p>
- <p>So what are you waiting for? Download Counter Strike 1.6 Orange Box today and have fun playing one of the most popular and influential games of all time!</p>
- <p><a href="https://www.csdownload.lt/cs-1.6-orange-box/">Click here to download Counter Strike 1.6 Orange Box now!</a></p>
- <h3>Frequently Asked Questions</h3>
- <ul>
- <li><b>What is Counter Strike?</b></li>
- <p>Counter Strike is a first-person shooter game that was released in 1999 as a mod for Half-Life. It became one of the most popular multiplayer games of all time with millions of players worldwide.</p>
- <li><b>What is Orange Box?</b></li>
- <p>Orange Box is a video game compilation that was released by Valve in 2007 for Windows and Xbox 360. It contains five games that use the Source engine: Half-Life 2, Half-Life 2 Episode One, Half-Life 2 Episode Two, Portal, and Team Fortress 2.</p>
- <li><b>What is Counter Strike 1.6 Orange Box?</b></li>
- <p>Counter Strike 1.6 Orange Box is a version of Counter Strike 1.6 that is based on the Orange Box bundle released by Valve in 2007. It has some extra features such as original design and models, English language and standard config, bots and server search, protection and performance.</p>
- <li><b>How to download Counter Strike 1.6 Orange Box?</b></li>
- <p>You can download Counter Strike 1.6 Orange Box from our website using either a direct link or a torrent link.</p>
- <li><b>How to install Counter Strike 1.6 Orange Box?</b></li>
- <p>You can install Counter Strike 1.6 Orange Box by running the installer file that you downloaded from our website and following the instructions on the screen.</p>
- </ul>
- </p> 0a6ba089eb<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/AUTODATA 8.89 Crack FULL 2018 64 Bit TOP.md DELETED
@@ -1,8 +0,0 @@
- <h2>AUTODATA 8.89 Crack FULL 2018 64 bit</h2><br /><p><b><b>Download Zip</b> &#9733;&#9733;&#9733;&#9733;&#9733; <a href="https://imgfil.com/2uxZY2">https://imgfil.com/2uxZY2</a></b></p><br /><br />
-
- January 20, 2018 — SOn Satyamurthy[2015] ... Jaaruko Full Song _ SO Satyamurthy Full Video Song - Allu Arjun Dvxs6 ... Doctor Satyamurthy Movie Audio Launch. Mumbai Doctor Satyamurthy Movie Audio Launch.
- Mumbai Doctor Satyamurthy Movie Audio Launch.
- Mumbai Doctor Satyamurthy Movie Audio La 8a78ff9644<br />
- <br />
- <br />
- <p></p>
 
spaces/1gistliPinn/ChatGPT4/Examples/Ayurved Sar Sangrah Book Zip.md DELETED
@@ -1,6 +0,0 @@
- <h2>ayurved sar sangrah book zip</h2><br /><p><b><b>DOWNLOAD</b> &#9889; <a href="https://imgfil.com/2uy1ew">https://imgfil.com/2uy1ew</a></b></p><br /><br />
- <br />
- January 20, 2021 - Book page image. 0.25x, 0.5x, 0.75x, 1.0x, 1.25x, 1.5x, 1.75x, 2x. (1 out of 866). Flip left. Flip right. Identifier: ayurved-sara-sangraha. htm. Translation: www.ayurveda.org.uk/speakingwithassam. 8a78ff9644<br />
- <br />
- <br />
- <p></p>
 
spaces/1phancelerku/anime-remove-background/Dream Live APK Mod - The Best App for Live Streaming Fans (No Top Up Required).md DELETED
@@ -1,103 +0,0 @@
- <br />
- <h1>Download Dream Live APK Mod: A Live Streaming Platform for Entertainment Lovers</h1>
- <p>Do you love watching live streams of talented and charming hosts? Do you want to interact with them and send them virtual gifts? Do you want to start your own live stream and share your passion with the world? If you answered yes to any of these questions, then you should download Dream Live APK Mod, a live streaming platform that focuses on the entertainment lifestyle.</p>
- <h2>What is Dream Live APK Mod?</h2>
- <p>Dream Live APK Mod is a modified version of the original Dream Live app, which is a live streaming platform that brings up lots of talent anchors to share happiness by providing online real-time interaction broadcasts. You can watch live streams of singing, dancing, talking, gaming, and more, and chat with the hosts and other viewers. You can also send and receive virtual gifts, such as flowers, hearts, diamonds, and cars, to show your support and appreciation. You can also start your own live stream and showcase your talent to the world.</p>
- <h2>download dream live apk mod</h2><br /><p><b><b>Download</b> &gt; <a href="https://jinyurl.com/2uNNQr">https://jinyurl.com/2uNNQr</a></b></p><br /><br />
- <h3>Features of Dream Live APK Mod</h3>
- <p>Dream Live APK Mod has many features that make it more enjoyable and convenient than the original app. Here are some of them:</p>
- <h4>VIP Unlocked</h4>
- <p>With Dream Live APK Mod, you can enjoy all the benefits of being a VIP member without paying anything. You can access exclusive live streams, chat rooms, stickers, filters, and more. You can also get more attention from the hosts and other viewers.</p>
- <p>download dream live apk mod ijo unlocked room<br />
- download dream live apk mod streaming bebas<br />
- download dream live apk mod versi terbaru 2023<br />
- download dream live apk mod fitur premium<br />
- download dream live apk mod gratis tanpa bayar<br />
- download dream live apk mod untuk android<br />
- download dream live apk mod tema hijau<br />
- download dream live apk mod berinteraksi dengan streamer<br />
- download dream live apk mod menyajikan konten favorit<br />
- download dream live apk mod menyanyi menari review produk<br />
- download dream live apk mod unlimited coins<br />
- download dream live apk mod no watermark<br />
- download dream live apk mod anti banned<br />
- download dream live apk mod kualitas HD<br />
- download dream live apk mod banyak host menawan<br />
- download dream live apk mod membuat grup chat<br />
- download dream live apk mod melatih keterampilan komunikasi<br />
- download dream live apk mod tanpa iklan<br />
- download dream live apk mod link alternatif<br />
- download dream live apk mod cara instal<br />
- download dream live apk mod review pengguna<br />
- download dream live apk mod kelebihan dan kekurangan<br />
- download dream live apk mod update terkini<br />
- download dream live apk mod tips dan trik<br />
- download dream live apk mod aplikasi streaming terbaik</p>
- <h4>No Ads</h4>
- <p>Dream Live APK Mod removes all the annoying ads that interrupt your viewing experience. You can watch live streams without any distractions or interruptions.</p>
- <h4>Unlimited Gifts</h4>
- <p>Dream Live APK Mod gives you unlimited coins and diamonds that you can use to send gifts to your favorite hosts. You can also receive gifts from other viewers and exchange them for real money.</p>
- <h4>Live Interaction</h4>
- <p>Dream Live APK Mod allows you to interact with the hosts and other viewers in real time. You can chat with them using text, voice, or video messages. You can also join private chat rooms and group chats. You can also participate in various events and activities, such as contests, games, quizzes, and polls.</p>
- <h4>Variety of Content</h4>
- <p>Dream Live APK Mod has a variety of content that suits your preferences and interests. You can watch live streams of different categories, such as music, dance, comedy, beauty, fashion, sports, gaming, education, travel, and more. You can also discover new and popular hosts by browsing the recommended list or searching by keywords.</p>
- <h2>How to Download and Install Dream Live APK Mod?</h2>
- <p>If you want to download and install Dream Live APK Mod on your Android device, you need to follow these simple steps:</p>
- <h3>Download Link</h3>
- <p>You can download Dream Live APK Mod from this link:</p>
- <a href="(^1^)">Dream Live MOD APK (VIP Unlocked, No Ads) 1.4.2</a>
- <p>This link will take you to a trusted website where you can download the latest version of the modded <h3>Installation Steps</h3>
- <p>After downloading the APK file, you need to install it on your device. Here are the steps to do so:</p>
- <ol>
- <li>Go to your device settings and enable the option to install apps from unknown sources. This will allow you to install apps that are not from the Google Play Store.</li>
- <li>Locate the downloaded APK file on your device storage and tap on it to start the installation process.</li>
- <li>Follow the instructions on the screen and wait for the installation to complete.</li>
- <li>Launch the app and enjoy watching and streaming live videos.</li>
- </ol>
- <h2>How to Use Dream Live APK Mod?</h2>
- <p>Using Dream Live APK Mod is very easy and fun. Here are some tips on how to use it:</p>
- <h3>Create an Account</h3>
- <p>To use Dream Live APK Mod, you need to create an account first. You can do so by using your phone number, email address, or social media accounts. You can also choose a username, password, and profile picture for your account. You can also edit your personal information, such as your gender, age, location, and bio.</p>
- <h3>Browse and Watch Live Streams</h3>
- <p>To browse and watch live streams, you can swipe left or right on the home screen to see different categories of content. You can also tap on the magnifying glass icon to search for specific hosts or keywords. You can also tap on the heart icon to see your favorite hosts and follow them. To watch a live stream, just tap on it and enjoy the show. You can also chat with the host and other viewers by typing or sending voice or video messages. You can also send gifts by tapping on the gift icon and choosing from various options.</p>
- <h3>Send and Receive Gifts</h3>
- <p>To send gifts to your favorite hosts, you need to have coins or diamonds in your account. You can get coins or diamonds by watching ads, completing tasks, inviting friends, or buying them with real money. You can also receive gifts from other viewers if they like your live stream or chat messages. You can exchange the gifts you receive for real money by withdrawing them to your bank account or PayPal.</p>
- <h3>Start Your Own Live Stream</h3>
- <p>To start your own live stream, you need to tap on the camera icon on the bottom of the home screen. You can then choose a title, category, and cover image for your live stream. You can also use various filters, stickers, and effects to enhance your appearance and mood. You can also invite guests or co-hosts to join your live stream by tapping on the invite icon. Once you are ready, just tap on the start button and go live. You can interact with your viewers by chatting with them or responding to their gifts. You can also end your live stream anytime by tapping on the stop button.</p>
- <h2>Pros and Cons of Dream Live APK Mod</h2>
- <p>Dream Live APK Mod has many pros and cons that you should consider before using it. Here are some of them:</p>
- <h3>Pros</h3>
- <ul>
- <li>Free and Premium Features: Dream Live APK Mod gives you access to all the features of the original app, plus some extra features that are only available for VIP members or paid users. You can enjoy watching exclusive live streams, using premium stickers and filters, sending unlimited gifts, and more.</li>
- <li>Easy and Fun to Use: Dream Live APK Mod has a simple and user-friendly interface that makes it easy and fun to use. You can easily navigate through different categories of content, search for hosts or keywords, chat with hosts and viewers, send gifts, start your own live stream, and more.</li>
- <li>Meet New People and Make Friends: Dream Live APK Mod allows you to meet new people and make friends from different countries and cultures. You can chat with them using text, voice, or video messages, join private or group chats, participate in events and activities, follow them, send them gifts, and more.</li>
- </ul>
- <h3>Cons</h3>
- <ul>
- <li>Requires Internet Connection: Dream Live APK Mod requires a stable internet connection to work properly. If you have a slow or unstable internet connection, you may experience buffering, lagging, freezing, or crashing issues while watching or streaming live videos.</li>
- <li>May Contain Inappropriate Content: Dream Live APK Mod may contain inappropriate content that is not suitable for minors or sensitive viewers. Some hosts may show nudity, violence, profanity, or other offensive content in their live streams. You should be careful when choosing what to watch and whom to interact with.</li>
- <li>May Not Be Compatible with Some Devices: Dream Live APK Mod may not be compatible with some devices or operating systems. Some devices may not support the installation of apps from unknown sources or may have security or performance issues while running the app. You should check the compatibility of your device before downloading and installing the app.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Dream Live APK Mod is a live streaming platform that focuses on the entertainment lifestyle. You can watch live streams of various categories, chat with hosts and viewers, send and receive gifts, and start your own live stream. You can also enjoy all the premium features of the app for free, such as VIP unlocked, no ads, and unlimited gifts. However, you should also be aware of the cons of the app, such as requiring internet connection, containing inappropriate content, and not being compatible with some devices. If you are looking for a fun and interactive way to spend your time online, you should download Dream Live APK Mod and give it a try.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Dream Live APK Mod:</p>
- <ol>
- <li>Is Dream Live APK Mod safe to use?</li>
- <p>Dream Live APK Mod is safe to use as long as you download it from a trusted website and scan it with an antivirus program before installing it. You should also avoid clicking on suspicious links or downloading unknown files while using the app.</p>
- <li>Is Dream Live APK Mod legal to use?</li>
- <p>Dream Live APK Mod is not legal to use as it violates the terms and conditions of the original app. You may face legal consequences if you use the app for illegal purposes or infringe on the rights of the original app developers or hosts. You should use the app at your own risk and responsibility.</p>
- <li>How can I update Dream Live APK Mod?</li>
- <p>Dream Live APK Mod does not update automatically as it is not from the Google Play Store. You need to check for updates manually by visiting the website where you downloaded the app or searching for other sources online. You should also uninstall the previous version of the app before installing the new one.</p>
- <li>How can I contact Dream Live APK Mod support?</li>
- <p>Dream Live APK Mod does not have an official support team as it is not from the original app developers. You can try to contact the modders who created the app or other users who have used the app for help or feedback. You can also check online forums or blogs for tips and tricks on how to use the app.</p>
- <li>How can I delete Dream Live APK Mod?</li>
- <p>If you want to delete Dream Live APK Mod from your device, you can do so by following these steps:</p>
- <ul>
- <li>Go to your device settings and tap on apps or applications.</li>
- <li>Find and tap on Dream Live APK Mod and tap on uninstall.</li>
- <li>Confirm your action and wait for the uninstallation to complete.</li>
- </ul>
- </ol></p> 197e85843d<br />
- <br />
- <br />
 
spaces/1toTree/lora_test/ppdiffusers/pipelines/ddim/pipeline_ddim.py DELETED
@@ -1,116 +0,0 @@
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
- # Copyright 2022 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from typing import List, Optional, Tuple, Union
-
- import paddle
-
- from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-
-
- class DDIMPipeline(DiffusionPipeline):
-     r"""
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-     Parameters:
-         unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
-             [`DDPMScheduler`], or [`DDIMScheduler`].
-     """
-
-     def __init__(self, unet, scheduler):
-         super().__init__()
-         self.register_modules(unet=unet, scheduler=scheduler)
-
-     @paddle.no_grad()
-     def __call__(
-         self,
-         batch_size: int = 1,
-         generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
-         eta: float = 0.0,
-         num_inference_steps: int = 50,
-         use_clipped_model_output: Optional[bool] = None,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-     ) -> Union[ImagePipelineOutput, Tuple]:
-         r"""
-         Args:
-             batch_size (`int`, *optional*, defaults to 1):
-                 The number of images to generate.
-             generator (`paddle.Generator`, *optional*):
-                 One or a list of paddle generator(s) to make generation deterministic.
-             eta (`float`, *optional*, defaults to 0.0):
-                 The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM).
-             num_inference_steps (`int`, *optional*, defaults to 50):
-                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                 expense of slower inference.
-             use_clipped_model_output (`bool`, *optional*, defaults to `None`):
-                 if `True` or `False`, see documentation for `DDIMScheduler.step`. If `None`, nothing is passed
-                 downstream to the scheduler. So use `None` for schedulers which don't support this argument.
-             output_type (`str`, *optional*, defaults to `"pil"`):
-                 The output format of the generated image. Choose between
-                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-             return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
-
-         Returns:
-             [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
-             `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
-             generated images.
-         """
-         # Sample gaussian noise to begin loop
-         if isinstance(self.unet.sample_size, int):
-             image_shape = (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size)
-         else:
-             image_shape = (batch_size, self.unet.in_channels, *self.unet.sample_size)
-
-         if isinstance(generator, list) and len(generator) != batch_size:
-             raise ValueError(
-                 f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
-                 f" size of {batch_size}. Make sure the batch size matches the length of the generators."
-             )
-
-         if isinstance(generator, list):
-             shape = (1,) + image_shape[1:]
-             image = [paddle.randn(shape, generator=generator[i], dtype=self.unet.dtype) for i in range(batch_size)]
-             image = paddle.concat(image, axis=0)
-         else:
-             image = paddle.randn(image_shape, generator=generator, dtype=self.unet.dtype)
-
-         # set step values
-         self.scheduler.set_timesteps(num_inference_steps)
-
-         for t in self.progress_bar(self.scheduler.timesteps):
-             # 1. predict noise model_output
-             model_output = self.unet(image, t).sample
-
-             # 2. predict previous mean of image x_t-1 and add variance depending on eta
-             # eta corresponds to η in paper and should be between [0, 1]
-             # do x_t -> x_t-1
-             image = self.scheduler.step(
-                 model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
-             ).prev_sample
-
-         image = (image / 2 + 0.5).clip(0, 1)
-         image = image.transpose([0, 2, 3, 1]).cast("float32").numpy()
-         if output_type == "pil":
-             image = self.numpy_to_pil(image)
-
-         if not return_dict:
-             return (image,)
-
-         return ImagePipelineOutput(images=image)
 
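The heart of the deleted pipeline is the loop in `__call__`: sample Gaussian noise, then repeatedly let the scheduler step the image backwards through the timesteps. The following minimal NumPy sketch shows what the DDIM scheduler step reduces to at eta = 0; the linear beta schedule and the `dummy_model` stand-in for the UNet are illustrative assumptions, not part of the deleted file:

import numpy as np

def ddim_step(x_t, eps, a_bar_t, a_bar_prev):
    # Deterministic DDIM update (eta = 0): recover the x0 estimate from
    # the predicted noise, then re-noise it to the previous timestep.
    x0 = (x_t - np.sqrt(1.0 - a_bar_t) * eps) / np.sqrt(a_bar_t)
    return np.sqrt(a_bar_prev) * x0 + np.sqrt(1.0 - a_bar_prev) * eps

def dummy_model(x_t, t):
    # Illustrative stand-in for `self.unet(image, t).sample`.
    return np.random.default_rng(t).standard_normal(x_t.shape)

T = 1000
alpha_bars = np.cumprod(1.0 - np.linspace(1e-4, 0.02, T))  # assumed linear beta schedule
timesteps = np.linspace(T - 1, 0, 50).astype(int)          # 50 inference steps

x = np.random.default_rng(0).standard_normal((1, 3, 8, 8))  # sample Gaussian noise to begin
for i, t in enumerate(timesteps):
    a_prev = alpha_bars[timesteps[i + 1]] if i + 1 < len(timesteps) else 1.0
    x = ddim_step(x, dummy_model(x, int(t)), alpha_bars[t], a_prev)
image = np.clip(x / 2 + 0.5, 0.0, 1.0)  # same [-1, 1] -> [0, 1] mapping as the pipeline
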
spaces/801artistry/RVC801/Applio-RVC-Fork/utils/backups.py DELETED
@@ -1,141 +0,0 @@
- import os
- import shutil
- import hashlib
- import time
- import base64
-
-
-
-
- LOGS_FOLDER = '/content/Applio-RVC-Fork/logs'
- WEIGHTS_FOLDER = '/content/Applio-RVC-Fork/weights'
- GOOGLE_DRIVE_PATH = '/content/drive/MyDrive/RVC_Backup'
-
- def import_google_drive_backup():
-     print("Importing Google Drive backup...")
-     weights_exist = False
-     for root, dirs, files in os.walk(GOOGLE_DRIVE_PATH):
-         for filename in files:
-             filepath = os.path.join(root, filename)
-             if os.path.isfile(filepath) and not filepath.startswith(os.path.join(GOOGLE_DRIVE_PATH, 'weights')):
-                 backup_filepath = os.path.join(LOGS_FOLDER, os.path.relpath(filepath, GOOGLE_DRIVE_PATH))
-                 backup_folderpath = os.path.dirname(backup_filepath)
-                 if not os.path.exists(backup_folderpath):
-                     os.makedirs(backup_folderpath)
-                     print(f'Created backup folder: {backup_folderpath}', flush=True)
-                 shutil.copy2(filepath, backup_filepath) # copy file with metadata
-                 print(f'Imported file from Google Drive backup: {filename}')
-             elif filepath.startswith(os.path.join(GOOGLE_DRIVE_PATH, 'weights')) and filename.endswith('.pth'):
-                 weights_exist = True
-                 weights_filepath = os.path.join(WEIGHTS_FOLDER, os.path.relpath(filepath, os.path.join(GOOGLE_DRIVE_PATH, 'weights')))
-                 weights_folderpath = os.path.dirname(weights_filepath)
-                 if not os.path.exists(weights_folderpath):
-                     os.makedirs(weights_folderpath)
-                     print(f'Created weights folder: {weights_folderpath}', flush=True)
-                 shutil.copy2(filepath, weights_filepath) # copy file with metadata
-                 print(f'Imported file from weights: {filename}')
-     if weights_exist:
-         print("Copied weights from Google Drive backup to local weights folder.")
-     else:
-         print("No weights found in Google Drive backup.")
-     print("Google Drive backup import completed.")
-
- def get_md5_hash(file_path):
-     hash_md5 = hashlib.md5()
-     with open(file_path, "rb") as f:
-         for chunk in iter(lambda: f.read(4096), b""):
-             hash_md5.update(chunk)
-     return hash_md5.hexdigest()
-
- def copy_weights_folder_to_drive():
-     destination_folder = os.path.join(GOOGLE_DRIVE_PATH, 'weights')
-     try:
-         if not os.path.exists(destination_folder):
-             os.makedirs(destination_folder)
-
-         num_copied = 0
-         for filename in os.listdir(WEIGHTS_FOLDER):
-             if filename.endswith('.pth'):
-                 source_file = os.path.join(WEIGHTS_FOLDER, filename)
-                 destination_file = os.path.join(destination_folder, filename)
-                 if not os.path.exists(destination_file):
-                     shutil.copy2(source_file, destination_file)
-                     num_copied += 1
-                     print(f"Copied {filename} to Google Drive!")
-
-         if num_copied == 0:
-             print("No new finished models found for copying.")
-         else:
-             print(f"Finished copying {num_copied} files to Google Drive!")
-
-     except Exception as e:
-         print(f"An error occurred while copying weights: {str(e)}")
-         # You can log the error or take appropriate actions here.
-
- def backup_files():
-     print("\nStarting backup loop...")
-     last_backup_timestamps_path = os.path.join(LOGS_FOLDER, 'last_backup_timestamps.txt')
-     fully_updated = False # boolean to track if all files are up to date
-
-     while True:
-         try:
-             updated = False # flag to check if any files were updated
-             last_backup_timestamps = {}
-
-             try:
-                 with open(last_backup_timestamps_path, 'r') as f:
-                     last_backup_timestamps = dict(line.strip().split(':') for line in f)
-             except FileNotFoundError:
-                 pass # File does not exist yet, which is fine
-
-             for root, dirs, files in os.walk(LOGS_FOLDER):
-                 for filename in files:
-                     if filename != 'last_backup_timestamps.txt':
-                         filepath = os.path.join(root, filename)
-                         if os.path.isfile(filepath):
-                             backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER))
-                             backup_folderpath = os.path.dirname(backup_filepath)
-                             if not os.path.exists(backup_folderpath):
-                                 os.makedirs(backup_folderpath)
-                                 print(f'Created backup folder: {backup_folderpath}', flush=True)
-                             # check if file has changed since last backup
-                             last_backup_timestamp = last_backup_timestamps.get(filepath)
-                             current_timestamp = os.path.getmtime(filepath)
-                             if last_backup_timestamp is None or float(last_backup_timestamp) < current_timestamp:
-                                 shutil.copy2(filepath, backup_filepath) # copy file with metadata
-                                 last_backup_timestamps[filepath] = str(current_timestamp) # update last backup timestamp
-                                 if last_backup_timestamp is None:
-                                     print(f'Backed up file: {filename}')
-                                 else:
-                                     print(f'Updating backed up file: {filename}')
-                                 updated = True
-                                 fully_updated = False # if a file is updated, all files are not up to date
-
-             # check if any files were deleted in Colab and delete them from the backup drive
-             for filepath in list(last_backup_timestamps.keys()):
-                 if not os.path.exists(filepath):
-                     backup_filepath = os.path.join(GOOGLE_DRIVE_PATH, os.path.relpath(filepath, LOGS_FOLDER))
-                     if os.path.exists(backup_filepath):
-                         os.remove(backup_filepath)
-                         print(f'Deleted file: {filepath}')
-                     del last_backup_timestamps[filepath]
-                     updated = True
-                     fully_updated = False # if a file is deleted, all files are not up to date
-
-             if not updated and not fully_updated:
-                 print("Files are up to date.")
-                 fully_updated = True # if all files are up to date, set the boolean to True
-                 copy_weights_folder_to_drive()
-                 sleep_time = 15
-             else:
-                 sleep_time = 0.1
-
-             with open(last_backup_timestamps_path, 'w') as f:
-                 for filepath, timestamp in last_backup_timestamps.items():
-                     f.write(f'{filepath}:{timestamp}\n')
-
-             time.sleep(sleep_time) # wait for 15 seconds before checking again, or 0.1s if not fully up to date to speed up backups
-
-         except Exception as e:
-             print(f"An error occurred: {str(e)}")
-             # You can log the error or take appropriate actions here.
 
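The core idea in the deleted `backup_files` loop is mtime-based change detection: persist a map from file path to the modification time seen on the last pass, and copy only files whose mtime has advanced since then. A condensed sketch of that pattern, with illustrative placeholder directories:

import os
import shutil

def sync_changed_files(src_dir, dst_dir, last_mtimes):
    # Copy every file under src_dir whose mtime is newer than the one
    # recorded in last_mtimes; mutate the map and report whether anything changed.
    updated = False
    for root, _dirs, files in os.walk(src_dir):
        for name in files:
            path = os.path.join(root, name)
            mtime = os.path.getmtime(path)
            if last_mtimes.get(path, 0.0) < mtime:
                dest = os.path.join(dst_dir, os.path.relpath(path, src_dir))
                os.makedirs(os.path.dirname(dest), exist_ok=True)
                shutil.copy2(path, dest)  # copy2 preserves metadata, as in the original
                last_mtimes[path] = mtime
                updated = True
    return updated

# Illustrative usage, mirroring the logs -> Drive direction of the deleted script:
# mtimes = {}
# sync_changed_files('/content/logs', '/content/drive/MyDrive/backup', mtimes)
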
spaces/A00001/bingothoo/src/components/chat-panel.tsx DELETED
@@ -1,153 +0,0 @@
- 'use client'
-
- import * as React from 'react'
- import Image from 'next/image'
- import Textarea from 'react-textarea-autosize'
- import { useAtomValue } from 'jotai'
- import { useEnterSubmit } from '@/lib/hooks/use-enter-submit'
- import { cn } from '@/lib/utils'
-
- import BrushIcon from '@/assets/images/brush.svg'
- import ChatIcon from '@/assets/images/chat.svg'
- import VisualSearchIcon from '@/assets/images/visual-search.svg'
- import SendIcon from '@/assets/images/send.svg'
- import PinIcon from '@/assets/images/pin.svg'
- import PinFillIcon from '@/assets/images/pin-fill.svg'
-
- import { useBing } from '@/lib/hooks/use-bing'
- import { voiceListenAtom } from '@/state'
- import Voice from './voice'
- import { ChatImage } from './chat-image'
- import { ChatAttachments } from './chat-attachments'
-
- export interface ChatPanelProps
-   extends Pick<
-     ReturnType<typeof useBing>,
-     | 'generating'
-     | 'input'
-     | 'setInput'
-     | 'sendMessage'
-     | 'resetConversation'
-     | 'isSpeaking'
-     | 'attachmentList'
-     | 'uploadImage'
-     | 'setAttachmentList'
-   > {
-   id?: string
-   className?: string
- }
-
- export function ChatPanel({
-   isSpeaking,
-   generating,
-   input,
-   setInput,
-   className,
-   sendMessage,
-   resetConversation,
-   attachmentList,
-   uploadImage,
-   setAttachmentList
- }: ChatPanelProps) {
-   const inputRef = React.useRef<HTMLTextAreaElement>(null)
-   const {formRef, onKeyDown} = useEnterSubmit()
-   const [focused, setFocused] = React.useState(false)
-   const [active, setActive] = React.useState(false)
-   const [pin, setPin] = React.useState(false)
-   const [tid, setTid] = React.useState<any>()
-   const voiceListening = useAtomValue(voiceListenAtom)
-
-   const setBlur = React.useCallback(() => {
-     clearTimeout(tid)
-     setActive(false)
-     const _tid = setTimeout(() => setFocused(false), 2000);
-     setTid(_tid)
-   }, [tid])
-
-   const setFocus = React.useCallback(() => {
-     setFocused(true)
-     setActive(true)
-     clearTimeout(tid)
-     inputRef.current?.focus()
-   }, [tid])
-
-   React.useEffect(() => {
-     if (input) {
-       setFocus()
-     }
-   }, [input])
-
-   return (
-     <form
-       className={cn('chat-panel', className)}
-       onSubmit={async e => {
-         e.preventDefault()
-         if (generating) {
-           return;
-         }
-         if (!input?.trim()) {
-           return
-         }
-         setInput('')
-         setPin(false)
-         await sendMessage(input)
-       }}
-       ref={formRef}
-     >
-       <div className="action-bar pb-4">
-         <div className={cn('action-root', { focus: active || pin })} speech-state="hidden" visual-search="" drop-target="">
-           <div className="fade bottom">
-             <div className="background"></div>
-           </div>
-           <div className={cn('outside-left-container', { collapsed: focused })}>
-             <div className="button-compose-wrapper">
-               <button className="body-2 button-compose" type="button" aria-label="新主题" onClick={resetConversation}>
-                 <div className="button-compose-content">
-                   <Image className="pl-2" alt="brush" src={BrushIcon} width={40} />
-                   <div className="button-compose-text">新主题</div>
-                 </div>
-               </button>
-             </div>
-           </div>
-           <div
-             className={cn('main-container', { active: active || pin })}
-             style={{ minHeight: pin ? '360px' : undefined }}
-             onClick={setFocus}
-             onBlur={setBlur}
-           >
-             <div className="main-bar">
-               <Image alt="chat" src={ChatIcon} width={20} color="blue" />
-               <Textarea
-                 ref={inputRef}
-                 tabIndex={0}
-                 onKeyDown={onKeyDown}
-                 rows={1}
-                 value={input}
-                 onChange={e => setInput(e.target.value.slice(0, 4000))}
-                 placeholder={voiceListening ? '持续对话中...对话完成说“发送”即可' : 'Shift + Enter 换行'}
-                 spellCheck={false}
-                 className="message-input min-h-[24px] -mx-1 w-full text-base resize-none bg-transparent focus-within:outline-none"
-               />
-               <ChatImage uploadImage={uploadImage}>
-                 <Image alt="visual-search" src={VisualSearchIcon} width={24} />
-               </ChatImage>
-               <Voice setInput={setInput} sendMessage={sendMessage} isSpeaking={isSpeaking} input={input} />
-               <button type="submit">
-                 <Image alt="send" src={SendIcon} width={20} style={{ marginTop: '2px' }} />
-               </button>
-             </div>
-             <ChatAttachments attachmentList={attachmentList} setAttachmentList={setAttachmentList} uploadImage={uploadImage} />
-             <div className="body-1 bottom-bar">
-               <div className="letter-counter"><span>{input.length}</span>/4000</div>
-               <button onClick={() => {
-                 setPin(!pin)
-               }} className="pr-2">
-                 <Image alt="pin" src={pin ? PinFillIcon : PinIcon} width={20} />
-               </button>
-             </div>
-           </div>
-         </div>
-       </div>
-     </form>
-   )
- }
 
spaces/AIConsultant/MusicGen/CHANGELOG.md DELETED
@@ -1,28 +0,0 @@
- # Changelog
-
- All notable changes to this project will be documented in this file.
-
- The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
-
- ## [1.0.0] - 2023-08-02
-
- Major revision, added training code for EnCodec, AudioGen, MusicGen, and MultiBandDiffusion.
- Added pretrained model for AudioGen and MultiBandDiffusion.
-
- ## [0.0.2] - 2023-08-01
-
- Improved demo, fixed top p (thanks @jnordberg).
-
- Compressor tanh on output to avoid clipping with some style (especially piano).
- Now repeating the conditioning periodically if it is too short.
-
- More options when launching Gradio app locally (thanks @ashleykleynhans).
-
- Testing out PyTorch 2.0 memory efficient attention.
-
- Added extended generation (infinite length) by slowly moving the windows.
- Note that other implementations exist: https://github.com/camenduru/MusicGen-colab.
-
- ## [0.0.1] - 2023-06-09
-
- Initial release, with model evaluation only.
 
spaces/AIFILMS/generate_human_motion/VQ-Trans/dataset/prepare/download_extractor.sh DELETED
@@ -1,15 +0,0 @@
- rm -rf checkpoints
- mkdir checkpoints
- cd checkpoints
- echo -e "Downloading extractors"
- gdown --fuzzy https://drive.google.com/file/d/1o7RTDQcToJjTm9_mNWTyzvZvjTWpZfug/view
- gdown --fuzzy https://drive.google.com/file/d/1tX79xk0fflp07EZ660Xz1RAFE33iEyJR/view
-
-
- unzip t2m.zip
- unzip kit.zip
-
- echo -e "Cleaning\n"
- rm t2m.zip
- rm kit.zip
- echo -e "Downloading done!"
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/diffusionmodules/model.py DELETED
@@ -1,835 +0,0 @@
1
- # pytorch_diffusion + derived encoder decoder
2
- import math
3
- import torch
4
- import torch.nn as nn
5
- import numpy as np
6
- from einops import rearrange
7
-
8
- from ldm.util import instantiate_from_config
9
- from ldm.modules.attention import LinearAttention
10
-
11
-
12
- def get_timestep_embedding(timesteps, embedding_dim):
13
- """
14
- This matches the implementation in Denoising Diffusion Probabilistic Models:
15
- From Fairseq.
16
- Build sinusoidal embeddings.
17
- This matches the implementation in tensor2tensor, but differs slightly
18
- from the description in Section 3.5 of "Attention Is All You Need".
19
- """
20
- assert len(timesteps.shape) == 1
21
-
22
- half_dim = embedding_dim // 2
23
- emb = math.log(10000) / (half_dim - 1)
24
- emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
25
- emb = emb.to(device=timesteps.device)
26
- emb = timesteps.float()[:, None] * emb[None, :]
27
- emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
28
- if embedding_dim % 2 == 1: # zero pad
29
- emb = torch.nn.functional.pad(emb, (0,1,0,0))
30
- return emb
31
-
32
-
33
- def nonlinearity(x):
34
- # swish
35
- return x*torch.sigmoid(x)
36
-
37
-
38
- def Normalize(in_channels, num_groups=32):
39
- return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
40
-
41
-
42
- class Upsample(nn.Module):
43
- def __init__(self, in_channels, with_conv):
44
- super().__init__()
45
- self.with_conv = with_conv
46
- if self.with_conv:
47
- self.conv = torch.nn.Conv2d(in_channels,
48
- in_channels,
49
- kernel_size=3,
50
- stride=1,
51
- padding=1)
52
-
53
- def forward(self, x):
54
- x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
55
- if self.with_conv:
56
- x = self.conv(x)
57
- return x
58
-
59
-
60
- class Downsample(nn.Module):
61
- def __init__(self, in_channels, with_conv):
62
- super().__init__()
63
- self.with_conv = with_conv
64
- if self.with_conv:
65
- # no asymmetric padding in torch conv, must do it ourselves
66
- self.conv = torch.nn.Conv2d(in_channels,
67
- in_channels,
68
- kernel_size=3,
69
- stride=2,
70
- padding=0)
71
-
72
- def forward(self, x):
73
- if self.with_conv:
74
- pad = (0,1,0,1)
75
- x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
76
- x = self.conv(x)
77
- else:
78
- x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
79
- return x
80
-
81
-
82
- class ResnetBlock(nn.Module):
83
- def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
84
- dropout, temb_channels=512):
85
- super().__init__()
86
- self.in_channels = in_channels
87
- out_channels = in_channels if out_channels is None else out_channels
88
- self.out_channels = out_channels
89
- self.use_conv_shortcut = conv_shortcut
90
-
91
- self.norm1 = Normalize(in_channels)
92
- self.conv1 = torch.nn.Conv2d(in_channels,
93
- out_channels,
94
- kernel_size=3,
95
- stride=1,
96
- padding=1)
97
- if temb_channels > 0:
98
- self.temb_proj = torch.nn.Linear(temb_channels,
99
- out_channels)
100
- self.norm2 = Normalize(out_channels)
101
- self.dropout = torch.nn.Dropout(dropout)
102
- self.conv2 = torch.nn.Conv2d(out_channels,
103
- out_channels,
104
- kernel_size=3,
105
- stride=1,
106
- padding=1)
107
- if self.in_channels != self.out_channels:
108
- if self.use_conv_shortcut:
109
- self.conv_shortcut = torch.nn.Conv2d(in_channels,
110
- out_channels,
111
- kernel_size=3,
112
- stride=1,
113
- padding=1)
114
- else:
115
- self.nin_shortcut = torch.nn.Conv2d(in_channels,
116
- out_channels,
117
- kernel_size=1,
118
- stride=1,
119
- padding=0)
120
-
121
- def forward(self, x, temb):
122
- h = x
123
- h = self.norm1(h)
124
- h = nonlinearity(h)
125
- h = self.conv1(h)
126
-
127
- if temb is not None:
128
- h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]
129
-
130
- h = self.norm2(h)
131
- h = nonlinearity(h)
132
- h = self.dropout(h)
133
- h = self.conv2(h)
134
-
135
- if self.in_channels != self.out_channels:
136
- if self.use_conv_shortcut:
137
- x = self.conv_shortcut(x)
138
- else:
139
- x = self.nin_shortcut(x)
140
-
141
- return x+h
142
-
143
-
144
- class LinAttnBlock(LinearAttention):
145
- """to match AttnBlock usage"""
146
- def __init__(self, in_channels):
147
- super().__init__(dim=in_channels, heads=1, dim_head=in_channels)
148
-
149
-
150
- class AttnBlock(nn.Module):
-     def __init__(self, in_channels):
-         super().__init__()
-         self.in_channels = in_channels
- 
-         self.norm = Normalize(in_channels)
-         self.q = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
-         self.k = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
-         self.v = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
-         self.proj_out = torch.nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, padding=0)
- 
-     def forward(self, x):
-         h_ = x
-         h_ = self.norm(h_)
-         q = self.q(h_)
-         k = self.k(h_)
-         v = self.v(h_)
- 
-         # compute attention
-         b, c, h, w = q.shape
-         q = q.reshape(b, c, h * w)
-         q = q.permute(0, 2, 1)      # b,hw,c
-         k = k.reshape(b, c, h * w)  # b,c,hw
-         w_ = torch.bmm(q, k)        # b,hw,hw    w[b,i,j] = sum_c q[b,i,c] k[b,c,j]
-         w_ = w_ * (int(c) ** (-0.5))
-         w_ = torch.nn.functional.softmax(w_, dim=2)
- 
-         # attend to values
-         v = v.reshape(b, c, h * w)
-         w_ = w_.permute(0, 2, 1)    # b,hw,hw (first hw of k, second of q)
-         h_ = torch.bmm(v, w_)       # b,c,hw (hw of q)    h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
-         h_ = h_.reshape(b, c, h, w)
- 
-         h_ = self.proj_out(h_)
- 
-         return x + h_
- 
- 
- def make_attn(in_channels, attn_type="vanilla"):
-     assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown'
-     print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
-     if attn_type == "vanilla":
-         return AttnBlock(in_channels)
-     elif attn_type == "none":
-         return nn.Identity(in_channels)
-     else:
-         return LinAttnBlock(in_channels)
- 
- 
- class Model(nn.Module):
-     def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
-                  attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
-                  resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"):
-         super().__init__()
-         if use_linear_attn: attn_type = "linear"
-         self.ch = ch
-         self.temb_ch = self.ch * 4
-         self.num_resolutions = len(ch_mult)
-         self.num_res_blocks = num_res_blocks
-         self.resolution = resolution
-         self.in_channels = in_channels
- 
-         self.use_timestep = use_timestep
-         if self.use_timestep:
-             # timestep embedding
-             self.temb = nn.Module()
-             self.temb.dense = nn.ModuleList([
-                 torch.nn.Linear(self.ch, self.temb_ch),
-                 torch.nn.Linear(self.temb_ch, self.temb_ch),
-             ])
- 
-         # downsampling
-         self.conv_in = torch.nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1)
- 
-         curr_res = resolution
-         in_ch_mult = (1,) + tuple(ch_mult)
-         self.down = nn.ModuleList()
-         for i_level in range(self.num_resolutions):
-             block = nn.ModuleList()
-             attn = nn.ModuleList()
-             block_in = ch * in_ch_mult[i_level]
-             block_out = ch * ch_mult[i_level]
-             for i_block in range(self.num_res_blocks):
-                 block.append(ResnetBlock(in_channels=block_in, out_channels=block_out,
-                                          temb_channels=self.temb_ch, dropout=dropout))
-                 block_in = block_out
-                 if curr_res in attn_resolutions:
-                     attn.append(make_attn(block_in, attn_type=attn_type))
-             down = nn.Module()
-             down.block = block
-             down.attn = attn
-             if i_level != self.num_resolutions - 1:
-                 down.downsample = Downsample(block_in, resamp_with_conv)
-                 curr_res = curr_res // 2
-             self.down.append(down)
- 
-         # middle
-         self.mid = nn.Module()
-         self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in,
-                                        temb_channels=self.temb_ch, dropout=dropout)
-         self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
-         self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in,
-                                        temb_channels=self.temb_ch, dropout=dropout)
- 
-         # upsampling
-         self.up = nn.ModuleList()
-         for i_level in reversed(range(self.num_resolutions)):
-             block = nn.ModuleList()
-             attn = nn.ModuleList()
-             block_out = ch * ch_mult[i_level]
-             skip_in = ch * ch_mult[i_level]
-             for i_block in range(self.num_res_blocks + 1):
-                 if i_block == self.num_res_blocks:
-                     skip_in = ch * in_ch_mult[i_level]
-                 block.append(ResnetBlock(in_channels=block_in + skip_in, out_channels=block_out,
-                                          temb_channels=self.temb_ch, dropout=dropout))
-                 block_in = block_out
-                 if curr_res in attn_resolutions:
-                     attn.append(make_attn(block_in, attn_type=attn_type))
-             up = nn.Module()
-             up.block = block
-             up.attn = attn
-             if i_level != 0:
-                 up.upsample = Upsample(block_in, resamp_with_conv)
-                 curr_res = curr_res * 2
-             self.up.insert(0, up)  # prepend to get consistent order
- 
-         # end
-         self.norm_out = Normalize(block_in)
-         self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1)
- 
-     def forward(self, x, t=None, context=None):
-         # assert x.shape[2] == x.shape[3] == self.resolution
-         if context is not None:
-             # assume aligned context, cat along channel axis
-             x = torch.cat((x, context), dim=1)
-         if self.use_timestep:
-             # timestep embedding
-             assert t is not None
-             temb = get_timestep_embedding(t, self.ch)
-             temb = self.temb.dense[0](temb)
-             temb = nonlinearity(temb)
-             temb = self.temb.dense[1](temb)
-         else:
-             temb = None
- 
-         # downsampling
-         hs = [self.conv_in(x)]
-         for i_level in range(self.num_resolutions):
-             for i_block in range(self.num_res_blocks):
-                 h = self.down[i_level].block[i_block](hs[-1], temb)
-                 if len(self.down[i_level].attn) > 0:
-                     h = self.down[i_level].attn[i_block](h)
-                 hs.append(h)
-             if i_level != self.num_resolutions - 1:
-                 hs.append(self.down[i_level].downsample(hs[-1]))
- 
-         # middle
-         h = hs[-1]
-         h = self.mid.block_1(h, temb)
-         h = self.mid.attn_1(h)
-         h = self.mid.block_2(h, temb)
- 
-         # upsampling
-         for i_level in reversed(range(self.num_resolutions)):
-             for i_block in range(self.num_res_blocks + 1):
-                 h = self.up[i_level].block[i_block](
-                     torch.cat([h, hs.pop()], dim=1), temb)
-                 if len(self.up[i_level].attn) > 0:
-                     h = self.up[i_level].attn[i_block](h)
-             if i_level != 0:
-                 h = self.up[i_level].upsample(h)
- 
-         # end
-         h = self.norm_out(h)
-         h = nonlinearity(h)
-         h = self.conv_out(h)
-         return h
- 
-     def get_last_layer(self):
-         return self.conv_out.weight
- 
- 
- class Encoder(nn.Module):
-     def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
-                  attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
-                  resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
-                  **ignore_kwargs):
-         super().__init__()
-         if use_linear_attn: attn_type = "linear"
-         self.ch = ch
-         self.temb_ch = 0
-         self.num_resolutions = len(ch_mult)
-         self.num_res_blocks = num_res_blocks
-         self.resolution = resolution
-         self.in_channels = in_channels
- 
-         # downsampling
-         self.conv_in = torch.nn.Conv2d(in_channels, self.ch, kernel_size=3, stride=1, padding=1)
- 
-         curr_res = resolution
-         in_ch_mult = (1,) + tuple(ch_mult)
-         self.in_ch_mult = in_ch_mult
-         self.down = nn.ModuleList()
-         for i_level in range(self.num_resolutions):
-             block = nn.ModuleList()
-             attn = nn.ModuleList()
-             block_in = ch * in_ch_mult[i_level]
-             block_out = ch * ch_mult[i_level]
-             for i_block in range(self.num_res_blocks):
-                 block.append(ResnetBlock(in_channels=block_in, out_channels=block_out,
-                                          temb_channels=self.temb_ch, dropout=dropout))
-                 block_in = block_out
-                 if curr_res in attn_resolutions:
-                     attn.append(make_attn(block_in, attn_type=attn_type))  # vanilla attention
-             down = nn.Module()
-             down.block = block
-             down.attn = attn
-             if i_level != self.num_resolutions - 1:
-                 down.downsample = Downsample(block_in, resamp_with_conv)
-                 curr_res = curr_res // 2
-             self.down.append(down)
- 
-         # middle
-         self.mid = nn.Module()
-         self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in,
-                                        temb_channels=self.temb_ch, dropout=dropout)
-         self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
-         self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in,
-                                        temb_channels=self.temb_ch, dropout=dropout)
- 
-         # end
-         self.norm_out = Normalize(block_in)  # GroupNorm
-         self.conv_out = torch.nn.Conv2d(block_in, 2 * z_channels if double_z else z_channels,
-                                         kernel_size=3, stride=1, padding=1)
- 
-     def forward(self, x):
-         # timestep embedding
-         temb = None
- 
-         # downsampling
-         hs = [self.conv_in(x)]
-         for i_level in range(self.num_resolutions):
-             for i_block in range(self.num_res_blocks):
-                 h = self.down[i_level].block[i_block](hs[-1], temb)
-                 if len(self.down[i_level].attn) > 0:
-                     h = self.down[i_level].attn[i_block](h)
-                 hs.append(h)
-             if i_level != self.num_resolutions - 1:
-                 hs.append(self.down[i_level].downsample(hs[-1]))
- 
-         # middle
-         h = hs[-1]
-         h = self.mid.block_1(h, temb)
-         h = self.mid.attn_1(h)
-         h = self.mid.block_2(h, temb)
- 
-         # end
-         h = self.norm_out(h)
-         h = nonlinearity(h)
-         h = self.conv_out(h)
-         return h
- 
- 
- class Decoder(nn.Module):
-     def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
-                  attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
-                  resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
-                  attn_type="vanilla", **ignorekwargs):
-         super().__init__()
-         if use_linear_attn: attn_type = "linear"
-         self.ch = ch
-         self.temb_ch = 0
-         self.num_resolutions = len(ch_mult)
-         self.num_res_blocks = num_res_blocks
-         self.resolution = resolution
-         self.in_channels = in_channels
-         self.give_pre_end = give_pre_end
-         self.tanh_out = tanh_out
- 
-         # compute in_ch_mult, block_in and curr_res at lowest res
-         in_ch_mult = (1,) + tuple(ch_mult)
-         block_in = ch * ch_mult[self.num_resolutions - 1]
-         curr_res = resolution // 2 ** (self.num_resolutions - 1)
-         self.z_shape = (1, z_channels, curr_res, curr_res)
-         print("Working with z of shape {} = {} dimensions.".format(
-             self.z_shape, np.prod(self.z_shape)))
- 
-         # z to block_in
-         self.conv_in = torch.nn.Conv2d(z_channels, block_in, kernel_size=3, stride=1, padding=1)
- 
-         # middle
-         self.mid = nn.Module()
-         self.mid.block_1 = ResnetBlock(in_channels=block_in, out_channels=block_in,
-                                        temb_channels=self.temb_ch, dropout=dropout)
-         self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
-         self.mid.block_2 = ResnetBlock(in_channels=block_in, out_channels=block_in,
-                                        temb_channels=self.temb_ch, dropout=dropout)
- 
-         # upsampling
-         self.up = nn.ModuleList()
-         for i_level in reversed(range(self.num_resolutions)):
-             block = nn.ModuleList()
-             attn = nn.ModuleList()
-             block_out = ch * ch_mult[i_level]
-             for i_block in range(self.num_res_blocks + 1):
-                 block.append(ResnetBlock(in_channels=block_in, out_channels=block_out,
-                                          temb_channels=self.temb_ch, dropout=dropout))
-                 block_in = block_out
-                 if curr_res in attn_resolutions:
-                     attn.append(make_attn(block_in, attn_type=attn_type))
-             up = nn.Module()
-             up.block = block
-             up.attn = attn
-             if i_level != 0:
-                 up.upsample = Upsample(block_in, resamp_with_conv)
-                 curr_res = curr_res * 2
-             self.up.insert(0, up)  # prepend to get consistent order
- 
-         # end
-         self.norm_out = Normalize(block_in)
-         self.conv_out = torch.nn.Conv2d(block_in, out_ch, kernel_size=3, stride=1, padding=1)
- 
-     def forward(self, z):
-         # assert z.shape[1:] == self.z_shape[1:]
-         self.last_z_shape = z.shape
- 
-         # timestep embedding
-         temb = None
- 
-         # z to block_in
-         h = self.conv_in(z)
- 
-         # middle
-         h = self.mid.block_1(h, temb)
-         h = self.mid.attn_1(h)
-         h = self.mid.block_2(h, temb)
- 
-         # upsampling
-         for i_level in reversed(range(self.num_resolutions)):
-             for i_block in range(self.num_res_blocks + 1):
-                 h = self.up[i_level].block[i_block](h, temb)
-                 if len(self.up[i_level].attn) > 0:
-                     h = self.up[i_level].attn[i_block](h)
-             if i_level != 0:
-                 h = self.up[i_level].upsample(h)
- 
-         # end
-         if self.give_pre_end:
-             return h
- 
-         h = self.norm_out(h)
-         h = nonlinearity(h)
-         h = self.conv_out(h)
-         if self.tanh_out:
-             h = torch.tanh(h)
-         return h
- 
- 
- class SimpleDecoder(nn.Module):
-     def __init__(self, in_channels, out_channels, *args, **kwargs):
-         super().__init__()
-         self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
-                                     ResnetBlock(in_channels=in_channels,
-                                                 out_channels=2 * in_channels,
-                                                 temb_channels=0, dropout=0.0),
-                                     ResnetBlock(in_channels=2 * in_channels,
-                                                 out_channels=4 * in_channels,
-                                                 temb_channels=0, dropout=0.0),
-                                     ResnetBlock(in_channels=4 * in_channels,
-                                                 out_channels=2 * in_channels,
-                                                 temb_channels=0, dropout=0.0),
-                                     nn.Conv2d(2 * in_channels, in_channels, 1),
-                                     Upsample(in_channels, with_conv=True)])
-         # end
-         self.norm_out = Normalize(in_channels)
-         self.conv_out = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)
- 
-     def forward(self, x):
-         for i, layer in enumerate(self.model):
-             if i in [1, 2, 3]:
-                 x = layer(x, None)
-             else:
-                 x = layer(x)
- 
-         h = self.norm_out(x)
-         h = nonlinearity(h)
-         x = self.conv_out(h)
-         return x
- 
- 
- class UpsampleDecoder(nn.Module):
-     def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
-                  ch_mult=(2,2), dropout=0.0):
-         super().__init__()
-         # upsampling
-         self.temb_ch = 0
-         self.num_resolutions = len(ch_mult)
-         self.num_res_blocks = num_res_blocks
-         block_in = in_channels
-         curr_res = resolution // 2 ** (self.num_resolutions - 1)
-         self.res_blocks = nn.ModuleList()
-         self.upsample_blocks = nn.ModuleList()
-         for i_level in range(self.num_resolutions):
-             res_block = []
-             block_out = ch * ch_mult[i_level]
-             for i_block in range(self.num_res_blocks + 1):
-                 res_block.append(ResnetBlock(in_channels=block_in, out_channels=block_out,
-                                              temb_channels=self.temb_ch, dropout=dropout))
-                 block_in = block_out
-             self.res_blocks.append(nn.ModuleList(res_block))
-             if i_level != self.num_resolutions - 1:
-                 self.upsample_blocks.append(Upsample(block_in, True))
-                 curr_res = curr_res * 2
- 
-         # end
-         self.norm_out = Normalize(block_in)
-         self.conv_out = torch.nn.Conv2d(block_in, out_channels, kernel_size=3, stride=1, padding=1)
- 
-     def forward(self, x):
-         # upsampling
-         h = x
-         for k, i_level in enumerate(range(self.num_resolutions)):
-             for i_block in range(self.num_res_blocks + 1):
-                 h = self.res_blocks[i_level][i_block](h, None)
-             if i_level != self.num_resolutions - 1:
-                 h = self.upsample_blocks[k](h)
-         h = self.norm_out(h)
-         h = nonlinearity(h)
-         h = self.conv_out(h)
-         return h
- 
- 
- class LatentRescaler(nn.Module):
-     def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
-         super().__init__()
-         # residual block, interpolate, residual block
-         self.factor = factor
-         self.conv_in = nn.Conv2d(in_channels, mid_channels, kernel_size=3, stride=1, padding=1)
-         self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
-                                                      out_channels=mid_channels,
-                                                      temb_channels=0,
-                                                      dropout=0.0) for _ in range(depth)])
-         self.attn = AttnBlock(mid_channels)
-         self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
-                                                      out_channels=mid_channels,
-                                                      temb_channels=0,
-                                                      dropout=0.0) for _ in range(depth)])
- 
-         self.conv_out = nn.Conv2d(mid_channels, out_channels, kernel_size=1)
- 
-     def forward(self, x):
-         x = self.conv_in(x)
-         for block in self.res_block1:
-             x = block(x, None)
-         x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2] * self.factor)), int(round(x.shape[3] * self.factor))))
-         x = self.attn(x)
-         for block in self.res_block2:
-             x = block(x, None)
-         x = self.conv_out(x)
-         return x
- 
- 
- class MergedRescaleEncoder(nn.Module):
-     def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
-                  attn_resolutions, dropout=0.0, resamp_with_conv=True,
-                  ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
-         super().__init__()
-         intermediate_chn = ch * ch_mult[-1]
-         self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult,
-                                z_channels=intermediate_chn, double_z=False, resolution=resolution,
-                                attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
-                                out_ch=None)
-         self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
-                                        mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)
- 
-     def forward(self, x):
-         x = self.encoder(x)
-         x = self.rescaler(x)
-         return x
- 
- 
- class MergedRescaleDecoder(nn.Module):
-     def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
-                  dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
-         super().__init__()
-         tmp_chn = z_channels * ch_mult[-1]
-         self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
-                                resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
-                                ch_mult=ch_mult, resolution=resolution, ch=ch)
-         self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
-                                        out_channels=tmp_chn, depth=rescale_module_depth)
- 
-     def forward(self, x):
-         x = self.rescaler(x)
-         x = self.decoder(x)
-         return x
- 
- 
- class Upsampler(nn.Module):
-     def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
-         super().__init__()
-         assert out_size >= in_size
-         num_blocks = int(np.log2(out_size // in_size)) + 1
-         factor_up = 1. + (out_size % in_size)
-         print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
-         self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2 * in_channels,
-                                        out_channels=in_channels)
-         self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
-                                attn_resolutions=[], in_channels=None, ch=in_channels,
-                                ch_mult=[ch_mult for _ in range(num_blocks)])
- 
-     def forward(self, x):
-         x = self.rescaler(x)
-         x = self.decoder(x)
-         return x
- 
- 
- class Resize(nn.Module):
-     def __init__(self, in_channels=None, learned=False, mode="bilinear"):
-         super().__init__()
-         self.with_conv = learned
-         self.mode = mode
-         if self.with_conv:
-             print(f"Note: {self.__class__.__name__} uses learned downsampling and will ignore the fixed {mode} mode")
-             raise NotImplementedError()
-             assert in_channels is not None
-             # no asymmetric padding in torch conv, must do it ourselves
-             self.conv = torch.nn.Conv2d(in_channels, in_channels, kernel_size=4, stride=2, padding=1)
- 
-     def forward(self, x, scale_factor=1.0):
-         if scale_factor == 1.0:
-             return x
-         else:
-             x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor)
-         return x
- 
- class FirstStagePostProcessor(nn.Module):
- 
-     def __init__(self, ch_mult: list, in_channels,
-                  pretrained_model: nn.Module = None,
-                  reshape=False,
-                  n_channels=None,
-                  dropout=0.,
-                  pretrained_config=None):
-         super().__init__()
-         if pretrained_config is None:
-             assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
-             self.pretrained_model = pretrained_model
-         else:
-             assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
-             self.instantiate_pretrained(pretrained_config)
- 
-         self.do_reshape = reshape
- 
-         if n_channels is None:
-             n_channels = self.pretrained_model.encoder.ch
- 
-         self.proj_norm = Normalize(in_channels, num_groups=in_channels // 2)
-         self.proj = nn.Conv2d(in_channels, n_channels, kernel_size=3, stride=1, padding=1)
- 
-         blocks = []
-         downs = []
-         ch_in = n_channels
-         for m in ch_mult:
-             blocks.append(ResnetBlock(in_channels=ch_in, out_channels=m * n_channels, dropout=dropout))
-             ch_in = m * n_channels
-             downs.append(Downsample(ch_in, with_conv=False))
- 
-         self.model = nn.ModuleList(blocks)
-         self.downsampler = nn.ModuleList(downs)
- 
-     def instantiate_pretrained(self, config):
-         model = instantiate_from_config(config)
-         self.pretrained_model = model.eval()
-         # self.pretrained_model.train = False
-         for param in self.pretrained_model.parameters():
-             param.requires_grad = False
- 
-     @torch.no_grad()
-     def encode_with_pretrained(self, x):
-         c = self.pretrained_model.encode(x)
-         if isinstance(c, DiagonalGaussianDistribution):
-             c = c.mode()
-         return c
- 
-     def forward(self, x):
-         z_fs = self.encode_with_pretrained(x)
-         z = self.proj_norm(z_fs)
-         z = self.proj(z)
-         z = nonlinearity(z)
- 
-         for submodel, downmodel in zip(self.model, self.downsampler):
-             z = submodel(z, temb=None)
-             z = downmodel(z)
- 
-         if self.do_reshape:
-             z = rearrange(z, 'b c h w -> b (h w) c')
-         return z
- 
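A quick sanity check for the `Encoder`/`Decoder` pair above is a latent round trip. The sketch below is illustrative only: the config values (`ch=64`, `ch_mult=(1, 2, 4)`, `z_channels=4`, `resolution=64`) are arbitrary assumptions, not settings taken from this repository's configs.

```python
import torch

# Hypothetical config; real values come from the model's ddconfig YAML.
cfg = dict(ch=64, out_ch=3, ch_mult=(1, 2, 4), num_res_blocks=2,
           attn_resolutions=[16], in_channels=3, resolution=64, z_channels=4)

enc = Encoder(double_z=True, **cfg)  # emits 2*z_channels (mean and logvar)
dec = Decoder(**cfg)

x = torch.randn(1, 3, 64, 64)
moments = enc(x)       # (1, 8, 16, 16): 64 -> 32 -> 16 across 3 levels
z = moments[:, :4]     # take the mean half as a stand-in for sampling
rec = dec(z)           # (1, 3, 64, 64)
print(moments.shape, rec.shape)
```
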
spaces/AIGC-Audio/Make_An_Audio_inpaint/ldm/models/autoencoder_multi.py DELETED
@@ -1,201 +0,0 @@
- """
- Unlike autoencoder.py, which uses only a single discriminator when computing the loss,
- this variant adds a multi-window discriminator, so the discriminator optimizer has to
- cover both parameter sets:
- opt_disc = torch.optim.Adam(list(self.loss.discriminator.parameters()) + list(self.loss.discriminator_multi.parameters()),
-                             lr=lr, betas=(0.5, 0.9))
- """
- 
- import os
- import torch
- import pytorch_lightning as pl
- import torch.nn.functional as F
- from contextlib import contextmanager
- 
- from packaging import version
- import numpy as np
- from ldm.modules.diffusionmodules.model import Encoder, Decoder
- from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
- from torch.optim.lr_scheduler import LambdaLR
- from ldm.util import instantiate_from_config
- 
- 
- class AutoencoderKL(pl.LightningModule):
-     def __init__(self,
-                  ddconfig,
-                  lossconfig,
-                  embed_dim,
-                  ckpt_path=None,
-                  ignore_keys=[],
-                  image_key="image",
-                  colorize_nlabels=None,
-                  monitor=None,
-                  ):
-         super().__init__()
-         self.image_key = image_key
-         self.encoder = Encoder(**ddconfig)
-         self.decoder = Decoder(**ddconfig)
-         self.loss = instantiate_from_config(lossconfig)
-         assert ddconfig["double_z"]
-         self.quant_conv = torch.nn.Conv2d(2 * ddconfig["z_channels"], 2 * embed_dim, 1)
-         self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
-         self.embed_dim = embed_dim
-         if colorize_nlabels is not None:
-             assert type(colorize_nlabels) == int
-             self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
-         if monitor is not None:
-             self.monitor = monitor
-         if ckpt_path is not None:
-             self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
- 
-     def init_from_ckpt(self, path, ignore_keys=list()):
-         sd = torch.load(path, map_location="cpu")["state_dict"]
-         keys = list(sd.keys())
-         for k in keys:
-             for ik in ignore_keys:
-                 if k.startswith(ik):
-                     print("Deleting key {} from state_dict.".format(k))
-                     del sd[k]
-         self.load_state_dict(sd, strict=False)
-         print(f"Restored from {path}")
- 
-     def encode(self, x):
-         h = self.encoder(x)
-         moments = self.quant_conv(h)
-         posterior = DiagonalGaussianDistribution(moments)
-         return posterior
- 
-     def decode(self, z):
-         z = self.post_quant_conv(z)
-         dec = self.decoder(z)
-         return dec
- 
-     def forward(self, input, sample_posterior=True):
-         posterior = self.encode(input)
-         if sample_posterior:
-             z = posterior.sample()
-         else:
-             z = posterior.mode()
-         dec = self.decode(z)
-         return dec, posterior
- 
-     def get_input(self, batch, k):
-         x = batch[k]
-         if len(x.shape) == 3:
-             x = x[..., None]
-         x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
-         return x
- 
-     def training_step(self, batch, batch_idx, optimizer_idx):
-         inputs = self.get_input(batch, self.image_key)
-         reconstructions, posterior = self(inputs)
- 
-         if optimizer_idx == 0:
-             # train encoder+decoder+logvar
-             aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
-                                             last_layer=self.get_last_layer(), split="train")
-             self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
-             self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
-             return aeloss
- 
-         if optimizer_idx == 1:
-             # train the discriminator
-             discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
-                                                 last_layer=self.get_last_layer(), split="train")
- 
-             self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
-             self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
-             return discloss
- 
-     def validation_step(self, batch, batch_idx):
-         inputs = self.get_input(batch, self.image_key)
-         reconstructions, posterior = self(inputs)
-         aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
-                                         last_layer=self.get_last_layer(), split="val")
- 
-         discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
-                                             last_layer=self.get_last_layer(), split="val")
- 
-         self.log("val/rec_loss", log_dict_ae["val/rec_loss"])
-         self.log_dict(log_dict_ae)
-         self.log_dict(log_dict_disc)
-         return self.log_dict
- 
-     def test_step(self, batch, batch_idx):
-         inputs = self.get_input(batch, self.image_key)  # inputs shape: (b,c,mel_len,T) or (b,c,h,w)
-         reconstructions, posterior = self(inputs)  # reconstructions: (b,c,mel_len,T) or (b,c,h,w)
-         reconstructions = (reconstructions + 1) / 2  # to mel scale
-         test_ckpt_path = os.path.basename(self.trainer.tested_ckpt_path)
-         savedir = os.path.join(self.trainer.log_dir, f'output_imgs_{test_ckpt_path}', 'fake_class')
-         if not os.path.exists(savedir):
-             os.makedirs(savedir)
- 
-         file_names = batch['f_name']
-         # print(f"reconstructions.shape:{reconstructions.shape}", file_names)
-         reconstructions = reconstructions.cpu().numpy().squeeze(1)  # squeeze the channel dim
-         for b in range(reconstructions.shape[0]):
-             vname_num_split_index = file_names[b].rfind('_')  # file_names[b]: video_name + '_' + num
-             v_n, num = file_names[b][:vname_num_split_index], file_names[b][vname_num_split_index + 1:]
-             save_img_path = os.path.join(savedir, f'{v_n}_sample_{num}.npy')
-             np.save(save_img_path, reconstructions[b])
- 
-         return None
- 
-     def configure_optimizers(self):
-         lr = self.learning_rate
-         opt_ae = torch.optim.Adam(list(self.encoder.parameters()) +
-                                   list(self.decoder.parameters()) +
-                                   list(self.quant_conv.parameters()) +
-                                   list(self.post_quant_conv.parameters()),
-                                   lr=lr, betas=(0.5, 0.9))
-         opt_disc = torch.optim.Adam(list(self.loss.discriminator.parameters()) + list(self.loss.discriminator_multi.parameters()),
-                                     lr=lr, betas=(0.5, 0.9))
-         return [opt_ae, opt_disc], []
- 
-     def get_last_layer(self):
-         return self.decoder.conv_out.weight
- 
-     @torch.no_grad()
-     def log_images(self, batch, only_inputs=False, **kwargs):
-         log = dict()
-         x = self.get_input(batch, self.image_key)
-         x = x.to(self.device)
-         if not only_inputs:
-             xrec, posterior = self(x)
-             if x.shape[1] > 3:
-                 # colorize with random projection
-                 assert xrec.shape[1] > 3
-                 x = self.to_rgb(x)
-                 xrec = self.to_rgb(xrec)
-             log["samples"] = self.decode(torch.randn_like(posterior.sample()))
-             log["reconstructions"] = xrec
-         log["inputs"] = x
-         return log
- 
-     def to_rgb(self, x):
-         assert self.image_key == "segmentation"
-         if not hasattr(self, "colorize"):
-             self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
-         x = F.conv2d(x, weight=self.colorize)
-         x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
-         return x
- 
- 
- class IdentityFirstStage(torch.nn.Module):
-     def __init__(self, *args, vq_interface=False, **kwargs):
-         self.vq_interface = vq_interface  # TODO: Should be true by default but check to not break older stuff
-         super().__init__()
- 
-     def encode(self, x, *args, **kwargs):
-         return x
- 
-     def decode(self, x, *args, **kwargs):
-         return x
- 
-     def quantize(self, x, *args, **kwargs):
-         if self.vq_interface:
-             return x, None, [None, None, None]
-         return x
- 
-     def forward(self, x, *args, **kwargs):
-         return x
spaces/AP123/dreamgaussian/grid_put.py DELETED
@@ -1,300 +0,0 @@
- import torch
- import torch.nn.functional as F
- 
- def stride_from_shape(shape):
-     stride = [1]
-     for x in reversed(shape[1:]):
-         stride.append(stride[-1] * x)
-     return list(reversed(stride))
- 
- 
- def scatter_add_nd(input, indices, values):
-     # input: [..., C], D dimension + C channel
-     # indices: [N, D], long
-     # values: [N, C]
- 
-     D = indices.shape[-1]
-     C = input.shape[-1]
-     size = input.shape[:-1]
-     stride = stride_from_shape(size)
- 
-     assert len(size) == D
- 
-     input = input.view(-1, C)  # [HW, C]
-     flatten_indices = (indices * torch.tensor(stride, dtype=torch.long, device=indices.device)).sum(-1)  # [N]
- 
-     input.scatter_add_(0, flatten_indices.unsqueeze(1).repeat(1, C), values)
- 
-     return input.view(*size, C)
- 
- 
- def scatter_add_nd_with_count(input, count, indices, values, weights=None):
-     # input: [..., C], D dimension + C channel
-     # count: [..., 1], D dimension
-     # indices: [N, D], long
-     # values: [N, C]
- 
-     D = indices.shape[-1]
-     C = input.shape[-1]
-     size = input.shape[:-1]
-     stride = stride_from_shape(size)
- 
-     assert len(size) == D
- 
-     input = input.view(-1, C)  # [HW, C]
-     count = count.view(-1, 1)
- 
-     flatten_indices = (indices * torch.tensor(stride, dtype=torch.long, device=indices.device)).sum(-1)  # [N]
- 
-     if weights is None:
-         weights = torch.ones_like(values[..., :1])
- 
-     input.scatter_add_(0, flatten_indices.unsqueeze(1).repeat(1, C), values)
-     count.scatter_add_(0, flatten_indices.unsqueeze(1), weights)
- 
-     return input.view(*size, C), count.view(*size, 1)
- 
- def nearest_grid_put_2d(H, W, coords, values, return_count=False):
-     # coords: [N, 2], float in [-1, 1]
-     # values: [N, C]
- 
-     C = values.shape[-1]
- 
-     indices = (coords * 0.5 + 0.5) * torch.tensor(
-         [H - 1, W - 1], dtype=torch.float32, device=coords.device
-     )
-     indices = indices.round().long()  # [N, 2]
- 
-     result = torch.zeros(H, W, C, device=values.device, dtype=values.dtype)  # [H, W, C]
-     count = torch.zeros(H, W, 1, device=values.device, dtype=values.dtype)  # [H, W, 1]
-     weights = torch.ones_like(values[..., :1])  # [N, 1]
- 
-     result, count = scatter_add_nd_with_count(result, count, indices, values, weights)
- 
-     if return_count:
-         return result, count
- 
-     mask = (count.squeeze(-1) > 0)
-     result[mask] = result[mask] / count[mask].repeat(1, C)
- 
-     return result
- 
- 
- def linear_grid_put_2d(H, W, coords, values, return_count=False):
-     # coords: [N, 2], float in [-1, 1]
-     # values: [N, C]
- 
-     C = values.shape[-1]
- 
-     indices = (coords * 0.5 + 0.5) * torch.tensor(
-         [H - 1, W - 1], dtype=torch.float32, device=coords.device
-     )
-     indices_00 = indices.floor().long()  # [N, 2]
-     indices_00[:, 0].clamp_(0, H - 2)
-     indices_00[:, 1].clamp_(0, W - 2)
-     indices_01 = indices_00 + torch.tensor([0, 1], dtype=torch.long, device=indices.device)
-     indices_10 = indices_00 + torch.tensor([1, 0], dtype=torch.long, device=indices.device)
-     indices_11 = indices_00 + torch.tensor([1, 1], dtype=torch.long, device=indices.device)
- 
-     h = indices[..., 0] - indices_00[..., 0].float()
-     w = indices[..., 1] - indices_00[..., 1].float()
-     w_00 = (1 - h) * (1 - w)
-     w_01 = (1 - h) * w
-     w_10 = h * (1 - w)
-     w_11 = h * w
- 
-     result = torch.zeros(H, W, C, device=values.device, dtype=values.dtype)  # [H, W, C]
-     count = torch.zeros(H, W, 1, device=values.device, dtype=values.dtype)  # [H, W, 1]
-     weights = torch.ones_like(values[..., :1])  # [N, 1]
- 
-     result, count = scatter_add_nd_with_count(result, count, indices_00, values * w_00.unsqueeze(1), weights * w_00.unsqueeze(1))
-     result, count = scatter_add_nd_with_count(result, count, indices_01, values * w_01.unsqueeze(1), weights * w_01.unsqueeze(1))
-     result, count = scatter_add_nd_with_count(result, count, indices_10, values * w_10.unsqueeze(1), weights * w_10.unsqueeze(1))
-     result, count = scatter_add_nd_with_count(result, count, indices_11, values * w_11.unsqueeze(1), weights * w_11.unsqueeze(1))
- 
-     if return_count:
-         return result, count
- 
-     mask = (count.squeeze(-1) > 0)
-     result[mask] = result[mask] / count[mask].repeat(1, C)
- 
-     return result
- 
- def mipmap_linear_grid_put_2d(H, W, coords, values, min_resolution=32, return_count=False):
-     # coords: [N, 2], float in [-1, 1]
-     # values: [N, C]
- 
-     C = values.shape[-1]
- 
-     result = torch.zeros(H, W, C, device=values.device, dtype=values.dtype)  # [H, W, C]
-     count = torch.zeros(H, W, 1, device=values.device, dtype=values.dtype)  # [H, W, 1]
- 
-     cur_H, cur_W = H, W
- 
-     while min(cur_H, cur_W) > min_resolution:
- 
-         # try to fill the holes
-         mask = (count.squeeze(-1) == 0)
-         if not mask.any():
-             break
- 
-         cur_result, cur_count = linear_grid_put_2d(cur_H, cur_W, coords, values, return_count=True)
-         result[mask] = result[mask] + F.interpolate(cur_result.permute(2, 0, 1).unsqueeze(0).contiguous(), (H, W), mode='bilinear', align_corners=False).squeeze(0).permute(1, 2, 0).contiguous()[mask]
-         count[mask] = count[mask] + F.interpolate(cur_count.view(1, 1, cur_H, cur_W), (H, W), mode='bilinear', align_corners=False).view(H, W, 1)[mask]
-         cur_H //= 2
-         cur_W //= 2
- 
-     if return_count:
-         return result, count
- 
-     mask = (count.squeeze(-1) > 0)
-     result[mask] = result[mask] / count[mask].repeat(1, C)
- 
-     return result
- 
- def nearest_grid_put_3d(H, W, D, coords, values, return_count=False):
-     # coords: [N, 3], float in [-1, 1]
-     # values: [N, C]
- 
-     C = values.shape[-1]
- 
-     indices = (coords * 0.5 + 0.5) * torch.tensor(
-         [H - 1, W - 1, D - 1], dtype=torch.float32, device=coords.device
-     )
-     indices = indices.round().long()  # [N, 3]
- 
-     result = torch.zeros(H, W, D, C, device=values.device, dtype=values.dtype)  # [H, W, D, C]
-     count = torch.zeros(H, W, D, 1, device=values.device, dtype=values.dtype)  # [H, W, D, 1]
-     weights = torch.ones_like(values[..., :1])  # [N, 1]
- 
-     result, count = scatter_add_nd_with_count(result, count, indices, values, weights)
- 
-     if return_count:
-         return result, count
- 
-     mask = (count.squeeze(-1) > 0)
-     result[mask] = result[mask] / count[mask].repeat(1, C)
- 
-     return result
- 
- 
- def linear_grid_put_3d(H, W, D, coords, values, return_count=False):
-     # coords: [N, 3], float in [-1, 1]
-     # values: [N, C]
- 
-     C = values.shape[-1]
- 
-     indices = (coords * 0.5 + 0.5) * torch.tensor(
-         [H - 1, W - 1, D - 1], dtype=torch.float32, device=coords.device
-     )
-     indices_000 = indices.floor().long()  # [N, 3]
-     indices_000[:, 0].clamp_(0, H - 2)
-     indices_000[:, 1].clamp_(0, W - 2)
-     indices_000[:, 2].clamp_(0, D - 2)
- 
-     indices_001 = indices_000 + torch.tensor([0, 0, 1], dtype=torch.long, device=indices.device)
-     indices_010 = indices_000 + torch.tensor([0, 1, 0], dtype=torch.long, device=indices.device)
-     indices_011 = indices_000 + torch.tensor([0, 1, 1], dtype=torch.long, device=indices.device)
-     indices_100 = indices_000 + torch.tensor([1, 0, 0], dtype=torch.long, device=indices.device)
-     indices_101 = indices_000 + torch.tensor([1, 0, 1], dtype=torch.long, device=indices.device)
-     indices_110 = indices_000 + torch.tensor([1, 1, 0], dtype=torch.long, device=indices.device)
-     indices_111 = indices_000 + torch.tensor([1, 1, 1], dtype=torch.long, device=indices.device)
- 
-     h = indices[..., 0] - indices_000[..., 0].float()
-     w = indices[..., 1] - indices_000[..., 1].float()
-     d = indices[..., 2] - indices_000[..., 2].float()
- 
-     # trilinear weights: digit k of each suffix selects the +1 offset along axis k (h, w, d)
-     w_000 = (1 - h) * (1 - w) * (1 - d)
-     w_001 = (1 - h) * (1 - w) * d
-     w_010 = (1 - h) * w * (1 - d)
-     w_011 = (1 - h) * w * d
-     w_100 = h * (1 - w) * (1 - d)
-     w_101 = h * (1 - w) * d
-     w_110 = h * w * (1 - d)
-     w_111 = h * w * d
- 
-     result = torch.zeros(H, W, D, C, device=values.device, dtype=values.dtype)  # [H, W, D, C]
-     count = torch.zeros(H, W, D, 1, device=values.device, dtype=values.dtype)  # [H, W, D, 1]
-     weights = torch.ones_like(values[..., :1])  # [N, 1]
- 
-     result, count = scatter_add_nd_with_count(result, count, indices_000, values * w_000.unsqueeze(1), weights * w_000.unsqueeze(1))
-     result, count = scatter_add_nd_with_count(result, count, indices_001, values * w_001.unsqueeze(1), weights * w_001.unsqueeze(1))
-     result, count = scatter_add_nd_with_count(result, count, indices_010, values * w_010.unsqueeze(1), weights * w_010.unsqueeze(1))
-     result, count = scatter_add_nd_with_count(result, count, indices_011, values * w_011.unsqueeze(1), weights * w_011.unsqueeze(1))
-     result, count = scatter_add_nd_with_count(result, count, indices_100, values * w_100.unsqueeze(1), weights * w_100.unsqueeze(1))
-     result, count = scatter_add_nd_with_count(result, count, indices_101, values * w_101.unsqueeze(1), weights * w_101.unsqueeze(1))
-     result, count = scatter_add_nd_with_count(result, count, indices_110, values * w_110.unsqueeze(1), weights * w_110.unsqueeze(1))
-     result, count = scatter_add_nd_with_count(result, count, indices_111, values * w_111.unsqueeze(1), weights * w_111.unsqueeze(1))
- 
-     if return_count:
-         return result, count
- 
-     mask = (count.squeeze(-1) > 0)
-     result[mask] = result[mask] / count[mask].repeat(1, C)
- 
-     return result
- 
- def mipmap_linear_grid_put_3d(H, W, D, coords, values, min_resolution=32, return_count=False):
-     # coords: [N, 3], float in [-1, 1]
-     # values: [N, C]
- 
-     C = values.shape[-1]
- 
-     result = torch.zeros(H, W, D, C, device=values.device, dtype=values.dtype)  # [H, W, D, C]
-     count = torch.zeros(H, W, D, 1, device=values.device, dtype=values.dtype)  # [H, W, D, 1]
-     cur_H, cur_W, cur_D = H, W, D
- 
-     while min(min(cur_H, cur_W), cur_D) > min_resolution:
- 
-         # try to fill the holes
-         mask = (count.squeeze(-1) == 0)
-         if not mask.any():
-             break
- 
-         cur_result, cur_count = linear_grid_put_3d(cur_H, cur_W, cur_D, coords, values, return_count=True)
-         result[mask] = result[mask] + F.interpolate(cur_result.permute(3, 0, 1, 2).unsqueeze(0).contiguous(), (H, W, D), mode='trilinear', align_corners=False).squeeze(0).permute(1, 2, 3, 0).contiguous()[mask]
-         count[mask] = count[mask] + F.interpolate(cur_count.view(1, 1, cur_H, cur_W, cur_D), (H, W, D), mode='trilinear', align_corners=False).view(H, W, D, 1)[mask]
-         cur_H //= 2
-         cur_W //= 2
-         cur_D //= 2
- 
-     if return_count:
-         return result, count
- 
-     mask = (count.squeeze(-1) > 0)
-     result[mask] = result[mask] / count[mask].repeat(1, C)
- 
-     return result
- 
- 
- def grid_put(shape, coords, values, mode='linear-mipmap', min_resolution=32, return_raw=False):
-     # shape: [D], list/tuple
-     # coords: [N, D], float in [-1, 1]
-     # values: [N, C]
- 
-     D = len(shape)
-     assert D in [2, 3], f'only support D == 2 or 3, but got D == {D}'
- 
-     if mode == 'nearest':
-         if D == 2:
-             return nearest_grid_put_2d(*shape, coords, values, return_raw)
-         else:
-             return nearest_grid_put_3d(*shape, coords, values, return_raw)
-     elif mode == 'linear':
-         if D == 2:
-             return linear_grid_put_2d(*shape, coords, values, return_raw)
-         else:
-             return linear_grid_put_3d(*shape, coords, values, return_raw)
-     elif mode == 'linear-mipmap':
-         if D == 2:
-             return mipmap_linear_grid_put_2d(*shape, coords, values, min_resolution, return_raw)
-         else:
-             return mipmap_linear_grid_put_3d(*shape, coords, values, min_resolution, return_raw)
-     else:
-         raise NotImplementedError(f"got mode {mode}")
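A minimal usage sketch for `grid_put`, assuming the functions above are importable from this module; the texture size and point count are arbitrary. It splats N colored points into a 2D texture and normalizes by the accumulated weights:

```python
import torch

N, H, W = 10_000, 256, 256
coords = torch.rand(N, 2) * 2 - 1   # float coords in [-1, 1]
values = torch.rand(N, 3)           # per-point RGB

# return_raw=True returns the unnormalized accumulators (value sum, weight count)
tex, cnt = grid_put((H, W), coords, values, mode='linear', return_raw=True)
tex = torch.where(cnt > 0, tex / cnt.clamp(min=1e-8), torch.zeros_like(tex))
print(tex.shape)                    # (256, 256, 3)
```
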
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/README.md DELETED
@@ -1,140 +0,0 @@
- # ResNet
- 
- > [Deep Residual Learning for Image Recognition](https://openaccess.thecvf.com/content_cvpr_2016/html/He_Deep_Residual_Learning_CVPR_2016_paper.html)
- 
- <!-- [ALGORITHM] -->
- 
- ## Introduction
- 
- **Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of
- learning unreferenced functions. In mainstream previous work, such as VGG, the network is a stack
- of layers and every layer attempts to fit a desired underlying mapping. In ResNets, a few stacked layers are
- grouped as a block, and the layers in a block attempt to learn a residual mapping.
- 
- Formally, denoting the desired underlying mapping of a block as $\mathcal{H}(x)$, split the underlying mapping
- into the sum of the identity and the residual mapping as $\mathcal{H}(x) = x + \mathcal{F}(x)$, and let the
- stacked non-linear layers fit the residual mapping $\mathcal{F}(x)$.
- 
- Many works have shown that this method makes deep neural networks easier to optimize and lets them gain accuracy from
- considerably increased depth. Recently, the residual structure has been widely used in various models.
- 
- <div align=center>
- <img src="https://user-images.githubusercontent.com/26739999/142574068-60cfdeea-c4ec-4c49-abb2-5dc2facafc3b.png" width="40%"/>
- </div>
- 
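As a concrete illustration of $\mathcal{H}(x) = x + \mathcal{F}(x)$, here is a minimal residual block sketch in PyTorch. It is a simplified stand-in, not the exact `BasicBlock`/`Bottleneck` implementation used by mmpretrain:

```python
import torch.nn as nn

class ToyResidualBlock(nn.Module):
    """H(x) = x + F(x): the stacked layers only have to learn the residual F."""
    def __init__(self, channels):
        super().__init__()
        self.f = nn.Sequential(                       # F(x)
            nn.Conv2d(channels, channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(channels),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(x + self.f(x))               # identity shortcut + residual
```
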
- ## Abstract
- 
- <details>
- 
- <summary>Show the paper's abstract</summary>
- 
- <br>
- Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers---8x deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers.
- 
- The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.
- </br>
- 
- </details>
- 
- ## How to use it?
- 
- <!-- [TABS-BEGIN] -->
- 
- **Predict image**
- 
- ```python
- from mmpretrain import inference_model
- 
- predict = inference_model('resnet18_8xb16_cifar10', 'demo/bird.JPEG')
- print(predict['pred_class'])
- print(predict['pred_score'])
- ```
- 
- **Use the model**
- 
- ```python
- import torch
- from mmpretrain import get_model
- 
- model = get_model('resnet18_8xb16_cifar10', pretrained=True)
- inputs = torch.rand(1, 3, 224, 224)
- out = model(inputs)
- print(type(out))
- # To extract features.
- feats = model.extract_feat(inputs)
- print(type(feats))
- ```
- 
- **Train/Test Command**
- 
- Prepare your dataset according to the [docs](https://mmpretrain.readthedocs.io/en/latest/user_guides/dataset_prepare.html#prepare-dataset).
- 
- Train:
- 
- ```shell
- python tools/train.py configs/resnet/resnet18_8xb16_cifar10.py
- ```
- 
- Test:
- 
- ```shell
- python tools/test.py configs/resnet/resnet18_8xb16_cifar10.py https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth
- ```
- 
- <!-- [TABS-END] -->
- 
- ## Models and results
- 
- ### Image Classification on ImageNet-1k
- 
- | Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download |
- | :--------------------------------- | :----------: | :--------: | :-------: | :-------: | :-------: | :-------------------------------------------: | :----------------------------------------------------------------------: |
- | `resnet18_8xb32_in1k` | From scratch | 11.69 | 1.82 | 69.90 | 89.43 | [config](resnet18_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_8xb32_in1k_20210831-fbbb1da6.json) |
- | `resnet34_8xb32_in1k` | From scratch | 21.8 | 3.68 | 73.62 | 91.59 | [config](resnet34_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_8xb32_in1k_20210831-f257d4e6.json) |
- | `resnet50_8xb32_in1k` | From scratch | 25.56 | 4.12 | 76.55 | 93.06 | [config](resnet50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.json) |
- | `resnet101_8xb32_in1k` | From scratch | 44.55 | 7.85 | 77.97 | 94.06 | [config](resnet101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_8xb32_in1k_20210831-539c63f8.json) |
- | `resnet152_8xb32_in1k` | From scratch | 60.19 | 11.58 | 78.48 | 94.13 | [config](resnet152_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_8xb32_in1k_20210901-4d7582fa.json) |
- | `resnetv1d50_8xb32_in1k` | From scratch | 25.58 | 4.36 | 77.54 | 93.57 | [config](resnetv1d50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d50_b32x8_imagenet_20210531-db14775a.json) |
- | `resnetv1d101_8xb32_in1k` | From scratch | 44.57 | 8.09 | 78.93 | 94.48 | [config](resnetv1d101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d101_b32x8_imagenet_20210531-6e13bcd3.json) |
- | `resnetv1d152_8xb32_in1k` | From scratch | 60.21 | 11.82 | 79.41 | 94.70 | [config](resnetv1d152_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1d152_b32x8_imagenet_20210531-278cf22a.json) |
- | `resnet50_8xb32-fp16_in1k` | From scratch | 25.56 | 4.12 | 76.30 | 93.07 | [config](resnet50_8xb32-fp16_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/fp16/resnet50_batch256_fp16_imagenet_20210320-b3964210.json) |
- | `resnet50_8xb256-rsb-a1-600e_in1k` | From scratch | 25.56 | 4.12 | 80.12 | 94.78 | [config](resnet50_8xb256-rsb-a1-600e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.json) |
- | `resnet50_8xb256-rsb-a2-300e_in1k` | From scratch | 25.56 | 4.12 | 79.55 | 94.37 | [config](resnet50_8xb256-rsb-a2-300e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a2-300e_in1k_20211228-0fd8be6e.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a2-300e_in1k_20211228-0fd8be6e.json) |
- | `resnet50_8xb256-rsb-a3-100e_in1k` | From scratch | 25.56 | 4.12 | 78.30 | 93.80 | [config](resnet50_8xb256-rsb-a3-100e_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a3-100e_in1k_20211228-3493673c.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a3-100e_in1k_20211228-3493673c.json) |
- | `resnetv1c50_8xb32_in1k` | From scratch | 25.58 | 4.36 | 77.01 | 93.58 | [config](resnetv1c50_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c50_8xb32_in1k_20220214-3343eccd.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c50_8xb32_in1k_20220214-3343eccd.json) |
- | `resnetv1c101_8xb32_in1k` | From scratch | 44.57 | 8.09 | 78.30 | 94.27 | [config](resnetv1c101_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c101_8xb32_in1k_20220214-434fe45f.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c101_8xb32_in1k_20220214-434fe45f.json) |
- | `resnetv1c152_8xb32_in1k` | From scratch | 60.21 | 11.82 | 78.76 | 94.41 | [config](resnetv1c152_8xb32_in1k.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c152_8xb32_in1k_20220214-c013291f.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnetv1c152_8xb32_in1k_20220214-c013291f.json) |
- 
- ### Image Classification on CIFAR-10
- 
- | Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download |
- | :------------------------ | :----------: | :--------: | :-------: | :-------: | :----------------------------------: | :-------------------------------------------------------------------------------------------------: |
- | `resnet18_8xb16_cifar10` | From scratch | 11.17 | 0.56 | 94.82 | [config](resnet18_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet18_b16x8_cifar10_20210528-bd6371c8.json) |
- | `resnet34_8xb16_cifar10` | From scratch | 21.28 | 1.16 | 95.34 | [config](resnet34_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet34_b16x8_cifar10_20210528-a8aa36a6.json) |
- | `resnet50_8xb16_cifar10` | From scratch | 23.52 | 1.31 | 95.55 | [config](resnet50_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar10_20210528-f54bfad9.json) |
- | `resnet101_8xb16_cifar10` | From scratch | 42.51 | 2.52 | 95.58 | [config](resnet101_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet101_b16x8_cifar10_20210528-2d29e936.json) |
- | `resnet152_8xb16_cifar10` | From scratch | 58.16 | 3.74 | 95.76 | [config](resnet152_8xb16_cifar10.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet152_b16x8_cifar10_20210528-3e8e9178.json) |
- 
- ### Image Classification on CIFAR-100
- 
- | Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Top-5 (%) | Config | Download |
- | :------------------------ | :----------: | :--------: | :-------: | :-------: | :-------: | :----------------------------------: | :----------------------------------------------------------------------------------------: |
- | `resnet50_8xb16_cifar100` | From scratch | 23.71 | 1.31 | 79.90 | 95.19 | [config](resnet50_8xb16_cifar100.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_b16x8_cifar100_20210528-67b58a1b.json) |
- 
- ### Image Classification on CUB-200-2011
- 
- | Model | Pretrain | Params (M) | Flops (G) | Top-1 (%) | Config | Download |
- | :------------------ | :----------: | :--------: | :-------: | :-------: | :----------------------------: | :-------------------------------------------------------------------------------------------------------------: |
- | `resnet50_8xb8_cub` | From scratch | 23.92 | 16.48 | 88.45 | [config](resnet50_8xb8_cub.py) | [model](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cub_20220307-57840e60.pth) \| [log](https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb8_cub_20220307-57840e60.json) |
- 
- ## Citation
- 
- ```bibtex
- @inproceedings{he2016deep,
-   title={Deep residual learning for image recognition},
-   author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian},
-   booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
-   pages={770--778},
-   year={2016}
- }
- ```
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/tester.py DELETED
@@ -1,63 +0,0 @@
- from .config_manager import ConfigManager
- import os
- from typing import Dict
- 
- from torch import nn
- from tqdm import tqdm
- from tqdm import trange
- 
- from dataset import load_iterators
- from trainer import GeneralTrainer
- 
- 
- class DiacritizationTester(GeneralTrainer):
-     def __init__(self, config_path: str, model_kind: str) -> None:
-         self.config_path = config_path
-         self.model_kind = model_kind
-         self.config_manager = ConfigManager(
-             config_path=config_path, model_kind=model_kind
-         )
-         self.config = self.config_manager.config
-         self.pad_idx = 0
-         self.criterion = nn.CrossEntropyLoss(ignore_index=self.pad_idx)
-         self.set_device()
- 
-         self.text_encoder = self.config_manager.text_encoder
-         self.start_symbol_id = self.text_encoder.start_symbol_id
- 
-         self.model = self.config_manager.get_model()
- 
-         self.model = self.model.to(self.device)
- 
-         self.load_model(model_path=self.config["test_model_path"], load_optimizer=False)
-         self.load_diacritizer()
-         self.diacritizer.set_model(self.model)
- 
-         self.initialize_model()
- 
-         self.print_config()
- 
-     def run(self):
-         self.config_manager.config["load_training_data"] = False
-         self.config_manager.config["load_validation_data"] = False
-         self.config_manager.config["load_test_data"] = True
-         _, test_iterator, _ = load_iterators(self.config_manager)
-         tqdm_eval = trange(0, len(test_iterator), leave=True)
-         tqdm_error_rates = trange(0, len(test_iterator), leave=True)
- 
-         loss, acc = self.evaluate(test_iterator, tqdm_eval, log=False)
-         error_rates, _ = self.evaluate_with_error_rates(test_iterator, tqdm_error_rates, log=False)
- 
-         tqdm_eval.close()
-         tqdm_error_rates.close()
- 
-         WER = error_rates["WER"]
-         DER = error_rates["DER"]
-         DER1 = error_rates["DER*"]
-         WER1 = error_rates["WER*"]
- 
-         error_rates = f"DER: {DER}, WER: {WER}, DER*: {DER1}, WER*: {WER1}"
- 
-         print(f"global step : {self.global_step}")
-         print(f"Evaluate {self.global_step}: accuracy, {acc}, loss: {loss}")
-         print(f"WER/DER {self.global_step}: {error_rates}")
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Liaobots.py DELETED
@@ -1,106 +0,0 @@
- from __future__ import annotations
- 
- import uuid
- 
- from aiohttp import ClientSession
- 
- from ..typing import AsyncGenerator
- from .base_provider import AsyncGeneratorProvider
- 
- models = {
-     "gpt-4": {
-         "id": "gpt-4",
-         "name": "GPT-4",
-         "maxLength": 24000,
-         "tokenLimit": 8000,
-     },
-     "gpt-3.5-turbo": {
-         "id": "gpt-3.5-turbo",
-         "name": "GPT-3.5",
-         "maxLength": 12000,
-         "tokenLimit": 4000,
-     },
-     "gpt-3.5-turbo-16k": {
-         "id": "gpt-3.5-turbo-16k",
-         "name": "GPT-3.5-16k",
-         "maxLength": 48000,
-         "tokenLimit": 16000,
-     },
- }
- 
- class Liaobots(AsyncGeneratorProvider):
-     url = "https://liaobots.site"
-     working = True
-     supports_gpt_35_turbo = True
-     supports_gpt_4 = True
-     _auth_code = None
- 
-     @classmethod
-     async def create_async_generator(
-         cls,
-         model: str,
-         messages: list[dict[str, str]],
-         auth: str = None,
-         proxy: str = None,
-         **kwargs
-     ) -> AsyncGenerator:
-         model = model if model in models else "gpt-3.5-turbo"
-         headers = {
-             "authority": "liaobots.com",
-             "content-type": "application/json",
-             "origin": cls.url,
-             "referer": cls.url + "/",
-             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
-         }
-         async with ClientSession(
-             headers=headers
-         ) as session:
-             cls._auth_code = auth if isinstance(auth, str) else cls._auth_code
-             if not cls._auth_code:
-                 async with session.post(
-                     "https://liaobots.work/recaptcha/api/login",
-                     proxy=proxy,
-                     data={"token": "abcdefghijklmnopqrst"},
-                     verify_ssl=False
-                 ) as response:
-                     response.raise_for_status()
-                 async with session.post(
-                     "https://liaobots.work/api/user",
-                     proxy=proxy,
-                     json={"authcode": ""},
-                     verify_ssl=False
-                 ) as response:
-                     response.raise_for_status()
-                     cls._auth_code = (await response.json(content_type=None))["authCode"]
-             data = {
-                 "conversationId": str(uuid.uuid4()),
-                 "model": models[model],
-                 "messages": messages,
-                 "key": "",
-                 "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully.",
-             }
-             async with session.post(
-                 "https://liaobots.work/api/chat",
-                 proxy=proxy,
-                 json=data,
-                 headers={"x-auth-code": cls._auth_code},
-                 verify_ssl=False
-             ) as response:
-                 response.raise_for_status()
-                 async for stream in response.content.iter_any():
-                     if stream:
-                         yield stream.decode()
- 
- 
-     @classmethod
-     @property
-     def params(cls):
-         params = [
-             ("model", "str"),
-             ("messages", "list[dict[str, str]]"),
-             ("stream", "bool"),
-             ("proxy", "str"),
-             ("auth", "str"),
-         ]
-         param = ", ".join([": ".join(p) for p in params])
-         return f"g4f.provider.{cls.__name__} supports: ({param})"
spaces/Aditya757864/SentimentAnalysis/app.py DELETED
@@ -1,14 +0,0 @@
- import gradio as gr
- from transformers import pipeline
- sentiment = pipeline('sentiment-analysis')
- def get_sentiment(input_text):
-     return sentiment(input_text)
- iface = gr.Interface(fn=get_sentiment,
-                      inputs='text',
-                      outputs=['text'],
-                      title='Sentiment Analysis',
-                      examples=['The movie was very bad', 'Every day is a new opportunity.'],
-                      article='This project is for software engineering with team members Aditya Jadhav, Sujal Kuthe, Sujal Wakalkar, and Adesh Ingle. We developed a web application for sentiment analysis that takes text data as input and classifies whether it is positive or negative.',
-                      thumbnail='/content/sentiment-analysis.png',
-                      theme=gr.themes.Soft())
- iface.launch()
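Note that `pipeline('sentiment-analysis')` returns a list of label/score dictionaries, which is what the text output above renders; a quick check outside Gradio (the score shown is illustrative, not a recorded result):

```python
from transformers import pipeline

sentiment = pipeline('sentiment-analysis')  # downloads a default English sentiment model
print(sentiment('The movie was very bad'))
# e.g. [{'label': 'NEGATIVE', 'score': 0.9997}]  (score value illustrative)
```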
spaces/Aditya9790/yolo7-object-tracking/train.py DELETED
@@ -1,705 +0,0 @@
- import argparse
- import logging
- import math
- import os
- import random
- import time
- from copy import deepcopy
- from pathlib import Path
- from threading import Thread
- 
- import numpy as np
- import torch.distributed as dist
- import torch.nn as nn
- import torch.nn.functional as F
- import torch.optim as optim
- import torch.optim.lr_scheduler as lr_scheduler
- import torch.utils.data
- import yaml
- from torch.cuda import amp
- from torch.nn.parallel import DistributedDataParallel as DDP
- from torch.utils.tensorboard import SummaryWriter
- from tqdm import tqdm
- 
- import test  # import test.py to get mAP after each epoch
- from models.experimental import attempt_load
- from models.yolo import Model
- from utils.autoanchor import check_anchors
- from utils.datasets import create_dataloader
- from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \
-     fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \
-     check_requirements, print_mutation, set_logging, one_cycle, colorstr
- from utils.google_utils import attempt_download
- from utils.loss import ComputeLoss, ComputeLossOTA
- from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
- from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel
- from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume
- 
- logger = logging.getLogger(__name__)
- 
- 
- def train(hyp, opt, device, tb_writer=None):
-     logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))
-     save_dir, epochs, batch_size, total_batch_size, weights, rank, freeze = \
-         Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.freeze
- 
-     # Directories
-     wdir = save_dir / 'weights'
-     wdir.mkdir(parents=True, exist_ok=True)  # make dir
-     last = wdir / 'last.pt'
-     best = wdir / 'best.pt'
-     results_file = save_dir / 'results.txt'
- 
-     # Save run settings
-     with open(save_dir / 'hyp.yaml', 'w') as f:
-         yaml.dump(hyp, f, sort_keys=False)
-     with open(save_dir / 'opt.yaml', 'w') as f:
-         yaml.dump(vars(opt), f, sort_keys=False)
- 
-     # Configure
-     plots = not opt.evolve  # create plots
-     cuda = device.type != 'cpu'
-     init_seeds(2 + rank)
-     with open(opt.data) as f:
-         data_dict = yaml.load(f, Loader=yaml.SafeLoader)  # data dict
-     is_coco = opt.data.endswith('coco.yaml')
- 
-     # Logging- Doing this before checking the dataset. Might update data_dict
-     loggers = {'wandb': None}  # loggers dict
-     if rank in [-1, 0]:
-         opt.hyp = hyp  # add hyperparameters
-         run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None
-         wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
-         loggers['wandb'] = wandb_logger.wandb
-         data_dict = wandb_logger.data_dict
-         if wandb_logger.wandb:
-             weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp  # WandbLogger might update weights, epochs if resuming
- 
-     nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes
-     names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names
-     assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check
- 
-     # Model
-     pretrained = weights.endswith('.pt')
-     if pretrained:
-         with torch_distributed_zero_first(rank):
-             attempt_download(weights)  # download if not found locally
-         ckpt = torch.load(weights, map_location=device)  # load checkpoint
-         model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
-         exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else []  # exclude keys
-         state_dict = ckpt['model'].float().state_dict()  # to FP32
-         state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
-         model.load_state_dict(state_dict, strict=False)  # load
-         logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report
-     else:
-         model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
-     with torch_distributed_zero_first(rank):
-         check_dataset(data_dict)  # check
-     train_path = data_dict['train']
-     test_path = data_dict['val']
- 
-     # Freeze
-     freeze = [f'model.{x}.' for x in (freeze if len(freeze) > 1 else range(freeze[0]))]  # parameter names to freeze (full or partial)
-     for k, v in model.named_parameters():
-         v.requires_grad = True  # train all layers
-         if any(x in k for x in freeze):
-             print('freezing %s' % k)
-             v.requires_grad = False
- 
-     # Optimizer
-     nbs = 64  # nominal batch size
-     accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
-     hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay
-     logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")
- 
-     pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
-     for k, v in model.named_modules():
-         if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):
-             pg2.append(v.bias)  # biases
-         if isinstance(v, nn.BatchNorm2d):
-             pg0.append(v.weight)  # no decay
-         elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):
-             pg1.append(v.weight)  # apply decay
-         if hasattr(v, 'im'):
-             if hasattr(v.im, 'implicit'):
-                 pg0.append(v.im.implicit)
-             else:
-                 for iv in v.im:
-                     pg0.append(iv.implicit)
-         if hasattr(v, 'imc'):
-             if hasattr(v.imc, 'implicit'):
-                 pg0.append(v.imc.implicit)
-             else:
-                 for iv in v.imc:
-                     pg0.append(iv.implicit)
-         if hasattr(v, 'imb'):
-             if hasattr(v.imb, 'implicit'):
-                 pg0.append(v.imb.implicit)
-             else:
-                 for iv in v.imb:
-                     pg0.append(iv.implicit)
-         if hasattr(v, 'imo'):
-             if hasattr(v.imo, 'implicit'):
-                 pg0.append(v.imo.implicit)
-             else:
-                 for iv in v.imo:
-                     pg0.append(iv.implicit)
-         if hasattr(v, 'ia'):
-             if hasattr(v.ia, 'implicit'):
-                 pg0.append(v.ia.implicit)
-             else:
-                 for iv in v.ia:
-                     pg0.append(iv.implicit)
-         if hasattr(v, 'attn'):
-             if hasattr(v.attn, 'logit_scale'):
-                 pg0.append(v.attn.logit_scale)
-             if hasattr(v.attn, 'q_bias'):
-                 pg0.append(v.attn.q_bias)
-             if hasattr(v.attn, 'v_bias'):
-                 pg0.append(v.attn.v_bias)
-             if hasattr(v.attn, 'relative_position_bias_table'):
-                 pg0.append(v.attn.relative_position_bias_table)
-         if hasattr(v, 'rbr_dense'):
-             if hasattr(v.rbr_dense, 'weight_rbr_origin'):
-                 pg0.append(v.rbr_dense.weight_rbr_origin)
-             if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'):
-                 pg0.append(v.rbr_dense.weight_rbr_avg_conv)
-             if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'):
-                 pg0.append(v.rbr_dense.weight_rbr_pfir_conv)
-             if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'):
-                 pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1)
-             if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'):
-                 pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2)
-             if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'):
-                 pg0.append(v.rbr_dense.weight_rbr_gconv_dw)
-             if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'):
-                 pg0.append(v.rbr_dense.weight_rbr_gconv_pw)
-             if hasattr(v.rbr_dense, 'vector'):
-                 pg0.append(v.rbr_dense.vector)
- 
-     if opt.adam:
-         optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum
-     else:
-         optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
- 
-     optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
-     optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
-     logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
-     del pg0, pg1, pg2
- 
-     # Scheduler https://arxiv.org/pdf/1812.01187.pdf
-     # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
-     if opt.linear_lr:
-         lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf']  # linear
-     else:
-         lf = one_cycle(1, hyp['lrf'], epochs)  # cosine 1->hyp['lrf']
-     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
-     # plot_lr_scheduler(optimizer, scheduler, epochs)
- 
-     # EMA
-     ema = ModelEMA(model) if rank in [-1, 0] else None
- 
-     # Resume
-     start_epoch, best_fitness = 0, 0.0
-     if pretrained:
-         # Optimizer
-         if ckpt['optimizer'] is not None:
-             optimizer.load_state_dict(ckpt['optimizer'])
-             best_fitness = ckpt['best_fitness']
- 
-         # EMA
-         if ema and ckpt.get('ema'):
-             ema.ema.load_state_dict(ckpt['ema'].float().state_dict())
-             ema.updates = ckpt['updates']
- 
-         # Results
-         if ckpt.get('training_results') is not None:
-             results_file.write_text(ckpt['training_results'])  # write results.txt
- 
-         # Epochs
-         start_epoch = ckpt['epoch'] + 1
-         if opt.resume:
-             assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)
-         if epochs < start_epoch:
-             logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
-                         (weights, ckpt['epoch'], epochs))
-             epochs += ckpt['epoch']  # finetune additional epochs
- 
-         del ckpt, state_dict
- 
-     # Image sizes
-     gs = max(int(model.stride.max()), 32)  # grid size (max stride)
-     nl = model.model[-1].nl  # number of detection layers (used for scaling hyp['obj'])
-     imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples
- 
-     # DP mode
-     if cuda and rank == -1 and torch.cuda.device_count() > 1:
-         model = torch.nn.DataParallel(model)
- 
-     # SyncBatchNorm
-     if opt.sync_bn and cuda and rank != -1:
-         model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
-         logger.info('Using SyncBatchNorm()')
- 
-     # Trainloader
-     dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
-                                             hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,
-                                             world_size=opt.world_size, workers=opt.workers,
-                                             image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '))
-     mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
-     nb = len(dataloader)  # number of batches
-     assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)
- 
-     # Process 0
-     if rank in [-1, 0]:
-         testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt,  # testloader
-                                        hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,
-                                        world_size=opt.world_size, workers=opt.workers,
-                                        pad=0.5, prefix=colorstr('val: '))[0]
- 
-         if not opt.resume:
-             labels = np.concatenate(dataset.labels, 0)
-             c = torch.tensor(labels[:, 0])  # classes
-             # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency
-             # model._initialize_biases(cf.to(device))
-             if plots:
-                 #plot_labels(labels, names, save_dir, loggers)
-                 if tb_writer:
-                     tb_writer.add_histogram('classes', c, 0)
- 
-             # Anchors
-             if not opt.noautoanchor:
-                 check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)
-             model.half().float()  # pre-reduce anchor precision
- 
-     # DDP mode
-     if cuda and rank != -1:
-         model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,
-                     # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698
-                     find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))
- 
-     # Model parameters
-     hyp['box'] *= 3. / nl  # scale to layers
-     hyp['cls'] *= nc / 80. * 3. / nl  # scale to classes and layers
-     hyp['obj'] *= (imgsz / 640) ** 2 * 3. / nl  # scale to image size and layers
-     hyp['label_smoothing'] = opt.label_smoothing
-     model.nc = nc  # attach number of classes to model
-     model.hyp = hyp  # attach hyperparameters to model
-     model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
-     model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
-     model.names = names
- 
-     # Start training
-     t0 = time.time()
-     nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
-     # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training
-     maps = np.zeros(nc)  # mAP per class
-     results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
-     scheduler.last_epoch = start_epoch - 1  # do not move
-     scaler = amp.GradScaler(enabled=cuda)
-     compute_loss_ota = ComputeLossOTA(model)  # init loss class
-     compute_loss = ComputeLoss(model)  # init loss class
-     logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n'
-                 f'Using {dataloader.num_workers} dataloader workers\n'
-                 f'Logging results to {save_dir}\n'
-                 f'Starting training for {epochs} epochs...')
-     torch.save(model, wdir / 'init.pt')
-     for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
-         model.train()
- 
-         # Update image weights (optional)
-         if opt.image_weights:
-             # Generate indices
-             if rank in [-1, 0]:
-                 cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
-                 iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
-                 dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
-             # Broadcast if DDP
-             if rank != -1:
-                 indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
-                 dist.broadcast(indices, 0)
-                 if rank != 0:
-                     dataset.indices = indices.cpu().numpy()
- 
-         # Update mosaic border
-         # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)
-         # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders
- 
-         mloss = torch.zeros(4, device=device)  # mean losses
-         if rank != -1:
-             dataloader.sampler.set_epoch(epoch)
-         pbar = enumerate(dataloader)
-         logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))
-         if rank in [-1, 0]:
-             pbar = tqdm(pbar, total=nb)  # progress bar
-         optimizer.zero_grad()
-         for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
-             ni = i + nb * epoch  # number integrated batches (since train start)
-             imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0
- 
-             # Warmup
-             if ni <= nw:
-                 xi = [0, nw]  # x interp
-                 # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou)
-                 accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
-                 for j, x in enumerate(optimizer.param_groups):
-                     # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
-                     x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
-                     if 'momentum' in x:
-                         x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])
- 
-             # Multi-scale
-             if opt.multi_scale:
-                 sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
-                 sf = sz / max(imgs.shape[2:])  # scale factor
-                 if sf != 1:
-                     ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
-                     imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
- 
-             # Forward
-             with amp.autocast(enabled=cuda):
-                 pred = model(imgs)  # forward
-                 if 'loss_ota' not in hyp or hyp['loss_ota'] == 1:
-                     loss, loss_items = compute_loss_ota(pred, targets.to(device), imgs)  # loss scaled by batch_size
-                 else:
-                     loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
-                 if rank != -1:
-                     loss *= opt.world_size  # gradient averaged between devices in DDP mode
-                 if opt.quad:
-                     loss *= 4.
- 
-             # Backward
-             scaler.scale(loss).backward()
- 
-             # Optimize
-             if ni % accumulate == 0:
-                 scaler.step(optimizer)  # optimizer.step
-                 scaler.update()
-                 optimizer.zero_grad()
-                 if ema:
-                     ema.update(model)
- 
-             # Print
-             if rank in [-1, 0]:
-                 mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
-                 mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
-                 s = ('%10s' * 2 + '%10.4g' * 6) % (
-                     '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])
-                 pbar.set_description(s)
- 
-                 # Plot
-                 if plots and ni < 10:
-                     f = save_dir / f'train_batch{ni}.jpg'  # filename
-                     Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
-                     # if tb_writer:
-                     #     tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)
-                     #     tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), [])  # add model graph
-                 elif plots and ni == 10 and wandb_logger.wandb:
-                     wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in
-                                                   save_dir.glob('train*.jpg') if x.exists()]})
- 
-             # end batch ------------------------------------------------------------------------------------------------
-         # end epoch ----------------------------------------------------------------------------------------------------
- 
-         # Scheduler
-         lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard
-         scheduler.step()
- 
-         # DDP process 0 or single-GPU
-         if rank in [-1, 0]:
-             # mAP
-             ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])
-             final_epoch = epoch + 1 == epochs
-             if not opt.notest or final_epoch:  # Calculate mAP
-                 wandb_logger.current_epoch = epoch + 1
-                 results, maps, times = test.test(data_dict,
-                                                  batch_size=batch_size * 2,
-                                                  imgsz=imgsz_test,
-                                                  model=ema.ema,
-                                                  single_cls=opt.single_cls,
-                                                  dataloader=testloader,
-                                                  save_dir=save_dir,
-                                                  verbose=nc < 50 and final_epoch,
-                                                  plots=plots and final_epoch,
-                                                  wandb_logger=wandb_logger,
-                                                  compute_loss=compute_loss,
-                                                  is_coco=is_coco,
-                                                  v5_metric=opt.v5_metric)
- 
-             # Write
-             with open(results_file, 'a') as f:
-                 f.write(s + '%10.4g' * 7 % results + '\n')  # append metrics, val_loss
-             if len(opt.name) and opt.bucket:
-                 os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name))
- 
-             # Log
-             tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss',  # train loss
-                     'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95',
-                     'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss
-                     'x/lr0', 'x/lr1', 'x/lr2']  # params
-             for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
-                 if tb_writer:
-                     tb_writer.add_scalar(tag, x, epoch)  # tensorboard
-                 if wandb_logger.wandb:
-                     wandb_logger.log({tag: x})  # W&B
- 
-             # Update best mAP
-             fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
-             if fi > best_fitness:
-                 best_fitness = fi
-             wandb_logger.end_epoch(best_result=best_fitness == fi)
- 
-             # Save model
-             if (not opt.nosave) or (final_epoch and not opt.evolve):  # if save
-                 ckpt = {'epoch': epoch,
-                         'best_fitness': best_fitness,
-                         'training_results': results_file.read_text(),
-                         'model': deepcopy(model.module if is_parallel(model) else model).half(),
-                         'ema': deepcopy(ema.ema).half(),
-                         'updates': ema.updates,
-                         'optimizer': optimizer.state_dict(),
-                         'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}
- 
-                 # Save last, best and delete
-                 torch.save(ckpt, last)
-                 if best_fitness == fi:
-                     torch.save(ckpt, best)
-                 if (best_fitness == fi) and (epoch >= 200):
-                     torch.save(ckpt, wdir / 'best_{:03d}.pt'.format(epoch))
-                 if epoch == 0:
-                     torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch))
-                 elif ((epoch + 1) % 25) == 0:
-                     torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch))
-                 elif epoch >= (epochs - 5):
-                     torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch))
-                 if wandb_logger.wandb:
-                     if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
-                         wandb_logger.log_model(
-                             last.parent, opt, epoch, fi, best_model=best_fitness == fi)
-                 del ckpt
- 
-         # end epoch ----------------------------------------------------------------------------------------------------
-     # end training
-     if rank in [-1, 0]:
-         # Plots
-         if plots:
-             plot_results(save_dir=save_dir)  # save as results.png
-             if wandb_logger.wandb:
-                 files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]
-                 wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files
-                                               if (save_dir / f).exists()]})
-         # Test best.pt
-         logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
-         if opt.data.endswith('coco.yaml') and nc == 80:  # if COCO
-             for m in (last, best) if best.exists() else (last,):  # speed, mAP tests
-                 results, _, _ = test.test(opt.data,
-                                           batch_size=batch_size * 2,
-                                           imgsz=imgsz_test,
-                                           conf_thres=0.001,
-                                           iou_thres=0.7,
-                                           model=attempt_load(m, device).half(),
-                                           single_cls=opt.single_cls,
-                                           dataloader=testloader,
-                                           save_dir=save_dir,
-                                           save_json=True,
-                                           plots=False,
-                                           is_coco=is_coco,
-                                           v5_metric=opt.v5_metric)
- 
-         # Strip optimizers
-         final = best if best.exists() else last  # final model
-         for f in last, best:
-             if f.exists():
-                 strip_optimizer(f)  # strip optimizers
-         if opt.bucket:
-             os.system(f'gsutil cp {final} gs://{opt.bucket}/weights')  # upload
-         if wandb_logger.wandb and not opt.evolve:  # Log the stripped model
-             wandb_logger.wandb.log_artifact(str(final), type='model',
-                                             name='run_' + wandb_logger.wandb_run.id + '_model',
-                                             aliases=['last', 'best', 'stripped'])
-         wandb_logger.finish_run()
-     else:
-         dist.destroy_process_group()
-     torch.cuda.empty_cache()
-     return results
- 
- 
- if __name__ == '__main__':
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--weights', type=str, default='yolo7.pt', help='initial weights path')
-     parser.add_argument('--cfg', type=str, default='', help='model.yaml path')
-     parser.add_argument('--data', type=str, default='data/coco.yaml', help='data.yaml path')
-     parser.add_argument('--hyp', type=str, default='data/hyp.scratch.p5.yaml', help='hyperparameters path')
-     parser.add_argument('--epochs', type=int, default=300)
-     parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs')
-     parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes')
-     parser.add_argument('--rect', action='store_true', help='rectangular training')
-     parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')
-     parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
-     parser.add_argument('--notest', action='store_true', help='only test final epoch')
-     parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')
-     parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
-     parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
-     parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
-     parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')
-     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
-     parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')
-     parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')
-     parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')
-     parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')
-     parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')
-     parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')
-     parser.add_argument('--project', default='runs/train', help='save to project/name')
-     parser.add_argument('--entity', default=None, help='W&B entity')
-     parser.add_argument('--name', default='exp', help='save to project/name')
-     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
-     parser.add_argument('--quad', action='store_true', help='quad dataloader')
-     parser.add_argument('--linear-lr', action='store_true', help='linear LR')
-     parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')
-     parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')
-     parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B')
-     parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch')
-     parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used')
-     parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone of yolov7=50, first3=0 1 2')
-     parser.add_argument('--v5-metric', action='store_true', help='assume maximum recall as 1.0 in AP calculation')
-     opt = parser.parse_args()
- 
-     # Set DDP variables
-     opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
-     opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1
-     set_logging(opt.global_rank)
-     #if opt.global_rank in [-1, 0]:
-     #    check_git_status()
-     #    check_requirements()
- 
-     # Resume
-     wandb_run = check_wandb_resume(opt)
-     if opt.resume and not wandb_run:  # resume an interrupted run
-         ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path
-         assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'
-         apriori = opt.global_rank, opt.local_rank
-         with open(Path(ckpt).parent.parent / 'opt.yaml') as f:
-             opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader))  # replace
-         opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori  # reinstate
-         logger.info('Resuming training from %s' % ckpt)
-     else:
-         # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
-         opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files
-         assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'
-         opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test)
-         opt.name = 'evolve' if opt.evolve else opt.name
-         opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)  # increment run
- 
-     # DDP mode
-     opt.total_batch_size = opt.batch_size
-     device = select_device(opt.device, batch_size=opt.batch_size)
-     if opt.local_rank != -1:
-         assert torch.cuda.device_count() > opt.local_rank
-         torch.cuda.set_device(opt.local_rank)
-         device = torch.device('cuda', opt.local_rank)
-         dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend
-         assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'
-         opt.batch_size = opt.total_batch_size // opt.world_size
- 
-     # Hyperparameters
-     with open(opt.hyp) as f:
-         hyp = yaml.load(f, Loader=yaml.SafeLoader)  # load hyps
- 
-     # Train
-     logger.info(opt)
-     if not opt.evolve:
-         tb_writer = None  # init loggers
-         if opt.global_rank in [-1, 0]:
-             prefix = colorstr('tensorboard: ')
-             logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/")
-             tb_writer = SummaryWriter(opt.save_dir)  # Tensorboard
-         train(hyp, opt, device, tb_writer)
- 
-     # Evolve hyperparameters (optional)
-     else:
-         # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
-         meta = {'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3)
-                 'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf)
-                 'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1
-                 'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay
-                 'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok)
-                 'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum
-                 'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr
-                 'box': (1, 0.02, 0.2),  # box loss gain
-                 'cls': (1, 0.2, 4.0),  # cls loss gain
-                 'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight
-                 'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels)
-                 'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight
-                 'iou_t': (0, 0.1, 0.7),  # IoU training threshold
-                 'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold
-                 'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore)
-                 'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5)
-                 'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction)
-                 'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction)
-                 'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction)
-                 'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg)
-                 'translate': (1, 0.0, 0.9),  # image translation (+/- fraction)
-                 'scale': (1, 0.0, 0.9),  # image scale (+/- gain)
-                 'shear': (1, 0.0, 10.0),  # image shear (+/- deg)
-                 'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001
-                 'flipud': (1, 0.0, 1.0),  # image flip up-down (probability)
-                 'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability)
-                 'mosaic': (1, 0.0, 1.0),  # image mosaic (probability)
-                 'mixup': (1, 0.0, 1.0),  # image mixup (probability)
-                 'copy_paste': (1, 0.0, 1.0),  # segment copy-paste (probability)
-                 'paste_in': (1, 0.0, 1.0)}  # segment copy-paste (probability)
- 
-         with open(opt.hyp, errors='ignore') as f:
-             hyp = yaml.safe_load(f)  # load hyps dict
-             if 'anchors' not in hyp:  # anchors commented in hyp.yaml
-                 hyp['anchors'] = 3
- 
-         assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
-         opt.notest, opt.nosave = True, True  # only test/save final epoch
-         # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
-         yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml'  # save best result here
-         if opt.bucket:
-             os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket)  # download evolve.txt if exists
- 
-         for _ in range(300):  # generations to evolve
-             if Path('evolve.txt').exists():  # if evolve.txt exists: select best hyps and mutate
-                 # Select parent(s)
-                 parent = 'single'  # parent selection method: 'single' or 'weighted'
-                 x = np.loadtxt('evolve.txt', ndmin=2)
-                 n = min(5, len(x))  # number of previous results to consider
-                 x = x[np.argsort(-fitness(x))][:n]  # top n mutations
-                 w = fitness(x) - fitness(x).min()  # weights
-                 if parent == 'single' or len(x) == 1:
-                     # x = x[random.randint(0, n - 1)]  # random selection
-                     x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
-                 elif parent == 'weighted':
-                     x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination
- 
-                 # Mutate
-                 mp, s = 0.8, 0.2  # mutation probability, sigma
-                 npr = np.random
-                 npr.seed(int(time.time()))
-                 g = np.array([x[0] for x in meta.values()])  # gains 0-1
-                 ng = len(meta)
-                 v = np.ones(ng)
-                 while all(v == 1):  # mutate until a change occurs (prevent duplicates)
-                     v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
-                 for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
-                     hyp[k] = float(x[i + 7] * v[i])  # mutate
- 
-             # Constrain to limits
-             for k, v in meta.items():
-                 hyp[k] = max(hyp[k], v[1])  # lower limit
-                 hyp[k] = min(hyp[k], v[2])  # upper limit
-                 hyp[k] = round(hyp[k], 5)  # significant digits
- 
-             # Train mutation
-             results = train(hyp.copy(), opt, device)
- 
-             # Write mutation results
-             print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
- 
-         # Plot results
-         plot_evolution(yaml_file)
-         print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n'
-               f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}')
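For reference, the two learning-rate schedules selected in the `# Scheduler` block above can be written in closed form. The linear branch is taken verbatim from the code; the cosine branch corresponds to the `one_cycle` ramp this codebase inherits from the YOLOv5 utilities (stated from memory, so treat the cosine form as an assumption rather than a quote):

```latex
\text{linear: } lf(x) = \left(1 - \frac{x}{E - 1}\right)\left(1 - \mathrm{lrf}\right) + \mathrm{lrf},
\qquad
\text{cosine: } lf(x) = \frac{1 - \cos\left(\pi x / E\right)}{2}\,\left(\mathrm{lrf} - 1\right) + 1,
```

where $x$ is the epoch index and $E$ is `epochs`; both ramp the LR multiplier from 1 down to `hyp['lrf']`.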
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/methods/Methods.js DELETED
@@ -1,9 +0,0 @@
- import ResetDisplayContent from './ResetDisplayContent.js';
- import Modal from './Modal.js';
- 
- var Methods = {
-     resetDisplayContent: ResetDisplayContent,
-     modal: Modal,
- }
- 
- export default Methods;
spaces/AlexWang/lama/bin/paper_runfiles/generate_test_paris.sh DELETED
@@ -1,17 +0,0 @@
- #!/usr/bin/env bash
- 
- # paths to data are valid for mml-ws01
- OUT_DIR="/media/inpainting/paper_data/Paris_StreetView_Dataset_val"
- 
- source "$(dirname $0)/env.sh"
- 
- for datadir in paris_eval_gt
- do
-     for conf in random_thin_256 random_medium_256 random_thick_256 segm_256
-     do
-         "$BINDIR/gen_mask_dataset_hydra.py" -cn $conf datadir=$datadir location=mml-ws01-paris \
-             location.out_dir=$OUT_DIR cropping.out_square_crop=False cropping.out_min_size=227
- 
-         "$BINDIR/calc_dataset_stats.py" --samples-n 20 "$OUT_DIR/$datadir/$conf" "$OUT_DIR/$datadir/${conf}_stats"
-     done
- done
spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/tests/test_numeric_batchnorm.py DELETED
@@ -1,56 +0,0 @@
- # -*- coding: utf-8 -*-
- # File   : test_numeric_batchnorm.py
- # Author : Jiayuan Mao
- # Email  : [email protected]
- # Date   : 27/01/2018
- #
- # This file is part of Synchronized-BatchNorm-PyTorch.
- 
- import unittest
- 
- import torch
- import torch.nn as nn
- from torch.autograd import Variable
- 
- from sync_batchnorm.unittest import TorchTestCase
- 
- 
- def handy_var(a, unbias=True):
-     n = a.size(0)
-     asum = a.sum(dim=0)
-     as_sum = (a ** 2).sum(dim=0)  # a square sum
-     sumvar = as_sum - asum * asum / n
-     if unbias:
-         return sumvar / (n - 1)
-     else:
-         return sumvar / n
- 
- 
- class NumericTestCase(TorchTestCase):
-     def testNumericBatchNorm(self):
-         a = torch.rand(16, 10)
-         bn = nn.BatchNorm2d(10, momentum=1, eps=1e-5, affine=False)
-         bn.train()
- 
-         a_var1 = Variable(a, requires_grad=True)
-         b_var1 = bn(a_var1)
-         loss1 = b_var1.sum()
-         loss1.backward()
- 
-         a_var2 = Variable(a, requires_grad=True)
-         a_mean2 = a_var2.mean(dim=0, keepdim=True)
-         a_std2 = torch.sqrt(handy_var(a_var2, unbias=False).clamp(min=1e-5))
-         # a_std2 = torch.sqrt(a_var2.var(dim=0, keepdim=True, unbiased=False) + 1e-5)
-         b_var2 = (a_var2 - a_mean2) / a_std2
-         loss2 = b_var2.sum()
-         loss2.backward()
- 
-         self.assertTensorClose(bn.running_mean, a.mean(dim=0))
-         self.assertTensorClose(bn.running_var, handy_var(a))
-         self.assertTensorClose(a_var1.data, a_var2.data)
-         self.assertTensorClose(b_var1.data, b_var2.data)
-         self.assertTensorClose(a_var1.grad, a_var2.grad)
- 
- 
- if __name__ == '__main__':
-     unittest.main()
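The `handy_var` helper above computes the variance via the standard shifted sum-of-squares identity rather than subtracting the mean first; written out:

```latex
\sum_{i=1}^{n} \left(a_i - \bar{a}\right)^2 = \sum_{i=1}^{n} a_i^2 - \frac{\left(\sum_{i=1}^{n} a_i\right)^2}{n},
\qquad
\widehat{\operatorname{Var}}(a) = \frac{1}{n-1} \sum_{i=1}^{n} \left(a_i - \bar{a}\right)^2 .
```

With `unbias=False` the divisor is $n$ instead of $n-1$, matching the biased variance that batch normalization uses when normalizing activations.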
spaces/AlexWang/lama/models/ade20k/segm_lib/nn/modules/unittest.py DELETED
@@ -1,29 +0,0 @@
- # -*- coding: utf-8 -*-
- # File   : unittest.py
- # Author : Jiayuan Mao
- # Email  : [email protected]
- # Date   : 27/01/2018
- #
- # This file is part of Synchronized-BatchNorm-PyTorch.
- # https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
- # Distributed under MIT License.
- 
- import unittest
- 
- import numpy as np
- from torch.autograd import Variable
- 
- 
- def as_numpy(v):
-     if isinstance(v, Variable):
-         v = v.data
-     return v.cpu().numpy()
- 
- 
- class TorchTestCase(unittest.TestCase):
-     def assertTensorClose(self, a, b, atol=1e-3, rtol=1e-3):
-         npa, npb = as_numpy(a), as_numpy(b)
-         self.assertTrue(
-             np.allclose(npa, npb, atol=atol),
-             'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max())
-         )
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/unidiffuser.md DELETED
@@ -1,194 +0,0 @@
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
- 
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
- 
- http://www.apache.org/licenses/LICENSE-2.0
- 
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
- 
- # UniDiffuser
- 
- The UniDiffuser model was proposed in [One Transformer Fits All Distributions in Multi-Modal Diffusion at Scale](https://huggingface.co/papers/2303.06555) by Fan Bao, Shen Nie, Kaiwen Xue, Chongxuan Li, Shi Pu, Yaole Wang, Gang Yue, Yue Cao, Hang Su, Jun Zhu.
- 
- The abstract from the [paper](https://arxiv.org/abs/2303.06555) is:
- 
- *This paper proposes a unified diffusion framework (dubbed UniDiffuser) to fit all distributions relevant to a set of multi-modal data in one model. Our key insight is -- learning diffusion models for marginal, conditional, and joint distributions can be unified as predicting the noise in the perturbed data, where the perturbation levels (i.e. timesteps) can be different for different modalities. Inspired by the unified view, UniDiffuser learns all distributions simultaneously with a minimal modification to the original diffusion model -- perturbs data in all modalities instead of a single modality, inputs individual timesteps in different modalities, and predicts the noise of all modalities instead of a single modality. UniDiffuser is parameterized by a transformer for diffusion models to handle input types of different modalities. Implemented on large-scale paired image-text data, UniDiffuser is able to perform image, text, text-to-image, image-to-text, and image-text pair generation by setting proper timesteps without additional overhead. In particular, UniDiffuser is able to produce perceptually realistic samples in all tasks and its quantitative results (e.g., the FID and CLIP score) are not only superior to existing general-purpose models but also comparable to the bespoken models (e.g., Stable Diffusion and DALL-E 2) in representative tasks (e.g., text-to-image generation).*
- 
- You can find the original codebase at [thu-ml/unidiffuser](https://github.com/thu-ml/unidiffuser) and additional checkpoints at [thu-ml](https://huggingface.co/thu-ml).
- 
- This pipeline was contributed by [dg845](https://github.com/dg845). ❤️
- 
- ## Usage Examples
- 
- Because the UniDiffuser model is trained to model the joint distribution of (image, text) pairs, it is capable of performing a diverse range of generation tasks:
- 
- ### Unconditional Image and Text Generation
- 
- Unconditional generation (where we start from only latents sampled from a standard Gaussian prior) from a [`UniDiffuserPipeline`] will produce an (image, text) pair:
- 
- ```python
- import torch
- 
- from diffusers import UniDiffuserPipeline
- 
- device = "cuda"
- model_id_or_path = "thu-ml/unidiffuser-v1"
- pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
- pipe.to(device)
- 
- # Unconditional image and text generation. The generation task is automatically inferred.
- sample = pipe(num_inference_steps=20, guidance_scale=8.0)
- image = sample.images[0]
- text = sample.text[0]
- image.save("unidiffuser_joint_sample_image.png")
- print(text)
- ```
- 
- This is also called "joint" generation in the UniDiffuser paper, since we are sampling from the joint image-text distribution.
- 
- Note that the generation task is inferred from the inputs used when calling the pipeline.
- It is also possible to manually specify the unconditional generation task ("mode") with [`UniDiffuserPipeline.set_joint_mode`]:
- 
- ```python
- # Equivalent to the above.
- pipe.set_joint_mode()
- sample = pipe(num_inference_steps=20, guidance_scale=8.0)
- ```
- 
- When the mode is set manually, subsequent calls to the pipeline will use the set mode without attempting to infer the mode.
- You can reset the mode with [`UniDiffuserPipeline.reset_mode`], after which the pipeline will once again infer the mode.
- 
- You can also generate only an image or only text (which the UniDiffuser paper calls "marginal" generation since we sample from the marginal distribution of images and text, respectively):
- 
- ```python
- # Unlike other generation tasks, image-only and text-only generation don't use classifier-free guidance
- # Image-only generation
- pipe.set_image_mode()
- sample_image = pipe(num_inference_steps=20).images[0]
- # Text-only generation
- pipe.set_text_mode()
- sample_text = pipe(num_inference_steps=20).text[0]
- ```
- 
- ### Text-to-Image Generation
- 
- UniDiffuser is also capable of sampling from conditional distributions; that is, the distribution of images conditioned on a text prompt or the distribution of texts conditioned on an image.
- Here is an example of sampling from the conditional image distribution (text-to-image generation or text-conditioned image generation):
- 
- ```python
- import torch
- 
- from diffusers import UniDiffuserPipeline
- 
- device = "cuda"
- model_id_or_path = "thu-ml/unidiffuser-v1"
- pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
- pipe.to(device)
- 
- # Text-to-image generation
- prompt = "an elephant under the sea"
- 
- sample = pipe(prompt=prompt, num_inference_steps=20, guidance_scale=8.0)
- t2i_image = sample.images[0]
- t2i_image.save("unidiffuser_text2img_sample_image.png")
- ```
- 
- The `text2img` mode requires that either an input `prompt` or `prompt_embeds` be supplied. You can set the `text2img` mode manually with [`UniDiffuserPipeline.set_text_to_image_mode`].
- 
- ### Image-to-Text Generation
- 
- Similarly, UniDiffuser can also produce text samples given an image (image-to-text or image-conditioned text generation):
- 
- ```python
- import torch
- 
- from diffusers import UniDiffuserPipeline
- from diffusers.utils import load_image
- 
- device = "cuda"
- model_id_or_path = "thu-ml/unidiffuser-v1"
- pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
- pipe.to(device)
- 
- # Image-to-text generation
- image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg"
- init_image = load_image(image_url).resize((512, 512))
- 
- sample = pipe(image=init_image, num_inference_steps=20, guidance_scale=8.0)
- i2t_text = sample.text[0]
- print(i2t_text)
- ```
- 
- The `img2text` mode requires that an input `image` be supplied. You can set the `img2text` mode manually with [`UniDiffuserPipeline.set_image_to_text_mode`].
- 
- ### Image Variation
- 
- The UniDiffuser authors suggest performing image variation through a "round-trip" generation method, where, given an input image, we first perform an image-to-text generation, and then perform a text-to-image generation on the outputs of the first generation.
- This produces a new image which is semantically similar to the input image:
- 
- ```python
- import torch
- 
- from diffusers import UniDiffuserPipeline
- from diffusers.utils import load_image
- 
- device = "cuda"
- model_id_or_path = "thu-ml/unidiffuser-v1"
- pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
- pipe.to(device)
- 
- # Image variation can be performed with an image-to-text generation followed by a text-to-image generation:
- # 1. Image-to-text generation
- image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg"
- init_image = load_image(image_url).resize((512, 512))
- 
- sample = pipe(image=init_image, num_inference_steps=20, guidance_scale=8.0)
- i2t_text = sample.text[0]
- print(i2t_text)
- 
- # 2. Text-to-image generation
- sample = pipe(prompt=i2t_text, num_inference_steps=20, guidance_scale=8.0)
- final_image = sample.images[0]
- final_image.save("unidiffuser_image_variation_sample.png")
- ```
- 
- ### Text Variation
- 
- 
- Similarly, text variation can be performed on an input prompt with a text-to-image generation followed by an image-to-text generation:
- 
- ```python
- import torch
- 
- from diffusers import UniDiffuserPipeline
- 
- device = "cuda"
- model_id_or_path = "thu-ml/unidiffuser-v1"
- pipe = UniDiffuserPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
- pipe.to(device)
- 
- # Text variation can be performed with a text-to-image generation followed by an image-to-text generation:
- # 1. Text-to-image generation
- prompt = "an elephant under the sea"
- 
- sample = pipe(prompt=prompt, num_inference_steps=20, guidance_scale=8.0)
- t2i_image = sample.images[0]
- t2i_image.save("unidiffuser_text2img_sample_image.png")
- 
- # 2. Image-to-text generation
- sample = pipe(image=t2i_image, num_inference_steps=20, guidance_scale=8.0)
- final_prompt = sample.text[0]
- print(final_prompt)
- ```
- 
- ## UniDiffuserPipeline
- [[autodoc]] UniDiffuserPipeline
- - all
- - __call__
- 
- ## ImageTextPipelineOutput
- [[autodoc]] pipelines.ImageTextPipelineOutput
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/using-diffusers/using_safetensors.md DELETED
@@ -1,14 +0,0 @@
- # What are safetensors?
-
- [Safetensors](https://github.com/huggingface/safetensors) is a different format from the classic `.bin` files used by PyTorch, which rely on pickle.
-
- Pickle is notoriously unsafe: a malicious file can execute arbitrary code.
- The Hub itself works to prevent such problems, but it is not a silver bullet.
-
- The most important goal of safetensors is to make loading machine learning models *safe*, in the sense that a model file cannot take over your computer.
-
- # Why use safetensors?
-
- **Safety** can be one reason: if you want to try a lesser-known model and you are not sure where the file came from, the safetensors format protects you.
-
- The second reason is **loading speed**. Safetensors can load models much faster than regular pickle files. If you spend a lot of time switching between models, this can be a huge time saver.
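-
- As a rough sketch of opting in from diffusers (the model id below is purely illustrative, and the `use_safetensors` flag assumes a reasonably recent diffusers release):
-
- ```python
- from diffusers import DiffusionPipeline
-
- # Ask diffusers to load `.safetensors` weights instead of pickled `.bin` files.
- pipe = DiffusionPipeline.from_pretrained(
-     "runwayml/stable-diffusion-v1-5", use_safetensors=True
- )
- ```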
spaces/Andy1621/IAT_enhancement/model/IAT.py DELETED
@@ -1,126 +0,0 @@
- import torch
- import numpy as np
- from torch import nn
- import torch.nn.functional as F
- import os
- import math
-
- from timm.models.layers import trunc_normal_
- from .blocks import CBlock_ln, SwinTransformerBlock
- from .global_net import Global_pred
-
-
- class Local_pred(nn.Module):
-     def __init__(self, dim=16, number=4, type='ccc'):
-         super(Local_pred, self).__init__()
-         # initial convolution
-         self.conv1 = nn.Conv2d(3, dim, 3, padding=1, groups=1)
-         self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
-         # main blocks
-         block = CBlock_ln(dim)
-         block_t = SwinTransformerBlock(dim)  # head number
-         if type == 'ccc':
-             # blocks1, blocks2 = [block for _ in range(number)], [block for _ in range(number)]
-             blocks1 = [CBlock_ln(16, drop_path=0.01), CBlock_ln(16, drop_path=0.05), CBlock_ln(16, drop_path=0.1)]
-             blocks2 = [CBlock_ln(16, drop_path=0.01), CBlock_ln(16, drop_path=0.05), CBlock_ln(16, drop_path=0.1)]
-         elif type == 'ttt':
-             blocks1, blocks2 = [block_t for _ in range(number)], [block_t for _ in range(number)]
-         elif type == 'cct':
-             blocks1, blocks2 = [block, block, block_t], [block, block, block_t]
-         # block1 = [CBlock_ln(16), nn.Conv2d(16, 24, 3, 1, 1)]
-         self.mul_blocks = nn.Sequential(*blocks1, nn.Conv2d(dim, 3, 3, 1, 1), nn.ReLU())
-         self.add_blocks = nn.Sequential(*blocks2, nn.Conv2d(dim, 3, 3, 1, 1), nn.Tanh())
-
-     def forward(self, img):
-         img1 = self.relu(self.conv1(img))
-         mul = self.mul_blocks(img1)
-         add = self.add_blocks(img1)
-         return mul, add
-
-
- # Short Cut Connection on Final Layer
- class Local_pred_S(nn.Module):
-     def __init__(self, in_dim=3, dim=16, number=4, type='ccc'):
-         super(Local_pred_S, self).__init__()
-         # initial convolution
-         self.conv1 = nn.Conv2d(in_dim, dim, 3, padding=1, groups=1)
-         self.relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)
-         # main blocks
-         block = CBlock_ln(dim)
-         block_t = SwinTransformerBlock(dim)  # head number
-         if type == 'ccc':
-             blocks1 = [CBlock_ln(16, drop_path=0.01), CBlock_ln(16, drop_path=0.05), CBlock_ln(16, drop_path=0.1)]
-             blocks2 = [CBlock_ln(16, drop_path=0.01), CBlock_ln(16, drop_path=0.05), CBlock_ln(16, drop_path=0.1)]
-         elif type == 'ttt':
-             blocks1, blocks2 = [block_t for _ in range(number)], [block_t for _ in range(number)]
-         elif type == 'cct':
-             blocks1, blocks2 = [block, block, block_t], [block, block, block_t]
-         # block1 = [CBlock_ln(16), nn.Conv2d(16, 24, 3, 1, 1)]
-         self.mul_blocks = nn.Sequential(*blocks1)
-         self.add_blocks = nn.Sequential(*blocks2)
-
-         self.mul_end = nn.Sequential(nn.Conv2d(dim, 3, 3, 1, 1), nn.ReLU())
-         self.add_end = nn.Sequential(nn.Conv2d(dim, 3, 3, 1, 1), nn.Tanh())
-         self.apply(self._init_weights)
-
-     def _init_weights(self, m):
-         if isinstance(m, nn.Linear):
-             trunc_normal_(m.weight, std=.02)
-             if isinstance(m, nn.Linear) and m.bias is not None:
-                 nn.init.constant_(m.bias, 0)
-         elif isinstance(m, nn.LayerNorm):
-             nn.init.constant_(m.bias, 0)
-             nn.init.constant_(m.weight, 1.0)
-         elif isinstance(m, nn.Conv2d):
-             fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
-             fan_out //= m.groups
-             m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
-             if m.bias is not None:
-                 m.bias.data.zero_()
-
-     def forward(self, img):
-         img1 = self.relu(self.conv1(img))
-         # short cut connection
-         mul = self.mul_blocks(img1) + img1
-         add = self.add_blocks(img1) + img1
-         mul = self.mul_end(mul)
-         add = self.add_end(add)
-         return mul, add
-
-
- class IAT(nn.Module):
-     def __init__(self, in_dim=3, with_global=True, type='lol'):
-         super(IAT, self).__init__()
-         self.local_net = Local_pred_S(in_dim=in_dim)
-         self.with_global = with_global
-         if self.with_global:
-             self.global_net = Global_pred(in_channels=in_dim, type=type)
-
-     def apply_color(self, image, ccm):
-         shape = image.shape
-         image = image.view(-1, 3)
-         image = torch.tensordot(image, ccm, dims=[[-1], [-1]])
-         image = image.view(shape)
-         return torch.clamp(image, 1e-8, 1.0)
-
-     def forward(self, img_low):
-         # print(self.with_global)
-         mul, add = self.local_net(img_low)
-         img_high = (img_low.mul(mul)).add(add)
-
-         if not self.with_global:
-             return img_high
-         else:
-             gamma, color = self.global_net(img_low)
-             b = img_high.shape[0]
-             img_high = img_high.permute(0, 2, 3, 1)  # (B,C,H,W) -> (B,H,W,C)
-             img_high = torch.stack([self.apply_color(img_high[i, :, :, :], color[i, :, :]) ** gamma[i, :] for i in range(b)], dim=0)
-             img_high = img_high.permute(0, 3, 1, 2)  # (B,H,W,C) -> (B,C,H,W)
-             return img_high
-
-
- if __name__ == "__main__":
-     img = torch.Tensor(1, 3, 400, 600)
-     net = IAT()
-     print('total parameters:', sum(param.numel() for param in net.parameters()))
-     high = net(img)
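-
- # Hedged usage sketch (not part of the original file): the local branch maps a
- # low-light image to `img_low * mul + add`, with mul >= 0 (ReLU) and add in
- # [-1, 1] (Tanh). `torch.rand` stands in for a real image tensor in [0, 1].
- net_local = IAT(with_global=False).eval()
- with torch.no_grad():
-     low = torch.rand(1, 3, 256, 256)
-     enhanced = net_local(low)      # elementwise affine enhancement from Local_pred_S
- print(enhanced.shape)              # torch.Size([1, 3, 256, 256])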
spaces/Andy1621/uniformer_image_detection/configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py DELETED
@@ -1,5 +0,0 @@
- _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
- model = dict(
-     backbone=dict(
-         dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
-         stage_with_dcn=(False, True, True, True)))
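-
- # Hedged sketch (not part of the original file): building a detector from this
- # config. The imports and registry behavior assume MMDetection 2.x / mmcv 1.x.
- from mmcv import Config
- from mmdet.models import build_detector
-
- cfg = Config.fromfile('configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py')
- model = build_detector(cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))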
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py DELETED
@@ -1,55 +0,0 @@
- import numpy as np
- import torch
-
- from ..builder import BBOX_SAMPLERS
- from .random_sampler import RandomSampler
-
-
- @BBOX_SAMPLERS.register_module()
- class InstanceBalancedPosSampler(RandomSampler):
-     """Instance balanced sampler that samples equal number of positive samples
-     for each instance."""
-
-     def _sample_pos(self, assign_result, num_expected, **kwargs):
-         """Sample positive boxes.
-
-         Args:
-             assign_result (:obj:`AssignResult`): The assigned results of boxes.
-             num_expected (int): The number of expected positive samples
-
-         Returns:
-             Tensor or ndarray: sampled indices.
-         """
-         pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
-         if pos_inds.numel() != 0:
-             pos_inds = pos_inds.squeeze(1)
-         if pos_inds.numel() <= num_expected:
-             return pos_inds
-         else:
-             unique_gt_inds = assign_result.gt_inds[pos_inds].unique()
-             num_gts = len(unique_gt_inds)
-             num_per_gt = int(round(num_expected / float(num_gts)) + 1)
-             sampled_inds = []
-             for i in unique_gt_inds:
-                 inds = torch.nonzero(
-                     assign_result.gt_inds == i.item(), as_tuple=False)
-                 if inds.numel() != 0:
-                     inds = inds.squeeze(1)
-                 else:
-                     continue
-                 if len(inds) > num_per_gt:
-                     inds = self.random_choice(inds, num_per_gt)
-                 sampled_inds.append(inds)
-             sampled_inds = torch.cat(sampled_inds)
-             if len(sampled_inds) < num_expected:
-                 num_extra = num_expected - len(sampled_inds)
-                 extra_inds = np.array(
-                     list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))
-                 if len(extra_inds) > num_extra:
-                     extra_inds = self.random_choice(extra_inds, num_extra)
-                 extra_inds = torch.from_numpy(extra_inds).to(
-                     assign_result.gt_inds.device).long()
-                 sampled_inds = torch.cat([sampled_inds, extra_inds])
-             elif len(sampled_inds) > num_expected:
-                 sampled_inds = self.random_choice(sampled_inds, num_expected)
-             return sampled_inds
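-
- # Hedged sketch (not part of the original file): how this sampler is usually
- # selected inside an MMDetection R-CNN train_cfg. The surrounding keys mirror
- # the stock Faster R-CNN RandomSampler config and are assumptions here.
- rcnn_sampler = dict(
-     type='InstanceBalancedPosSampler',  # registered name via @BBOX_SAMPLERS
-     num=512,                            # total RoIs sampled per image
-     pos_fraction=0.25,                  # fraction drawn from positives
-     neg_pos_ub=-1,
-     add_gt_as_proposals=True)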
spaces/Andy1621/uniformer_image_segmentation/configs/_base_/models/pspnet_r50-d8.py DELETED
@@ -1,44 +0,0 @@
- # model settings
- norm_cfg = dict(type='SyncBN', requires_grad=True)
- model = dict(
-     type='EncoderDecoder',
-     pretrained='open-mmlab://resnet50_v1c',
-     backbone=dict(
-         type='ResNetV1c',
-         depth=50,
-         num_stages=4,
-         out_indices=(0, 1, 2, 3),
-         dilations=(1, 1, 2, 4),
-         strides=(1, 2, 1, 1),
-         norm_cfg=norm_cfg,
-         norm_eval=False,
-         style='pytorch',
-         contract_dilation=True),
-     decode_head=dict(
-         type='PSPHead',
-         in_channels=2048,
-         in_index=3,
-         channels=512,
-         pool_scales=(1, 2, 3, 6),
-         dropout_ratio=0.1,
-         num_classes=19,
-         norm_cfg=norm_cfg,
-         align_corners=False,
-         loss_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
-     auxiliary_head=dict(
-         type='FCNHead',
-         in_channels=1024,
-         in_index=2,
-         channels=256,
-         num_convs=1,
-         concat_input=False,
-         dropout_ratio=0.1,
-         num_classes=19,
-         norm_cfg=norm_cfg,
-         align_corners=False,
-         loss_decode=dict(
-             type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
-     # model training and testing settings
-     train_cfg=dict(),
-     test_cfg=dict(mode='whole'))
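-
- # Hedged sketch (not part of the original file): building a segmentor from
- # this base config; imports assume MMSegmentation 0.x with mmcv 1.x.
- from mmcv import Config
- from mmseg.models import build_segmentor
-
- cfg = Config.fromfile('configs/_base_/models/pspnet_r50-d8.py')
- segmentor = build_segmentor(cfg.model)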
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3/deeplabv3_r101-d8_769x769_40k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './deeplabv3_r50-d8_769x769_40k_cityscapes.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
spaces/AndyCer/TehVenom-MPT-7b-Chat-Instruct-LongCTX-Merge/app.py DELETED
@@ -1,3 +0,0 @@
- import gradio as gr
-
- gr.Interface.load("models/TehVenom/MPT-7b-Chat-Instruct-LongCTX-Merge").launch()
spaces/AnishKumbhar/DogDiseasePredictor/Dockerfile DELETED
@@ -1,27 +0,0 @@
- # Use the official Python 3.9 image
- FROM python:3.9
-
- # Set the working directory to /code
- WORKDIR /code
-
- # Copy the requirements file into the container at /code
- COPY ./requirements.txt /code/requirements.txt
-
- # Install requirements.txt
- RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
-
- # Set up a new user named "user" with user ID 1000
- RUN useradd -m -u 1000 user
- # Switch to the "user" user
- USER user
- # Set home to the user's home directory
- ENV HOME=/home/user \
-     PATH=/home/user/.local/bin:$PATH
-
- # Set the working directory to the user's home directory
- WORKDIR $HOME/app
-
- # Copy the current directory contents into the container at $HOME/app, setting the owner to the user
- COPY --chown=user . $HOME/app
-
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
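-
- # Hedged sketch (not part of the original file): standard Docker CLI usage for
- # an image built from this file; the tag name is arbitrary.
- #   docker build -t dog-disease-predictor .
- #   docker run -p 7860:7860 dog-disease-predictor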
spaces/AnjaneyuluChinni/AnjiChinniGenAIAvatar/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: AnjiChinniGenAIAvatar
- emoji: 🦀
- colorFrom: blue
- colorTo: yellow
- sdk: gradio
- sdk_version: 3.39.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Artrajz/vits-simple-api/config.py DELETED
@@ -1,109 +0,0 @@
- import os
- import sys
-
- import torch
-
- JSON_AS_ASCII = False
-
- MAX_CONTENT_LENGTH = 5242880
-
- # Flask debug mode
- DEBUG = False
-
- # Server port
- PORT = 7860
-
- # Absolute path of vits-simple-api
- ABS_PATH = os.path.dirname(os.path.realpath(__file__))
-
- # Upload path
- UPLOAD_FOLDER = ABS_PATH + "/upload"
-
- # Cache path
- CACHE_PATH = ABS_PATH + "/cache"
-
- # Logs path
- LOGS_PATH = ABS_PATH + "/logs"
-
- # Set the number of backup log files to keep.
- LOGS_BACKUPCOUNT = 30
-
- # If CLEAN_INTERVAL_SECONDS <= 0, the cleaning task will not be executed.
- CLEAN_INTERVAL_SECONDS = 3600
-
- # Save audio to CACHE_PATH
- SAVE_AUDIO = False
-
- # zh ja ko en... If it is empty, it will be read based on the text_cleaners specified in the config.json.
- LANGUAGE_AUTOMATIC_DETECT = []
-
- # Set to True to enable API Key authentication
- API_KEY_ENABLED = False
-
- # API_KEY is required for authentication
- API_KEY = "api-key"
-
- # logging_level: DEBUG/INFO/WARNING/ERROR/CRITICAL
- LOGGING_LEVEL = "DEBUG"
-
- # Language identification library. Options: fastlid, langid
- LANGUAGE_IDENTIFICATION_LIBRARY = "langid"
-
- # To use the english_cleaner, you need to install espeak and provide the path of libespeak-ng.dll as input here.
- # If ESPEAK_LIBRARY is set to empty, it will be read from the environment variable.
- # For Windows: "C:/Program Files/eSpeak NG/libespeak-ng.dll"
- ESPEAK_LIBRARY = ""
-
- # Fill in the model path here
- MODEL_LIST = [
-     # VITS
-     [ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/1374_epochs.pth", ABS_PATH + "/Model/Nene_Nanami_Rong_Tang/config.json"],
-     [ABS_PATH + "/Model/vctk/pretrained_vctk.pth", ABS_PATH + "/Model/vctk/vctk_base.json"],
-     [ABS_PATH + "/Model/paimon/paimon6k_390000.pth", ABS_PATH + "/Model/paimon/paimon6k.json"],
-     [ABS_PATH + "/Model/vits_chinese/vits_bert_model.pth", ABS_PATH + "/Model/vits_chinese/bert_vits.json"],
-     [ABS_PATH + "/Model/Bishojo_Mangekyo/generator_mangekyo.pth", ABS_PATH + "/Model/Bishojo_Mangekyo/config_mangekyo.json"],
-     [ABS_PATH + "/Model/Cantonese/model.pth", ABS_PATH + "/Model/Cantonese/config.json"],
-     [ABS_PATH + "/Model/shanghainese/2796_epochs.pth", ABS_PATH + "/Model/shanghainese/config.json"],
-     [ABS_PATH + "/Model/genshin/G_953000.pth", ABS_PATH + "/Model/genshin/config.json"],
-     # HuBert-VITS (Need to configure HUBERT_SOFT_MODEL)
-     [ABS_PATH + "/Model/louise/360_epochs.pth", ABS_PATH + "/Model/louise/config.json"],
-     # W2V2-VITS (Need to configure DIMENSIONAL_EMOTION_NPY)
-     [ABS_PATH + "/Model/w2v2-vits/1026_epochs.pth", ABS_PATH + "/Model/w2v2-vits/config.json"],
- ]
-
- # hubert-vits: hubert soft model
- HUBERT_SOFT_MODEL = ABS_PATH + "/Model/hubert-soft-0d54a1f4.pt"
-
- # w2v2-vits: Dimensional emotion npy file
- # load single npy: ABS_PATH + "/all_emotions.npy"
- # load multiple npy: [ABS_PATH + "/emotions1.npy", ABS_PATH + "/emotions2.npy"]
- # load multiple npy from folder: ABS_PATH + "/Model/npy"
- DIMENSIONAL_EMOTION_NPY = ABS_PATH + "/Model/npy"
-
- # w2v2-vits: Need to have both `model.onnx` and `model.yaml` files in the same path.
- # DIMENSIONAL_EMOTION_MODEL = ABS_PATH + "/Model/model.yaml"
-
- DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
- """
- Default parameter
- """
-
- ID = 0
-
- FORMAT = "wav"
-
- LANG = "AUTO"
-
- LENGTH = 1
-
- NOISE = 0.33
-
- NOISEW = 0.4
-
- # Batch processing threshold for long text; the text will not be split if MAX <= 0.
- MAX = 50
-
- # Bert_VITS2
- SDP_RATIO = 0.2
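-
- # Hedged sketch (not part of the original file): how a Flask app can consume
- # this module. `from_object` reads the UPPER_CASE attributes above; the app
- # wiring here is an assumption, not vits-simple-api's actual entry point.
- from flask import Flask
-
- app = Flask(__name__)
- app.config.from_object("config")
- app.run(port=app.config.get("PORT", 7860), debug=app.config["DEBUG"])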
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/util/logger.py DELETED
@@ -1,93 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import functools
- import logging
- import os
- import sys
-
- from termcolor import colored
-
-
- class _ColorfulFormatter(logging.Formatter):
-     def __init__(self, *args, **kwargs):
-         self._root_name = kwargs.pop("root_name") + "."
-         self._abbrev_name = kwargs.pop("abbrev_name", "")
-         if len(self._abbrev_name):
-             self._abbrev_name = self._abbrev_name + "."
-         super(_ColorfulFormatter, self).__init__(*args, **kwargs)
-
-     def formatMessage(self, record):
-         record.name = record.name.replace(self._root_name, self._abbrev_name)
-         log = super(_ColorfulFormatter, self).formatMessage(record)
-         if record.levelno == logging.WARNING:
-             prefix = colored("WARNING", "red", attrs=["blink"])
-         elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
-             prefix = colored("ERROR", "red", attrs=["blink", "underline"])
-         else:
-             return log
-         return prefix + " " + log
-
-
- # so that calling setup_logger multiple times won't add many handlers
- @functools.lru_cache()
- def setup_logger(output=None, distributed_rank=0, *, color=True, name="imagenet", abbrev_name=None):
-     """
-     Initialize the detectron2 logger and set its verbosity level to "INFO".
-
-     Args:
-         output (str): a file name or a directory to save log. If None, will not save log file.
-             If ends with ".txt" or ".log", assumed to be a file name.
-             Otherwise, logs will be saved to `output/log.txt`.
-         name (str): the root module name of this logger
-
-     Returns:
-         logging.Logger: a logger
-     """
-     logger = logging.getLogger(name)
-     logger.setLevel(logging.DEBUG)
-     logger.propagate = False
-
-     if abbrev_name is None:
-         abbrev_name = name
-
-     plain_formatter = logging.Formatter(
-         "[%(asctime)s.%(msecs)03d]: %(message)s", datefmt="%m/%d %H:%M:%S"
-     )
-     # stdout logging: master only
-     if distributed_rank == 0:
-         ch = logging.StreamHandler(stream=sys.stdout)
-         ch.setLevel(logging.DEBUG)
-         if color:
-             formatter = _ColorfulFormatter(
-                 colored("[%(asctime)s.%(msecs)03d]: ", "green") + "%(message)s",
-                 datefmt="%m/%d %H:%M:%S",
-                 root_name=name,
-                 abbrev_name=str(abbrev_name),
-             )
-         else:
-             formatter = plain_formatter
-         ch.setFormatter(formatter)
-         logger.addHandler(ch)
-
-     # file logging: all workers
-     if output is not None:
-         if output.endswith(".txt") or output.endswith(".log"):
-             filename = output
-         else:
-             filename = os.path.join(output, "log.txt")
-         if distributed_rank > 0:
-             filename = filename + f".rank{distributed_rank}"
-         os.makedirs(os.path.dirname(filename), exist_ok=True)
-
-         fh = logging.StreamHandler(_cached_log_stream(filename))
-         fh.setLevel(logging.DEBUG)
-         fh.setFormatter(plain_formatter)
-         logger.addHandler(fh)
-
-     return logger
-
-
- # cache the opened file object, so that different calls to `setup_logger`
- # with the same file name can safely write to the same file.
- @functools.lru_cache(maxsize=None)
- def _cached_log_stream(filename):
-     return open(filename, "a")
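-
- # Hedged sketch (not part of the original file): typical use of the function
- # defined above; the output directory and names are arbitrary choices.
- logger = setup_logger(output="logs", name="groundingdino", abbrev_name="gdino")
- logger.info("logger ready")  # goes to stdout (rank 0) and to logs/log.txt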
spaces/Asahi402/White-box-Cartoonization/README.md DELETED
@@ -1,15 +0,0 @@
- ---
- python_version: 3.7
- title: White Box Cartoonization
- emoji: 📚
- colorFrom: purple
- colorTo: green
- sdk: gradio
- sdk_version: 2.9.4
- app_file: app.py
- pinned: false
- license: apache-2.0
- duplicated_from: hylee/White-box-Cartoonization
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/gb2312freq.py DELETED
@@ -1,284 +0,0 @@
- ######################## BEGIN LICENSE BLOCK ########################
- # The Original Code is Mozilla Communicator client code.
- #
- # The Initial Developer of the Original Code is
- # Netscape Communications Corporation.
- # Portions created by the Initial Developer are Copyright (C) 1998
- # the Initial Developer. All Rights Reserved.
- #
- # Contributor(s):
- #   Mark Pilgrim - port to Python
- #
- # This library is free software; you can redistribute it and/or
- # modify it under the terms of the GNU Lesser General Public
- # License as published by the Free Software Foundation; either
- # version 2.1 of the License, or (at your option) any later version.
- #
- # This library is distributed in the hope that it will be useful,
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- # Lesser General Public License for more details.
- #
- # You should have received a copy of the GNU Lesser General Public
- # License along with this library; if not, write to the Free Software
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- # 02110-1301  USA
- ######################### END LICENSE BLOCK #########################
-
- # GB2312 most frequently used character table
- #
- # Char to FreqOrder table, from hz6763
-
- # 512  --> 0.79  -- 0.79
- # 1024 --> 0.92  -- 0.13
- # 2048 --> 0.98  -- 0.06
- # 6768 --> 1.00  -- 0.02
- #
- # Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
- # Random Distribution Ratio = 512 / (3755 - 512) = 0.157
- #
- # Typical Distribution Ratio is about 25% of the Ideal one, still much higher than RDR
-
- GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
-
- GB2312_TABLE_SIZE = 3760
-
- # fmt: off
- GB2312_CHAR_TO_FREQ_ORDER = (
- 1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
- 2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
- 2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
-  249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
- 1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
- 1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
-  152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
- 1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
- 2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
- 3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
-  544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
- 1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
-  927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
- 2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
-  360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
- 2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
- 1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
- 3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
-  198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
- 1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
-  253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
- 2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
- 1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
- 3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
- 1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
- 2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
- 1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
-  585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
- 3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
- 3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
-  252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
- 3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
-  836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
- 1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
- 3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
- 2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
- 1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
-  755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
- 1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
- 4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
-  887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
- 3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
- 3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
-  509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
- 1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
- 2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
- 1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
- 1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
-  389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
- 3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
- 3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
- 4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
-  296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
- 3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
- 1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
- 1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
- 4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
-  215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
-  814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
- 3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
- 1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
-  602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
- 1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
- 2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
-  930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
-  432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
-  396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
- 3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
- 4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
- 3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
-  750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
- 2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
- 2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
- 2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
-  776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
- 2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
-  968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
-  163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
-  220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
- 3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
- 2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
- 2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
- 1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
-   18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
- 2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
-   90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
-  286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
- 1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
- 1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
-  915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
-  681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
- 1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
- 2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
- 3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
- 2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
- 2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
- 2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
- 3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
- 1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
- 1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
- 2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
- 1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
- 3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
- 1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
- 1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
- 3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
-  795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
- 2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
- 1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
- 4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
- 1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
- 1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
- 3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
- 1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
-   47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
-  504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
- 1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
-  160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
- 1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
- 1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
-  744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
- 3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
- 4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
- 3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
- 2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
- 2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
- 1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
- 3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
- 2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
- 1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
- 1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
-  125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
- 2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
- 2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
- 3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
- 4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
- 3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
-  180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
- 3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
- 2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
- 1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
-  259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
-  774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
- 3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
- 4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
- 2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
- 1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
- 1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
-  766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
- 1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
- 3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
-  955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
-  642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
- 1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
-   57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
- 1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
-  193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
- 2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
-  158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
- 2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
- 2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
- 1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
- 1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
- 2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
-  819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
- 1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
- 1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
- 2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
- 2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
- 3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
- 1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
- 4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
-  571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
-  845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
- 3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
- 1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
-  470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
- 3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
- 1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
- 4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
- 1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
- 2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
- 1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
-  498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
- 1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
- 3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
-  448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
- 2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
-  136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
- 1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
- 1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
- 1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
- 3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
- 2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
- 3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
- 3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
- 3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
-  996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
- 2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
-  786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
- 2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
-   12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
- 1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
-  475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
-  233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
- 1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
- 3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
- 3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
- 1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
- 1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
- 3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
- 2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
- 2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
- 1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
- 3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
-  451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
- 4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
- 1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
- 2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
- 3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
- 3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
- 1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
-  768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
-  391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
- 2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
-  931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
- 1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
-  386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
- 1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
- 1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
- 1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
- 1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
- 1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
-  381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
-  852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483,  #last 512
- )
- # fmt: on
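-
- # Hedged sketch (not part of the original file): reproducing the two ratios
- # quoted in the header comments above, as a quick arithmetic check.
- ideal_ratio = 0.79135 / (1 - 0.79135)   # ~= 3.79
- random_ratio = 512 / (3755 - 512)        # ~= 0.157
- print(round(ideal_ratio, 2), round(random_ratio, 3))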
spaces/Atualli/yoloxTeste/configs/__init__.py DELETED
File without changes
spaces/Benson/text-generation/Examples/Arena Breakout Beta Global Descargar.md DELETED
@@ -1,73 +0,0 @@
- <br />
- <h1>Arena Breakout Global Beta Download: How to Join the Next-Generation Immersive Tactical FPS on Mobile</h1>
- <p>If you are looking for an exciting new shooter that challenges your skills and rewards your risks, you might want to check out Arena Breakout. This game is a next-generation immersive tactical FPS that pushes the limits of war simulation on mobile. It is also the first looter-extraction shooter that lets you shoot, loot, and escape to win.</p>
- <p>In this article, we will tell you everything you need to know about Arena Breakout, how to download and play the global beta, and what is new in the latest update. Let's get started!</p>
- <h2>arena breakout global beta download</h2><br /><p><b><b>DOWNLOAD</b> &#9913; <a href="https://bltlly.com/2v6KI1">https://bltlly.com/2v6KI1</a></b></p><br /><br />
- <h2>What is Arena Breakout?</h2>
- <p>Arena Breakout is a game developed by Level Infinite, a studio that aims to create innovative and immersive games for mobile devices. Arena Breakout is its flagship title and has been in development for more than two years. The game has been praised by players and critics for its realistic graphics, sound effects, and gameplay mechanics.</p>
- <h3>A new kind of shooter</h3>
- <p>Arena Breakout is not your typical shooter. It is a game that combines elements of the tactical FPS, battle royale, and looter shooter genres. The game has two modes: solo and squad. In solo mode, you play as a lone wolf who has to survive against other players and AI enemies. In squad mode, you team up with up to three other players and cooperate to eliminate the competition.</p>
- <p>The game also has a unique feature called breakout. Breakout is the only way to win the game. You have to escape the combat zone alive with your loot before time runs out. If you die or fail to escape, you lose everything you collected in the match. This adds a layer of tension and strategy to the game, as you have to decide when to fight, when to loot, and when to run.</p>
- <h3>A realistic and immersive experience</h3>
-
- <p>The game also has a realistic shooting system that simulates the physics and mechanics of real weapons. You have to manage your recoil, reload your magazines, patch up your wounds, and use cover and movement wisely. The game also has different weather conditions, day and night cycles, and destructible environments that affect your gameplay.</p>
- <h3>A high-risk, high-reward system</h3>
- <p>Arena Breakout is a game that rewards your risks with high rewards. The game has a loot system that lets you collect valuable weapons, attachments, and supplies from the arena. You can also loot the bodies or crates of other players to get more loot. The loot you collect can be used in the match or saved for later use.</p>
- <p>The game also has a currency system that lets you buy or sell items in the market. You can use the currency to buy better gear or cosmetics for your character. However, you have to be careful with your money, as you can lose it all if you die or fail to escape. The game also has a ranking system that tracks your performance and progress in the game.</p>
- <p></p>
- <h2>How to download and play the Arena Breakout global beta?</h2>
- <p>If you are interested in playing the Arena Breakout global beta, here are some things you need to know:</p>
- <h3>Requirements and availability</h3>
- <p>The Arena Breakout global beta is currently available only for Android devices. You need an Android device with at least 4 GB of RAM and Android 8.0 or higher to play the game. The game also requires a stable internet connection and roughly 2 GB of storage space.</p>
-
- <h3>Steps to download and install</h3>
- <p>Once you have a beta invitation code, you can follow these steps to download and install the Arena Breakout global beta on your Android device:</p>
- <ol>
- <li>Go to the official Arena Breakout website and click the download button. You will be redirected to a page where you can enter your beta invitation code and your email address. After verifying your code and email, you will receive a download link for the game.</li>
- <li>Alternatively, you can go to a third-party source that provides the download link for the game, such as APKPure or TapTap. However, make sure you download the game from a reliable and safe source, as some sources may contain malware or viruses.</li>
- <li>After downloading the game, you need to enable the installation of apps from unknown sources on your device. To do this, go to your device settings, then security, then unknown sources, and turn it on.</li>
- <li>Then, find the downloaded file on your device and tap on it to install it. You may need to grant some permissions for the game to run properly.</li>
- <li>After installing the game, launch it and enter your beta invitation code again to log in. You may also need to create an account or link your social media account to play the game.</li>
- <li>Enjoy playing the Arena Breakout global beta!</li>
- </ol>
- <h3>Tips and tricks for beginners</h3>
- <p>If you are new to Arena Breakout, here are some tips and tricks that can help you improve your skills and win more matches:</p>
- <ul>
- <li>Learn the basics of the game, such as how to move, aim, shoot, reload, heal, loot, and escape. You can practice these skills in training mode or solo mode before joining squad mode.</li>
-
- <li>Use cover and movement wisely. You can use walls, buildings, vehicles, and other objects as cover from enemy fire. You can also use different movements, such as crouching, leaning, sliding, jumping, and rolling, to dodge bullets and surprise your enemies.</li>
- <li>Loot smart and strategically. You can loot weapons, attachments, and supplies from crates, bodies, or buildings in the arena. However, be careful not to expose yourself too much while looting, as you can attract unwanted attention from other players or AI enemies. Also, be selective about what you loot, as you have limited inventory space and weight capacity.</li>
- <li>Break out at the right time. Breakout is the only way to win the game, but it is also risky. You have to escape the combat zone alive with your loot before time runs out. However, you also have to watch out for other players who may try to stop you or steal your loot. Therefore, you have to choose when to break out carefully based on your situation and strategy.</li>
- </ul>
- <h2>What is new in the global beta update?</h2>
- <p>The Arena Breakout global beta has been updated with new features and improvements that make the game more fun and engaging. Here are some of the highlights of the update:</p>
- <h3>Female characters and customization options</h3>
- <p>You can now choose between male and female characters in Arena Breakout. You can also customize your character's appearance with different hairstyles, skin tones, faces, outfits, accessories, and more. You can unlock more customization options by completing missions or buying them with currency.</p>
- <h3>In-match kill cam and quick equip features</h3>
- <p>You can now see how you died or how you killed someone in the match with the kill cam feature. The kill cam shows you a replay of the last moments of your life or your enemy's life from their perspective. You can use this feature to learn from your mistakes or to enjoy your victories.</p>
-
- <h3>Gear lending and friend invitation systems</h3>
- <p>You can now lend your gear to your squadmates or borrow gear from them in the match with the gear lending system. The gear lending system lets you share your weapons, attachments, and supplies with your team members to help them or to optimize your loadout. You can also request or offer gear to your squadmates with a simple tap.</p>
- <p>You can also invite your friends to play with you in Arena Breakout with the friend invitation system. The friend invitation system lets you send or receive invitations to join a team with your friends or other players. You can also chat with your friends or teammates in the game lobby or in the match.</p>
- <h3>Trophy room and language support</h3>
- <p>You can now show off your achievements and progress in Arena Breakout with the trophy room feature. The trophy room feature lets you display your trophies, medals, badges, and stats in a virtual room that you can customize and decorate. You can also visit other players' trophy rooms and compare your performance with theirs.</p>
- <p>You can also play Arena Breakout in different languages with the language support feature. The game currently supports English, Chinese, Spanish, Portuguese, Russian, Turkish, Arabic, and Indonesian. You can change the game language in the settings menu.</p>
- <h2>Conclusion</h2>
- <p>Arena Breakout is a game that offers a new and exciting way to play shooters on mobile devices. It is a game that combines tactical FPS, battle royale, and looter shooter elements into a realistic and immersive experience. It is also a game that challenges your skills and rewards your risks with a high-risk, high-reward system.</p>
-
- <p>Arena Breakout is a game worth trying if you are looking for a next-generation tactical FPS on mobile devices. It is a game that will keep you on the edge of your seat as you shoot, loot, and break out to win.</p>
- <h2>Frequently asked questions</h2>
- <ol>
- <li>What is Arena Breakout?</li>
- <p>Arena Breakout is a next-generation immersive tactical FPS that pushes the limits of war simulation on mobile devices. It is also the first looter-extraction shooter that lets you shoot, loot, and escape to win.</p>
- <li>How do I download and play the Arena Breakout global beta?</li>
- <p>You need an Android device with at least 4 GB of RAM and Android 8.0 or higher, a stable internet connection, and a beta invitation code. You can download the game from the official website or from third-party sources, and follow the steps to install and play it.</p>
- <li>What is new in the global beta update?</li>
- <p>The global beta update has added new features and improvements, such as female characters and customization options, in-match kill cam and quick equip features, gear lending and friend invitation systems, a trophy room and language support, and more.</p>
- <li>How do I get a beta invitation code?</li>
- <p>You can get a beta invitation code by following Arena Breakout's official social media accounts or by joining the game's official Discord server. You can also get a beta invitation code by participating in giveaways or events organized by the developers or influencers.</p>
- <li>How do I break out in Arena Breakout?</li>
- <p>Breakout is the only way to win the game. You have to escape the combat zone with your loot before time runs out. However, you also have to watch out for other players who may try to stop you or steal your loot. Therefore, you have to choose when to break out carefully based on your situation and strategy.</p>
- </ol> 64aa2da5cf<br />
- <br />
- <br />
spaces/Benson/text-generation/Examples/Caso Penal Pacfico Baha Mod Men Apk.md DELETED
@@ -1,116 +0,0 @@
- <br />
- <h1>Criminal Case: Pacific Bay Mod Menu APK - A Guide for Crime Solvers</h1>
- <p>Do you love playing detective games? Do you enjoy finding clues, interrogating suspects, and solving mysteries? If so, you may have heard of Criminal Case: Pacific Bay, one of the most popular hidden object games on Android. But did you know there is a way to make this game even more fun and exciting? Yes, we are talking about Criminal Case: Pacific Bay Mod Menu APK, a hack tool that gives you unlimited resources, free purchases, no ads, and more. In this article, we will tell you everything you need to know about this mod menu apk, including what it is, how to download and install it, how to use it, and what its pros and cons are. So, let's get started!</p>
- <h2>criminal case pacific bay mod menu apk</h2><br /><p><b><b>Download File</b> &#10026; <a href="https://bltlly.com/2v6M5B">https://bltlly.com/2v6M5B</a></b></p><br /><br />
- <h2>What is Criminal Case: Pacific Bay?</h2>
- <h3>A popular hidden object game</h3>
- <p>Criminal Case: Pacific Bay is a hidden object game developed by Pretty Simple, a French studio specializing in casual games. It is the second season of the Criminal Case series, which has more than 100 million downloads on Google Play. In this game, you play as a detective working for the Pacific Bay Police Department. Your job is to investigate various crime scenes, find clues, analyze evidence, interrogate suspects, and arrest the killers. You can also team up with other players online and compete for the best scores.</p>
- <h3>An exciting adventure in Pacific Bay</h3>
- <p>Criminal Case: Pacific Bay takes you on an exciting adventure across Pacific Bay, a coastal city with different districts, each with its own theme, characters, and crimes.</p>
- <h3>A challenging and rewarding experience</h3>
- <p>Criminal Case: Pacific Bay is not an easy game. You will have to use your observation, deduction, and logic skills to solve the puzzles and find the culprits. You will also have to manage your time and energy wisely, as they are limited resources in the game. You will have to earn stars by completing tasks in each crime scene. You can use these stars to unlock new scenes, buy items, or perform actions. You will also have to collect coins and cash by playing mini-games or watching ads. You can use these coins to customize your avatar, buy boosters, or access premium features. You will also have to level up by earning experience points (XP) and rank up by earning medals. You will also have to unlock achievements and trophies by completing certain objectives.</p>
12
- <h2>¿Qué es el caso penal: Pacific Bay Mod Menu APK? </h2>
13
- <h3>Una versión modificada del juego</h3>
14
- <p>Caso Penal: Pacific Bay Mod Menu APK es una versión modificada del juego original que incluye algunas características de hackeo para ayudar a los jugadores a superar fácilmente los niveles más difíciles. No es una aplicación oficial de Pretty Simple, sino una aplicación de terceros creada por algunos fans o desarrolladores que quieren mejorar la experiencia de juego. No está disponible en Google Play , pero se puede descargar desde algunos sitios web que ofrecen aplicaciones y juegos modificados. Sin embargo, debe tener cuidado al descargar e instalar dichas aplicaciones, ya que pueden contener virus, malware o spyware que pueden dañar su dispositivo o robar su información personal. </p>
15
- <p></p>
16
- <h3>Una herramienta de hackeo para recursos ilimitados</h3>
17
-
18
- <h3>Una forma de disfrutar del juego sin anuncios</h3>
19
- <p>Caso Penal: Pacific Bay Mod Menu APK es una manera de disfrutar del juego sin anuncios. Los anuncios son molestos y distraen, especialmente cuando aparecen en medio del juego o cuando estás viendo un video. También consumen sus datos y la batería. Con este menú mod apk, puede eliminar todos los anuncios del juego y jugar sin ninguna interrupción. También puedes evitar ver anuncios para ganar monedas o dinero en el juego. </p>
20
- <h2>Cómo descargar e instalar Caso Penal: Pacific Bay Mod Menu APK? </h2>
21
- <h3>Los requisitos y precauciones</h3>
22
- <p>Antes de descargar e instalar Caso Penal: Pacific Bay Mod Menu APK, es necesario asegurarse de que su dispositivo cumple con los siguientes requisitos y precauciones:</p>
23
- <ul>
24
- <li>Tu dispositivo debe tener Android 4.1 o una versión superior. </li>
25
- <li>El dispositivo debe tener suficiente espacio de almacenamiento para instalar la aplicación. </li>
26
- <li>El dispositivo debe tener una conexión a Internet estable para descargar la aplicación. </li>
27
- <li> Debe habilitar la instalación de aplicaciones de fuentes desconocidas en la configuración del dispositivo. </li>
28
- <li>Debe desinstalar la versión original de Criminal Case: Pacific Bay desde su dispositivo. </li>
29
- <li> Debe hacer una copia de seguridad de los datos del juego antes de instalar el menú mod apk. </li>
30
- <li> Debe ser consciente de los riesgos de usar aplicaciones y juegos modificados, como prohibir, bloquear o perder su cuenta. </li>
31
- </ul>
32
- <h3>Los pasos a seguir</h3>
33
- <p>Después de haber comprobado los requisitos y precauciones, puede seguir estos pasos para descargar e instalar Caso Penal: Pacific Bay Mod Menu APK:</p>
34
- <ol>
35
- <li>Ir a un sitio web que ofrece Caso Penal: Pacific Bay Mod Menu APK, tales como [APKPure], [APKDone], o [ModDroid]. </li>
36
- <li>Encontrar y descargar la última versión de Caso Penal: Pacific Bay Mod Menu APK en su dispositivo. </li>
37
- <li>Localice y toque en el archivo descargado para iniciar el proceso de instalación. </li>
38
- <li>Siga las instrucciones en la pantalla para completar la instalación. </li>
39
-
40
- </ol> <h3>Los beneficios y desventajas</h3>
41
- <p>Caso Penal: Pacific Bay Mod Menu APK tiene algunos beneficios y desventajas que usted debe considerar antes de usarlo. Estos son algunos de ellos:</p>
42
- <tabla>
43
- <tr>
44
- <th>Beneficios</th>
45
- <th>Inconvenientes</th>
46
- </tr>
47
- <tr>
48
- <td>Puedes disfrutar del juego con recursos ilimitados y sin anuncios. </td>
49
- <td>Puede que te prohíban jugar o pierdas tu cuenta. </td>
50
- </tr>
51
- <tr>
52
- <td>Puedes saltarte el tiempo de espera y jugar el juego cuando quieras. </td>
53
- <td>Puedes perderte la diversión y el desafío del juego. </td>
54
- </tr>
55
- <tr>
56
- <td>Puedes personalizar tu avatar y comprar potenciadores sin gastar dinero real. </td>
57
- <td>Puedes encontrar algunos errores o errores en el juego. </td>
58
- </tr>
59
- <tr>
60
- <td>Puedes posicionarte más rápido y desbloquear logros y trofeos fácilmente. </td>
61
- <td>Usted puede perder sus datos de juego o el progreso si el menú mod apk no se actualiza. </td>
62
- </tr>
63
- </tabla>
64
- <h2>Cómo utilizar Caso Penal: Pacific Bay Mod Menu APK? </h2>
65
- <h3>Las características y funciones</h3>
66
- <p>Caso Penal: Pacific Bay Mod Menu APK tiene algunas características y funciones que puede utilizar para mejorar su experiencia de juego. Estos son algunos de ellos:</p>
67
- <ul>
68
- <li>Estrellas ilimitadas: Puedes usar esta función para desbloquear nuevas escenas, comprar objetos o realizar acciones sin ganar estrellas en el juego. </li>
69
- <li>Monedas ilimitadas: Puedes usar esta función para personalizar tu avatar, comprar boosters o acceder a funciones premium sin recoger monedas en el juego. </li>
70
- <li>Dinero ilimitado: Puede utilizar esta función para obtener compras gratis en la tienda de juegos sin gastar dinero real. </li>
71
- <li>Energía ilimitada: Puede utilizar esta función para jugar el juego sin esperar a recargar energía o ver anuncios. </li>
72
- <li>Pistas ilimitadas: Puedes usar esta función para obtener pistas en cada escena del crimen sin usar estrellas o monedas. </li>
73
- <li>Sin anuncios: Puede utilizar esta función para eliminar todos los anuncios del juego y jugar sin ninguna interrupción. </li>
74
-
75
- <li>XP ilimitado: Puedes usar esta función para subir de nivel más rápido y ganar más puntos de experiencia en el juego. </li>
76
- <li>Rank Hack: Puede utilizar esta función para clasificar más rápido y ganar más medallas en el juego. </li>
77
- </ul> <h3>Los consejos y trucos</h3>
78
- <p>Caso Penal: Pacific Bay Mod Menu APK tiene algunos consejos y trucos que puede utilizar para mejorar su juego y puntuación. Estos son algunos de ellos:</p>
79
- <ul>
80
- <li>Utilice la función de sugerencias ilimitadas sabiamente. No confíe en él demasiado, ya que puede reducir la diversión y el desafío del juego. Trata de encontrar las pistas por ti mismo primero, y usa las pistas solo cuando estés atascado o te estés quedando sin tiempo. </li>
81
- <li>Utilice la función de energía ilimitada con moderación. No juegue el juego durante demasiado tiempo, ya que puede causar fatiga ocular, o adicción. Tome descansos entre sesiones y limite su tiempo de reproducción diario. </li>
82
- <li>Usa las monedas ilimitadas y la función de efectivo moderadamente. No compres todo en la tienda de juegos, ya que puede hacer que el juego sea demasiado fácil o aburrido. Guardar algunas monedas y dinero en efectivo para los niveles posteriores, o para los elementos que realmente necesita o quiere. </li>
83
- <li>Usa cuidadosamente la función de estrellas ilimitadas. No desbloquear todas las escenas a la vez, ya que puede estropear la historia o el suspenso del juego. Sigue el orden de los casos y desbloquea las escenas a medida que avanzas. </li>
84
- <li>Utilice la función de compras gratuitas selectivamente. No compres artículos que no sean compatibles con tu dispositivo, ya que puede causar fallos o errores en el juego. Compruebe la compatibilidad y las revisiones de los artículos antes de comprarlos. </li>
85
- <li>Utilice la función ilimitada XP y rango hack con cautela. No suba de nivel ni suba de rango demasiado rápido, ya que puede aumentar la sospecha o la detección de los desarrolladores de juegos u otros jugadores. Mantén tu nivel y rango dentro de un rango razonable, y evita usar esta función en modo online. </li>
86
- </ul>
87
- <h3>Los riesgos y limitaciones</h3>
88
- <p>Caso Penal: Pacific Bay Mod Menu APK tiene algunos riesgos y limitaciones que usted debe ser consciente de antes de usarlo. Estos son algunos de ellos:</p>
89
- <ul>
90
-
91
- <li>Usted puede encontrar algunos errores o errores en el juego si se utiliza este menú mod apk. El menú mod apk puede no ser compatible con su dispositivo, su versión del juego, o sus datos de juego. También puedes experimentar bloqueos, congelaciones, retrasos o fallos en el juego. </li>
92
- <li>Usted puede perder los datos del juego o el progreso si se utiliza este menú mod apk. El menú mod apk puede sobrescribir o corromper los datos del juego o el progreso. También puede perder sus datos o el progreso si desinstalar el menú mod apk o actualizar el juego. </li>
93
- <li>Usted puede perder la diversión y el desafío del juego si se utiliza este menú mod apk. El menú mod apk puede hacer el juego demasiado fácil o aburrido para usted. También puede perder interés en el juego o sentirse culpable por hacer trampa. </li>
94
- </ul>
95
- <h2>Conclusión</h2>
96
- <p>Caso Penal: Pacific Bay Mod Menu APK es una herramienta de hackeo que le da recursos ilimitados, compras gratis, sin anuncios, y más en Criminal Case: Pacific Bay, un popular juego de objetos ocultos en Android. Es una versión modificada del juego original que no está disponible en Google Play, pero en algunos sitios web que ofrecen aplicaciones y juegos modificados. Tiene algunos beneficios y desventajas que debe considerar antes de usarlo. También tiene algunas características y funciones que puede utilizar para mejorar su experiencia de juego. También tiene algunos consejos y trucos que puede utilizar para mejorar su juego y puntuación. También tiene algunos riesgos y limitaciones que debes conocer antes de usarlo. </p>
97
- <h2>Preguntas frecuentes</h2>
98
- <h3>Q: ¿Es el caso penal: Pacific Bay Mod menú APK seguro de usar? </h3>
99
- <p>A: Caso Penal: Pacific Bay Mod Menu APK no es seguro de usar, ya que puede contener virus, malware o spyware que pueden dañar su dispositivo o robar su información personal. También puede hacer que te expulsen del juego o que pierdas tu cuenta. También puede causar errores o errores en el juego. Es mejor utilizar la versión original de Criminal Case: Pacific Bay de Google Play.</p>
100
- <h3>Q: Es Caso Penal: Pacific Bay Mod Menu APK legal de usar? </h3>
101
-
102
- <h3>Q: ¿Cómo puedo actualizar Caso Penal: Pacific Bay Mod Menu APK? </h3>
103
- <p>A: Puede actualizar Caso Penal: Pacific Bay Mod Menu APK mediante la descarga e instalación de la última versión de un sitio web que ofrece aplicaciones y juegos modded. Sin embargo, usted debe tener cuidado al actualizar el menú mod apk, ya que puede no ser compatible con los datos del juego o el progreso. También puede perder sus características de hackeo o enfrentar nuevos riesgos o limitaciones. Es mejor hacer una copia de seguridad de los datos del juego antes de actualizar el menú mod apk. </p>
104
- <h3>Q: ¿Cómo puedo desinstalar Caso Penal: Pacific Bay Mod Menu APK? </h3>
105
- <p>A: Usted puede desinstalar Caso Penal: Pacific Bay Mod Menu APK siguiendo estos pasos:</p>
106
- <ol>
107
- <li>Ir a la configuración del dispositivo y toque en aplicaciones o aplicaciones.</li>
108
- <li>Encontrar y toque en Caso Penal: Pacific Bay Mod Menú APK.</li>
109
- <li>Toque en Desinstalar y confirme su acción. </li>
110
- <li>Espere a que termine el proceso de desinstalación. </li>
111
- </ol>
112
- <p>También puede volver a instalar la versión original de Criminal Case: Pacific Bay de Google Play si desea jugar el juego de nuevo. </p>
113
- <h3>Q: ¿Dónde puedo encontrar más información sobre Caso Penal: Pacific Bay Mod Menu APK? </h3>
114
- <p>A: Usted puede encontrar más información sobre Caso Penal: Pacific Bay Mod Menu APK visitando el sitio web que lo ofrece, o buscando en línea para comentarios, comentarios o tutoriales. Sin embargo, debe tener cuidado al visitar dichos sitios web o fuentes, ya que pueden no ser confiables o confiables. También debe evitar hacer clic en cualquier enlace sospechoso o descargar archivos desconocidos. Es mejor usar un antivirus o una aplicación de seguridad de buena reputación para proteger su dispositivo y sus datos. </p> 64aa2da5cf<br />
115
- <br />
116
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/dateutil/utils.py DELETED
@@ -1,71 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- """
3
- This module offers general convenience and utility functions for dealing with
4
- datetimes.
5
-
6
- .. versionadded:: 2.7.0
7
- """
8
- from __future__ import unicode_literals
9
-
10
- from datetime import datetime, time
11
-
12
-
13
- def today(tzinfo=None):
14
- """
15
- Returns a :py:class:`datetime` representing the current day at midnight
16
-
17
- :param tzinfo:
18
- The time zone to attach (also used to determine the current day).
19
-
20
- :return:
21
- A :py:class:`datetime.datetime` object representing the current day
22
- at midnight.
23
- """
24
-
25
- dt = datetime.now(tzinfo)
26
- return datetime.combine(dt.date(), time(0, tzinfo=tzinfo))
27
-
28
-
29
- def default_tzinfo(dt, tzinfo):
30
- """
31
- Sets the ``tzinfo`` parameter on naive datetimes only
32
-
33
- This is useful for example when you are provided a datetime that may have
34
- either an implicit or explicit time zone, such as when parsing a time zone
35
- string.
36
-
37
- .. doctest::
38
-
39
- >>> from dateutil.tz import tzoffset
40
- >>> from dateutil.parser import parse
41
- >>> from dateutil.utils import default_tzinfo
42
- >>> dflt_tz = tzoffset("EST", -18000)
43
- >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz))
44
- 2014-01-01 12:30:00+00:00
45
- >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz))
46
- 2014-01-01 12:30:00-05:00
47
-
48
- :param dt:
49
- The datetime on which to replace the time zone
50
-
51
- :param tzinfo:
52
- The :py:class:`datetime.tzinfo` subclass instance to assign to
53
- ``dt`` if (and only if) it is naive.
54
-
55
- :return:
56
- Returns an aware :py:class:`datetime.datetime`.
57
- """
58
- if dt.tzinfo is not None:
59
- return dt
60
- else:
61
- return dt.replace(tzinfo=tzinfo)
62
-
63
-
64
- def within_delta(dt1, dt2, delta):
65
- """
66
- Useful for comparing two datetimes that may have a negligible difference
67
- to be considered equal.
68
- """
69
- delta = abs(delta)
70
- difference = dt1 - dt2
71
- return -delta <= difference <= delta
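
A short usage sketch of the helpers above; the zone name and timestamps are arbitrary examples, not part of the module:

```python
from datetime import datetime, timedelta

from dateutil import tz
from dateutil.utils import default_tzinfo, today, within_delta

# Attach a fallback zone to a naive datetime; aware datetimes pass through.
naive = datetime(2014, 1, 1, 12, 30)
print(default_tzinfo(naive, tz.gettz("America/New_York")))
# 2014-01-01 12:30:00-05:00

# Treat two datetimes as equal when they differ by less than one minute.
midnight = today()
print(within_delta(midnight, midnight + timedelta(seconds=5), timedelta(minutes=1)))
# True
```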
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/version.py DELETED
@@ -1,6 +0,0 @@
1
- import pkg_resources
2
-
3
- try:
4
- __version__ = pkg_resources.get_distribution('setuptools').version
5
- except Exception:
6
- __version__ = 'unknown'
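
`pkg_resources` is deprecated in current setuptools releases; below is a sketch of the equivalent lookup with the standard library, assuming Python 3.8+ (an alternative shown for illustration, not what this file shipped):

```python
# Same version lookup via importlib.metadata (Python 3.8+).
from importlib.metadata import PackageNotFoundError, version

try:
    __version__ = version("setuptools")
except PackageNotFoundError:
    __version__ = "unknown"
```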
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/dev/linter.sh DELETED
@@ -1,46 +0,0 @@
1
- #!/bin/bash -e
2
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
3
-
4
- # Run this script at project root by "./dev/linter.sh" before you commit
5
-
6
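- # vergte A B: succeeds when version A >= version B (sort -V puts the smaller version first).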
- vergte() {
7
- [ "$2" = "$(echo -e "$1\n$2" | sort -V | head -n1)" ]
8
- }
9
-
10
- {
11
- black --version | grep "19.3b0" > /dev/null
12
- } || {
13
- echo "Linter requires black==19.3b0 !"
14
- exit 1
15
- }
16
-
17
- ISORT_TARGET_VERSION="4.3.21"
18
- ISORT_VERSION=$(isort -v | grep VERSION | awk '{print $2}')
19
- vergte "$ISORT_VERSION" "$ISORT_TARGET_VERSION" || {
20
- echo "Linter requires isort>=${ISORT_TARGET_VERSION} !"
21
- exit 1
22
- }
23
-
24
- set -v
25
-
26
- echo "Running isort ..."
27
- isort -y -sp . --atomic
28
-
29
- echo "Running black ..."
30
- black -l 100 .
31
-
32
- echo "Running flake8 ..."
33
- if [ -x "$(command -v flake8-3)" ]; then
34
- flake8-3 .
35
- else
36
- python3 -m flake8 .
37
- fi
38
-
39
- # echo "Running mypy ..."
40
- # Pytorch does not have enough type annotations
41
- # mypy detectron2/solver detectron2/structures detectron2/config
42
-
43
- echo "Running clang-format ..."
44
- find . -regex ".*\.\(cpp\|c\|cc\|cu\|cxx\|h\|hh\|hpp\|hxx\|tcc\|mm\|m\)" -print0 | xargs -0 clang-format -i
45
-
46
- command -v arc > /dev/null && arc lint
spaces/CVPR/SPOTER_Sign_Language_Recognition/spoter_mod/utils.py DELETED
@@ -1,41 +0,0 @@
1
-
2
- import numpy as np
3
-
4
- from collections import Counter
5
- from torch.utils.data import Subset
6
- from sklearn.model_selection import train_test_split
7
-
8
-
9
- def __balance_val_split(dataset, val_split=0.):
10
- targets = np.array(dataset.targets)
11
- train_indices, val_indices = train_test_split(
12
- np.arange(targets.shape[0]),
13
- test_size=val_split,
14
- stratify=targets
15
- )
16
-
17
- train_dataset = Subset(dataset, indices=train_indices)
18
- val_dataset = Subset(dataset, indices=val_indices)
19
-
20
- return train_dataset, val_dataset
21
-
22
-
23
- def __split_of_train_sequence(subset: Subset, train_split=1.0):
24
- if train_split == 1:
25
- return subset
26
-
27
- targets = np.array([subset.dataset.targets[i] for i in subset.indices])
28
- train_indices, _ = train_test_split(
29
- np.arange(targets.shape[0]),
30
- test_size=1 - train_split,
31
- stratify=targets
32
- )
33
-
34
- train_dataset = Subset(subset.dataset, indices=[subset.indices[i] for i in train_indices])
35
-
36
- return train_dataset
37
-
38
-
39
- def __log_class_statistics(subset: Subset):
40
- train_classes = [subset.dataset.targets[i] for i in subset.indices]
41
- print(dict(Counter(train_classes)))
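
A minimal sketch of how these module-private helpers compose, run from inside the module itself; the `TensorDataset` and the `.targets` attribute attached to it are illustration-only assumptions (the real project supplies a dataset class that already exposes `targets`):

```python
import numpy as np
import torch
from torch.utils.data import TensorDataset

np.random.seed(0)
dataset = TensorDataset(torch.randn(100, 8))
dataset.targets = np.random.randint(0, 4, size=100).tolist()  # the helpers read .targets

train_set, val_set = __balance_val_split(dataset, val_split=0.2)    # stratified 80/20 split
train_half = __split_of_train_sequence(train_set, train_split=0.5)  # keep half, still stratified
__log_class_statistics(train_half)  # e.g. {0: 10, 1: 11, 2: 9, 3: 10}
```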
spaces/CVPR/WALT/mmdet/core/bbox/assigners/max_iou_assigner.py DELETED
@@ -1,212 +0,0 @@
1
- import torch
2
-
3
- from ..builder import BBOX_ASSIGNERS
4
- from ..iou_calculators import build_iou_calculator
5
- from .assign_result import AssignResult
6
- from .base_assigner import BaseAssigner
7
-
8
-
9
- @BBOX_ASSIGNERS.register_module()
10
- class MaxIoUAssigner(BaseAssigner):
11
- """Assign a corresponding gt bbox or background to each bbox.
12
-
13
- Each proposals will be assigned with `-1`, or a semi-positive integer
14
- indicating the ground truth index.
15
-
16
- - -1: negative sample, no assigned gt
17
- - semi-positive integer: positive sample, index (0-based) of assigned gt
18
-
19
- Args:
20
- pos_iou_thr (float): IoU threshold for positive bboxes.
21
- neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
22
- min_pos_iou (float): Minimum iou for a bbox to be considered as a
23
- positive bbox. Positive samples can have smaller IoU than
24
- pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
25
- gt_max_assign_all (bool): Whether to assign all bboxes with the same
26
- highest overlap with some gt to that gt.
27
- ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
28
- `gt_bboxes_ignore` is specified). Negative values mean not
29
- ignoring any bboxes.
30
- ignore_wrt_candidates (bool): Whether to compute the iof between
31
- `bboxes` and `gt_bboxes_ignore`, or the contrary.
32
- match_low_quality (bool): Whether to allow low quality matches. This is
33
- usually allowed for RPN and single stage detectors, but not allowed
34
- in the second stage. Details are demonstrated in Step 4.
35
- gpu_assign_thr (int): The upper bound of the number of GT for GPU
36
- assign. When the number of gt is above this threshold, will assign
37
- on CPU device. Negative values mean not assign on CPU.
38
- """
39
-
40
- def __init__(self,
41
- pos_iou_thr,
42
- neg_iou_thr,
43
- min_pos_iou=.0,
44
- gt_max_assign_all=True,
45
- ignore_iof_thr=-1,
46
- ignore_wrt_candidates=True,
47
- match_low_quality=True,
48
- gpu_assign_thr=-1,
49
- iou_calculator=dict(type='BboxOverlaps2D')):
50
- self.pos_iou_thr = pos_iou_thr
51
- self.neg_iou_thr = neg_iou_thr
52
- self.min_pos_iou = min_pos_iou
53
- self.gt_max_assign_all = gt_max_assign_all
54
- self.ignore_iof_thr = ignore_iof_thr
55
- self.ignore_wrt_candidates = ignore_wrt_candidates
56
- self.gpu_assign_thr = gpu_assign_thr
57
- self.match_low_quality = match_low_quality
58
- self.iou_calculator = build_iou_calculator(iou_calculator)
59
-
60
- def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
61
- """Assign gt to bboxes.
62
-
63
- This method assign a gt bbox to every bbox (proposal/anchor), each bbox
64
- will be assigned with -1, or a semi-positive number. -1 means negative
65
- sample, semi-positive number is the index (0-based) of assigned gt.
66
- The assignment is done in following steps, the order matters.
67
-
68
- 1. assign every bbox to the background
69
- 2. assign proposals whose iou with all gts < neg_iou_thr to 0
70
- 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
71
- assign it to that bbox
72
- 4. for each gt bbox, assign its nearest proposals (may be more than
73
- one) to itself
74
-
75
- Args:
76
- bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
77
- gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
78
- gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
79
- labelled as `ignored`, e.g., crowd boxes in COCO.
80
- gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
81
-
82
- Returns:
83
- :obj:`AssignResult`: The assign result.
84
-
85
- Example:
86
- >>> self = MaxIoUAssigner(0.5, 0.5)
87
- >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
88
- >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]])
89
- >>> assign_result = self.assign(bboxes, gt_bboxes)
90
- >>> expected_gt_inds = torch.LongTensor([1, 0])
91
- >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
92
- """
93
- assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
94
- gt_bboxes.shape[0] > self.gpu_assign_thr) else False
95
- # compute overlap and assign gt on CPU when number of GT is large
96
- if assign_on_cpu:
97
- device = bboxes.device
98
- bboxes = bboxes.cpu()
99
- gt_bboxes = gt_bboxes.cpu()
100
- if gt_bboxes_ignore is not None:
101
- gt_bboxes_ignore = gt_bboxes_ignore.cpu()
102
- if gt_labels is not None:
103
- gt_labels = gt_labels.cpu()
104
-
105
- overlaps = self.iou_calculator(gt_bboxes, bboxes)
106
-
107
- if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
108
- and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
109
- if self.ignore_wrt_candidates:
110
- ignore_overlaps = self.iou_calculator(
111
- bboxes, gt_bboxes_ignore, mode='iof')
112
- ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
113
- else:
114
- ignore_overlaps = self.iou_calculator(
115
- gt_bboxes_ignore, bboxes, mode='iof')
116
- ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
117
- overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
118
-
119
- assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
120
- if assign_on_cpu:
121
- assign_result.gt_inds = assign_result.gt_inds.to(device)
122
- assign_result.max_overlaps = assign_result.max_overlaps.to(device)
123
- if assign_result.labels is not None:
124
- assign_result.labels = assign_result.labels.to(device)
125
- return assign_result
126
-
127
- def assign_wrt_overlaps(self, overlaps, gt_labels=None):
128
- """Assign w.r.t. the overlaps of bboxes with gts.
129
-
130
- Args:
131
- overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
132
- shape(k, n).
133
- gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ).
134
-
135
- Returns:
136
- :obj:`AssignResult`: The assign result.
137
- """
138
- num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)
139
-
140
- # 1. assign -1 by default
141
- assigned_gt_inds = overlaps.new_full((num_bboxes, ),
142
- -1,
143
- dtype=torch.long)
144
-
145
- if num_gts == 0 or num_bboxes == 0:
146
- # No ground truth or boxes, return empty assignment
147
- max_overlaps = overlaps.new_zeros((num_bboxes, ))
148
- if num_gts == 0:
149
- # No truth, assign everything to background
150
- assigned_gt_inds[:] = 0
151
- if gt_labels is None:
152
- assigned_labels = None
153
- else:
154
- assigned_labels = overlaps.new_full((num_bboxes, ),
155
- -1,
156
- dtype=torch.long)
157
- return AssignResult(
158
- num_gts,
159
- assigned_gt_inds,
160
- max_overlaps,
161
- labels=assigned_labels)
162
-
163
- # for each anchor, which gt best overlaps with it
164
- # for each anchor, the max iou of all gts
165
- max_overlaps, argmax_overlaps = overlaps.max(dim=0)
166
- # for each gt, which anchor best overlaps with it
167
- # for each gt, the max iou of all proposals
168
- gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
169
-
170
- # 2. assign negative: below
171
- # the negative inds are set to be 0
172
- if isinstance(self.neg_iou_thr, float):
173
- assigned_gt_inds[(max_overlaps >= 0)
174
- & (max_overlaps < self.neg_iou_thr)] = 0
175
- elif isinstance(self.neg_iou_thr, tuple):
176
- assert len(self.neg_iou_thr) == 2
177
- assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
178
- & (max_overlaps < self.neg_iou_thr[1])] = 0
179
-
180
- # 3. assign positive: above positive IoU threshold
181
- pos_inds = max_overlaps >= self.pos_iou_thr
182
- assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
183
-
184
- if self.match_low_quality:
185
- # Low-quality matching will overwrite the assigned_gt_inds assigned
186
- # in Step 3. Thus, the assigned gt might not be the best one for
187
- # prediction.
188
- # For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,
189
- # bbox 1 will be assigned as the best target for bbox A in step 3.
190
- # However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's
191
- # assigned_gt_inds will be overwritten to be bbox B.
192
- # This might be the reason that it is not used in ROI Heads.
193
- for i in range(num_gts):
194
- if gt_max_overlaps[i] >= self.min_pos_iou:
195
- if self.gt_max_assign_all:
196
- max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
197
- assigned_gt_inds[max_iou_inds] = i + 1
198
- else:
199
- assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
200
-
201
- if gt_labels is not None:
202
- assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
203
- pos_inds = torch.nonzero(
204
- assigned_gt_inds > 0, as_tuple=False).squeeze()
205
- if pos_inds.numel() > 0:
206
- assigned_labels[pos_inds] = gt_labels[
207
- assigned_gt_inds[pos_inds] - 1]
208
- else:
209
- assigned_labels = None
210
-
211
- return AssignResult(
212
- num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
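
A slightly larger version of the docstring's example, with made-up boxes that exercise a positive match, a background assignment, and label lookup; it assumes mmdet's default `BboxOverlaps2D` IoU calculator is importable through the registry:

```python
import torch

assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0.2)
bboxes = torch.Tensor([[0, 0, 10, 10],     # high IoU with the first gt
                       [8, 8, 20, 20],     # barely touches anything -> background
                       [50, 50, 60, 60]])  # high IoU with the second gt
gt_bboxes = torch.Tensor([[0, 0, 10, 9], [49, 49, 61, 61]])
gt_labels = torch.LongTensor([3, 7])

result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
print(result.gt_inds)  # tensor([1, 0, 2]): 1-based gt index, 0 means background
print(result.labels)   # tensor([3, -1, 7]): class of the assigned gt, -1 otherwise
```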
spaces/CarlDennis/HYTTS/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: HYTTS
3
- emoji: 👁
4
- colorFrom: gray
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.19.1
8
- app_file: app.py
9
- pinned: false
10
- license: cc-by-3.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CjangCjengh/Sanskrit-TTS/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Sanskrit TTS
3
- emoji: 👀
4
- colorFrom: blue
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.3.1
8
- app_file: app.py
9
- pinned: false
10
- license: gpl-3.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/modules/midas/midas/midas_net.py DELETED
@@ -1,76 +0,0 @@
1
- """MidashNet: Network for monocular depth estimation trained by mixing several datasets.
2
- This file contains code that is adapted from
3
- https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
4
- """
5
- import torch
6
- import torch.nn as nn
7
-
8
- from .base_model import BaseModel
9
- from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
10
-
11
-
12
- class MidasNet(BaseModel):
13
- """Network for monocular depth estimation.
14
- """
15
-
16
- def __init__(self, path=None, features=256, non_negative=True):
17
- """Init.
18
-
19
- Args:
20
- path (str, optional): Path to saved model. Defaults to None.
21
- features (int, optional): Number of features. Defaults to 256.
22
- backbone (str, optional): Backbone network for encoder. Defaults to resnet50
23
- """
24
- print("Loading weights: ", path)
25
-
26
- super(MidasNet, self).__init__()
27
-
28
- use_pretrained = False if path is None else True
29
-
30
- self.pretrained, self.scratch = _make_encoder(backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained)
31
-
32
- self.scratch.refinenet4 = FeatureFusionBlock(features)
33
- self.scratch.refinenet3 = FeatureFusionBlock(features)
34
- self.scratch.refinenet2 = FeatureFusionBlock(features)
35
- self.scratch.refinenet1 = FeatureFusionBlock(features)
36
-
37
- self.scratch.output_conv = nn.Sequential(
38
- nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
39
- Interpolate(scale_factor=2, mode="bilinear"),
40
- nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
41
- nn.ReLU(True),
42
- nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
43
- nn.ReLU(True) if non_negative else nn.Identity(),
44
- )
45
-
46
- if path:
47
- self.load(path)
48
-
49
- def forward(self, x):
50
- """Forward pass.
51
-
52
- Args:
53
- x (tensor): input data (image)
54
-
55
- Returns:
56
- tensor: depth
57
- """
58
-
59
- layer_1 = self.pretrained.layer1(x)
60
- layer_2 = self.pretrained.layer2(layer_1)
61
- layer_3 = self.pretrained.layer3(layer_2)
62
- layer_4 = self.pretrained.layer4(layer_3)
63
-
64
- layer_1_rn = self.scratch.layer1_rn(layer_1)
65
- layer_2_rn = self.scratch.layer2_rn(layer_2)
66
- layer_3_rn = self.scratch.layer3_rn(layer_3)
67
- layer_4_rn = self.scratch.layer4_rn(layer_4)
68
-
69
- path_4 = self.scratch.refinenet4(layer_4_rn)
70
- path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
71
- path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
72
- path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
73
-
74
- out = self.scratch.output_conv(path_1)
75
-
76
- return torch.squeeze(out, dim=1)
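
A sketch of a forward pass on a dummy batch. It assumes the sibling `base_model` and `blocks` modules are importable and that `_make_encoder` can build the resnext101_wsl backbone (which may be fetched through torch.hub on first use):

```python
import torch

model = MidasNet(path=None, features=256)  # random head weights, no checkpoint
model.eval()
with torch.no_grad():
    depth = model(torch.randn(1, 3, 384, 384))  # MiDaS' customary input size
print(depth.shape)  # torch.Size([1, 384, 384]), one depth map per image
```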
spaces/DaFujaTyping/hf-Chat-ui/src/lib/server/database.ts DELETED
@@ -1,31 +0,0 @@
1
- import { MONGODB_URL, MONGODB_DB_NAME } from "$env/static/private";
2
- import { MongoClient } from "mongodb";
3
- import type { Conversation } from "$lib/types/Conversation";
4
- import type { SharedConversation } from "$lib/types/SharedConversation";
5
- import type { AbortedGeneration } from "$lib/types/AbortedGeneration";
6
- import type { Settings } from "$lib/types/Settings";
7
-
8
- const client = new MongoClient(MONGODB_URL, {
9
- // directConnection: true
10
- });
11
-
12
- export const connectPromise = client.connect().catch(console.error);
13
-
14
- const db = client.db(MONGODB_DB_NAME);
15
-
16
- const conversations = db.collection<Conversation>("conversations");
17
- const sharedConversations = db.collection<SharedConversation>("sharedConversations");
18
- const abortedGenerations = db.collection<AbortedGeneration>("abortedGenerations");
19
- const settings = db.collection<Settings>("settings");
20
-
21
- export { client, db };
22
- export const collections = { conversations, sharedConversations, abortedGenerations, settings };
23
-
24
- client.on("open", () => {
25
- conversations.createIndex({ sessionId: 1, updatedAt: -1 });
26
- abortedGenerations.createIndex({ updatedAt: 1 }, { expireAfterSeconds: 30 });
27
- abortedGenerations.createIndex({ conversationId: 1 }, { unique: true });
28
- sharedConversations.createIndex({ hash: 1 }, { unique: true });
29
- // Sparse so that we can have settings on userId later
30
- settings.createIndex({ sessionId: 1 }, { unique: true, sparse: true });
31
- });
spaces/DaFujaTyping/hf-Chat-ui/src/lib/utils/sha256.ts DELETED
@@ -1,7 +0,0 @@
1
- export async function sha256(input: string): Promise<string> {
2
- const utf8 = new TextEncoder().encode(input);
3
- const hashBuffer = await crypto.subtle.digest("SHA-256", utf8);
4
- const hashArray = Array.from(new Uint8Array(hashBuffer));
5
- const hashHex = hashArray.map((bytes) => bytes.toString(16).padStart(2, "0")).join("");
6
- return hashHex;
7
- }
spaces/DaleChen/AutoGPT/autogpt/token_counter.py DELETED
@@ -1,73 +0,0 @@
1
- """Functions for counting the number of tokens in a message or string."""
2
- from __future__ import annotations
3
-
4
- import tiktoken
5
-
6
- from autogpt.logs import logger
7
-
8
-
9
- def count_message_tokens(
10
- messages: list[dict[str, str]], model: str = "gpt-3.5-turbo-0301"
11
- ) -> int:
12
- """
13
- Returns the number of tokens used by a list of messages.
14
-
15
- Args:
16
- messages (list): A list of messages, each of which is a dictionary
17
- containing the role and content of the message.
18
- model (str): The name of the model to use for tokenization.
19
- Defaults to "gpt-3.5-turbo-0301".
20
-
21
- Returns:
22
- int: The number of tokens used by the list of messages.
23
- """
24
- try:
25
- encoding = tiktoken.encoding_for_model(model)
26
- except KeyError:
27
- logger.warn("Warning: model not found. Using cl100k_base encoding.")
28
- encoding = tiktoken.get_encoding("cl100k_base")
29
- if model == "gpt-3.5-turbo":
30
- # !Note: gpt-3.5-turbo may change over time.
31
- # Returning num tokens assuming gpt-3.5-turbo-0301.")
32
- return count_message_tokens(messages, model="gpt-3.5-turbo-0301")
33
- elif model == "gpt-4":
34
- # !Note: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.")
35
- return count_message_tokens(messages, model="gpt-4-0314")
36
- elif model == "gpt-3.5-turbo-0301":
37
- tokens_per_message = (
38
- 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
39
- )
40
- tokens_per_name = -1 # if there's a name, the role is omitted
41
- elif model == "gpt-4-0314":
42
- tokens_per_message = 3
43
- tokens_per_name = 1
44
- else:
45
- raise NotImplementedError(
46
- f"num_tokens_from_messages() is not implemented for model {model}.\n"
47
- " See https://github.com/openai/openai-python/blob/main/chatml.md for"
48
- " information on how messages are converted to tokens."
49
- )
50
- num_tokens = 0
51
- for message in messages:
52
- num_tokens += tokens_per_message
53
- for key, value in message.items():
54
- num_tokens += len(encoding.encode(value))
55
- if key == "name":
56
- num_tokens += tokens_per_name
57
- num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
58
- return num_tokens
59
-
60
-
61
- def count_string_tokens(string: str, model_name: str) -> int:
62
- """
63
- Returns the number of tokens in a text string.
64
-
65
- Args:
66
- string (str): The text string.
67
- model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo")
68
-
69
- Returns:
70
- int: The number of tokens in the text string.
71
- """
72
- encoding = tiktoken.encoding_for_model(model_name)
73
- return len(encoding.encode(string))
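
A quick sketch exercising both counters; the messages are arbitrary, and tiktoken must be installed (it downloads its encodings on first use):

```python
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize the plot of Hamlet."},
]
print(count_message_tokens(messages, model="gpt-3.5-turbo"))   # includes per-message overhead
print(count_string_tokens("Hello, world!", model_name="gpt-3.5-turbo"))  # raw BPE count: 4
```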