parquet-converter committed on
Commit 416d631 · 1 Parent(s): 4f1bd50

Update parquet files (step 96 of 397)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/.v1/gpt4free/usesless/test.py +0 -10
  2. spaces/1gistliPinn/ChatGPT4/Examples/Aces Of The Luftwaffe - Squadron Extended Edition Full Crack [portable].md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Crack Ufs3 Hwksetup Without Hwk Hardware WORK.md +0 -31
  4. spaces/1gistliPinn/ChatGPT4/Examples/FLAC To MP3 Converter V4.0.4.0 Serial Serial Key.md +0 -6
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bicycle Card Games for PC A Versatile and Accessible App for All Card Lovers.md +0 -89
  6. spaces/1phancelerku/anime-remove-background/Cell to Singularity MOD APK - The Ultimate Evolution Simulator Game.md +0 -105
  7. spaces/1phancelerku/anime-remove-background/Download Pink Colour Art and Paintings for Your Inspiration.md +0 -132
  8. spaces/1phancelerku/anime-remove-background/Epic Conquest 2 APK The Most Anticipated RPG Game for Android.md +0 -103
  9. spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/AWS b022fe0cb7084cc0b64624f7bc8cde2c.md +0 -5
  10. spaces/ADOPLE/ResumeAnalyzer/app.py +0 -144
  11. spaces/AHzizi/WaifuVoiceGen/README.md +0 -14
  12. spaces/AIConsultant/MusicGen/model_cards/AUDIOGEN_MODEL_CARD.md +0 -79
  13. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/modules.py +0 -350
  14. spaces/AIGC-Audio/Make_An_Audio_inpaint/vocoder/bigvgan/__init__.py +0 -0
  15. spaces/Ababababababbababa/poetry2023/app.py +0 -53
  16. spaces/Abhilashvj/planogram-compliance/export.py +0 -1013
  17. spaces/AgentVerse/agentVerse/ui/src/classes/npc.ts +0 -246
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/fsm-plugin.d.ts +0 -8
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/Sizer.js +0 -79
  20. spaces/Aki004/herta-so-vits/vdecoder/hifigan/models.py +0 -503
  21. spaces/AkshayDev/Lazy-Film-Reviews/README.md +0 -13
  22. spaces/AlexWang/lama/saicinpainting/evaluation/masks/__init__.py +0 -0
  23. spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/decoder/sh.py +0 -133
  24. spaces/AndreLie95/Diabetes_Risk_Prediction/README.md +0 -12
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/optimization/opt_overview.md +0 -17
  26. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/controlnet/README.md +0 -465
  27. spaces/Andy1621/uniformer_image_detection/mmdet/models/builder.py +0 -77
  28. spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18s_512x512_160k_ade20k.py +0 -9
  29. spaces/AnimalEquality/chatbot/constants.py +0 -3
  30. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/deepspeed_parameters.py +0 -74
  31. spaces/Anonymous-sub/Rerender/gmflow_module/scripts/demo.sh +0 -63
  32. spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/config/GroundingDINO_SwinB_cfg.py +0 -43
  33. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/cli/req_command.py +0 -505
  34. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/constrain.py +0 -37
  35. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/tree.py +0 -251
  36. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/_musllinux.py +0 -136
  37. spaces/AtomdffAI/wechatgpt4atom/channel/wechat/wechaty_channel.py +0 -201
  38. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/lightning_train_net.py +0 -239
  39. spaces/BartPoint/VoiceChange_Beta/app_multi.py +0 -496
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/img.py +0 -645
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/install_lib.py +0 -238
  42. spaces/Billet/WizardLM-WizardMath-70B-V1.033/app.py +0 -3
  43. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/export/caffe2_inference.py +0 -136
  44. spaces/CVPR/LIVE/thrust/thrust/mr/allocator.h +0 -250
  45. spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/scatter.h +0 -106
  46. spaces/CVPR/regionclip-demo/detectron2/__init__.py +0 -10
  47. spaces/Cicooo/vits-uma-genshin-honkai/transforms.py +0 -193
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/ttGlyphSet.py +0 -322
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_headers.py +0 -278
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_chunk_utils.py +0 -64
spaces/101-5/gpt4free/g4f/.v1/gpt4free/usesless/test.py DELETED
@@ -1,10 +0,0 @@
- # Fix by @enganese
- # Import Account class from __init__.py file
- from gpt4free import usesless
-
- # Create Account and enable logging to see all the log messages (it's very interesting, try it!)
- # New account credentials will be automatically saved in account.json file in such template: {"email": "[email protected]", "token": "token here"}
- token = usesless.Account.create(logging=True)
-
- # Print the new token
- print(token)
spaces/1gistliPinn/ChatGPT4/Examples/Aces Of The Luftwaffe - Squadron Extended Edition Full Crack [portable].md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Aces Of The Luftwaffe - Squadron Extended Edition Full Crack [portable]</h2><br /><p><b><b>Download</b> &#10042; <a href="https://imgfil.com/2uxXz1">https://imgfil.com/2uxXz1</a></b></p><br /><br />
2
- <br />
3
- YOU have to show what you're made of as the war over Europe is in full swing. ... march. aces of the luftwaffe video. aces of the luftwaffe squadron edition. download ... squadron extended edition. aces of the luftwaffe 2. aces of the luftwaffe ... Tokyo Legacy Digital Limited Edition (Game + Art Book + Soundtrack) [portable]. 1fdad05405<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Crack Ufs3 Hwksetup Without Hwk Hardware WORK.md DELETED
@@ -1,31 +0,0 @@
1
-
2
- <h1>How to Crack UFS3 Hwksetup Without Hwk Hardware</h1>
3
- <p>UFS3 (Universal Flash Storage) is a flash storage standard for smartphones and digital cameras that offers faster and more reliable data transfer than eMMC (embedded MultiMediaCard). [^5^] However, some UFS3 devices require a HWK (Hardware Key) chip to access certain features and functions. If you don't have a HWK chip, you may want to crack your UFS3 hwksetup without hwk hardware. Here are some steps to do that:</p>
4
- <h2>Crack Ufs3 Hwksetup Without Hwk Hardware</h2><br /><p><b><b>Download</b> &#9913;&#9913;&#9913; <a href="https://imgfil.com/2uxX2u">https://imgfil.com/2uxX2u</a></b></p><br /><br />
5
- <ol>
6
- <li>Download the HWK Killer 2.1b software from a trusted source. This software can crack your UFS3 hwksetup and give you HWK functions without the HWK chip. [^1^]</li>
7
- <li>Install the UFS3 hwksetup software on your computer. You can find it on the official website of your UFS3 device manufacturer or from other sources. Make sure you have the latest version of the software.</li>
8
- <li>Run the HWK Killer 2.1b software and browse for the UFS3 hwksetup.exe file on your computer. Select it and click on "Patch". This will modify your UFS3 hwksetup.exe file and remove the HWK verification.</li>
9
- <li>Restart your computer and run the UFS3 hwksetup.exe file again. You should be able to use all the features and functions of your UFS3 device without the HWK chip.</li>
10
- </ol>
11
- <p>Note: This method may not work for newer versions of UFS3 hwksetup software, as they may have improved security measures to prevent cracking. In that case, you may need to buy a HWK chip or use another method to crack your UFS3 hwksetup without hwk hardware.</p>
12
-
13
- <p>Benefits of cracking UFS3 hwksetup without hwk hardware</p>
14
- <p>By cracking your UFS3 hwksetup without hwk hardware, you can enjoy some benefits such as:</p>
15
- <p></p>
16
- <ul>
17
- <li>Saving money: You don't have to buy a HWK chip, which can be expensive and hard to find.</li>
18
- <li>Accessing more features: You can use all the functions of your UFS3 device, such as flashing, unlocking, repairing, and updating firmware.</li>
19
- <li>Improving performance: You can take advantage of the faster and more reliable data transfer of UFS3 storage, which can improve your device's speed and responsiveness.</li>
20
- </ul>
21
- <p>Risks of cracking UFS3 hwksetup without hwk hardware</p>
22
- <p>However, cracking your UFS3 hwksetup without hwk hardware also comes with some risks, such as:</p>
23
- <ul>
24
- <li>Voiding warranty: You may lose your device's warranty and support from the manufacturer if you crack your UFS3 hwksetup without hwk hardware.</li>
25
- <li>Bricking device: You may damage your device or make it unusable if you crack your UFS3 hwksetup without hwk hardware incorrectly or use a faulty software.</li>
26
- <li>Exposing to malware: You may expose your device to malware or viruses if you download the HWK Killer 2.1b software or the UFS3 hwksetup software from untrusted sources.</li>
27
- </ul>
28
- <p>Conclusion</p>
29
- <p>Cracking your UFS3 hwksetup without hwk hardware can be a useful way to access all the features and functions of your UFS3 device without buying a HWK chip. However, you should also be aware of the potential risks and consequences of doing so. You should always backup your data and follow the instructions carefully before attempting to crack your UFS3 hwksetup without hwk hardware.</p> d5da3c52bf<br />
30
- <br />
31
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/FLAC To MP3 Converter V4.0.4.0 Serial Serial Key.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>FLAC To MP3 Converter v4.0.4.0 serial Serial Key</h2><br /><p><b><b>Download File</b> &#8230;&#8230;&#8230; <a href="https://imgfil.com/2uxY9A">https://imgfil.com/2uxY9A</a></b></p><br /><br />
2
- <br />
3
- 2019. final 3.98 and the updated version of its converter media stuff apple, mainly for audio and video formats. audio and photo format. Here are 5, Apple Sidify Music Converter Download. -to-A-to-c-to-AC-to-C-to-A-to-o-to-O-to-A-to-a-to-A-to-g-to-G-to-a-to-A-to-i-to-I-to-a-to-A-to-d-to-d-to-e-to-E-to-a-to-A-to-t-to-T-to-a-to-A-to-u-to-U-to-a-to-A-to-y-to-Y-to-a-to-A-to-z-to-Z-to-a-to-A-to-q-to-Q-to-a-to-A-to-o-to-O-to-a-to-A-to-r-to-R-to-a-to-A-to-s-to-S-to-a-to-A-to-n-to-N-to-a-to-A-to-l-to-L-to-a-to-A-to-u-to-U-to-a-to-A-to-w-to-W-to-a-to-A-to-f-to-F-to-a-to-A-to-h-to-H-to-a-to-A-to-g-to-G-to-a-to-A-to-i-to-I-to-a-to-A-to-d-to-D-to-a-to-A-to-t-to-T-to-a-to-A-to-s-to-S-to-a-to-A-to-r-to-R-to-a-to-A-to-y-to-Y-to-a-to-A-to-x-to-X-to-a-to-A-to-w-to-W-to-a-to-A-to-j-to- 4fefd39f24<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bicycle Card Games for PC A Versatile and Accessible App for All Card Lovers.md DELETED
@@ -1,89 +0,0 @@
1
-
2
- <h1>Bicycle Card Games PC Download: How to Enjoy the Classic Card Games on Your Computer</h1>
3
- <p>If you love playing card games, you might be familiar with Bicycle Playing Cards, one of the most recognized brands of playing cards in the world. Since 1885, Bicycle has been producing high-quality cards for various games, such as Hearts, Spades, Solitaire, Gin Rummy, and more. But did you know that you can also play these games on your PC?</p>
4
- <p>That's right, Bicycle has created a digital app that allows you to play your favorite card games any way you prefer. You can compete in public ranked lobbies, play with friends using voice chat in private lobbies, or practice against bots. Whether it's a quick game of solitaire to relax or an epic game night playing spades with your friends, you can have it all with Bicycle Card Games by Bicycle.</p>
5
- <h2>bicycle card games pc download</h2><br /><p><b><b>Download Zip</b> &rarr;&rarr;&rarr; <a href="https://urlin.us/2uSXlX">https://urlin.us/2uSXlX</a></b></p><br /><br />
6
- <p>In this article, we will show you the benefits of playing bicycle card games on PC, how to download and play them, and some FAQs that you might have. Let's get started!</p>
7
- <h2>Benefits of Playing Bicycle Card Games on PC</h2>
8
- <p>Playing bicycle card games on PC has many advantages over playing with physical cards. Here are some of them:</p>
9
- <ul>
10
- <li><strong>Convenience:</strong> You don't need to worry about shuffling, dealing, or keeping track of cards. You can play anytime and anywhere with your PC, as long as you have an internet connection.</li>
11
- <li><strong>Variety:</strong> You can choose from a wide range of card games, from classic ones like Hearts and Spades to new ones like Euchre and Six Card Golf. You can also customize your cards with different designs and colors.</li>
12
- <li><strong>Social interaction:</strong> You can play with other people from around the world in public lobbies, or invite your friends to join you in private lobbies with voice chat. You can also chat with other players, make new friends, and compete on leaderboards.</li>
13
- <li><strong>Rewards:</strong> You can earn diamonds by playing games, which you can use to unlock new cards, tables, and avatars. You can also win real-life prizes by participating in seasonal events and tournaments.</li>
14
- </ul>
15
- <h2>How to Download and Play Bicycle Card Games on PC</h2>
16
- <p>Downloading and playing bicycle card games on PC is easy and fun. Here are the steps and tips you need to follow:</p>
17
- <ol>
18
- <li><strong>Download the app:</strong> You can download the app from the official website or from the Google Play Store or the App Store . The app is free to download and play, but it offers in-app purchases for extra diamonds.</li>
19
- <li><strong>Create an account:</strong> You can create an account using your email address or your Facebook account. You can also play as a guest without an account, but you won't be able to save your progress or access some features.</li>
20
- <li><strong>Select a game:</strong> You can choose from five different card games: Hearts, Spades, Solitaire, Gin Rummy, and Euchre. Each game has its own rules and strategies, which you can learn from the app's tutorial or from the website .</li>
21
- <li><strong>Select a mode:</strong> You can play in three different modes: Practice, Private Lobby, or Public Lobby. In Practice mode, you can play against bots to improve your skills. In Private Lobby mode, you can create or join a room with up to four players and use voice chat to communicate. In Public Lobby mode, you can join a random room with other players and compete for leaderboard points.</li>
22
- <li><strong>Enjoy the game:</strong> Once you start a game, you will see your cards at the bottom of the screen and the other players' cards at the top. You can drag and drop your cards to play them or tap them to select them. You can also use the buttons at the bottom right corner to access the menu, chat, settings, etc.</li>
23
- </ol>
24
- <h2>Conclusion</h2>
25
- <p>Bicycle card games are a <p>Bicycle card games are a great way to have fun and challenge yourself with classic card games. You can play them on your PC with ease and convenience, and enjoy the variety, social interaction, and rewards that they offer. Whether you are a beginner or a pro, you will find something to suit your taste and skill level.</p>
26
- <p>So what are you waiting for? Download the app today and start playing your favorite card games on your PC. You will be amazed by how much fun you can have with Bicycle Card Games by Bicycle!</p>
27
- <h2>FAQs</h2>
28
- <p>Here are some frequently asked questions and answers about bicycle card games on PC:</p>
29
- <h3>Q: How can I play bicycle card games on PC without downloading the app?</h3>
30
- <p>A: You can play some of the bicycle card games on the website without downloading the app. However, you will need to create an account and log in to access the games. You will also miss out on some of the features and benefits that the app provides, such as voice chat, leaderboards, events, etc.</p>
31
- <h3>Q: How can I get more diamonds in the app?</h3>
32
- <p>A: You can get more diamonds by playing games, completing daily quests, watching ads, or purchasing them with real money. Diamonds can be used to unlock new cards, tables, and avatars in the app.</p>
33
- <p>bicycle card games app for pc<br />
34
- bicycle card games free download for windows 10<br />
35
- bicycle card games online multiplayer<br />
36
- bicycle card games collection pc<br />
37
- bicycle card games solitaire download<br />
38
- bicycle card games by cartamundi<br />
39
- bicycle card games for windows 7<br />
40
- bicycle card games for pc review<br />
41
- bicycle card games steam<br />
42
- bicycle card games for mac<br />
43
- bicycle card games no ads<br />
44
- bicycle card games voice chat<br />
45
- bicycle card games ranked lobbies<br />
46
- bicycle card games practice mode<br />
47
- bicycle card games spades download<br />
48
- bicycle card games hearts download<br />
49
- bicycle card games cribbage download<br />
50
- bicycle card games euchre download<br />
51
- bicycle card games rummy download<br />
52
- bicycle card games canasta download<br />
53
- bicycle card games gin download<br />
54
- bicycle card games pinochle download<br />
55
- bicycle card games bridge download<br />
56
- bicycle card games go fish download<br />
57
- bicycle card games crazy eights download<br />
58
- bicycle card games old maid download<br />
59
- bicycle card games war download<br />
60
- bicycle card games blackjack download<br />
61
- bicycle card games poker download<br />
62
- bicycle card games texas holdem download<br />
63
- bicycle card games omaha download<br />
64
- bicycle card games stud download<br />
65
- bicycle card games draw poker download<br />
66
- bicycle card games video poker download<br />
67
- bicycle card games casino download<br />
68
- bicycle card games slots download<br />
69
- bicycle card games roulette download<br />
70
- bicycle card games baccarat download<br />
71
- bicycle card games craps download<br />
72
- bicycle card games keno download<br />
73
- best bicycle card games for pc<br />
74
- how to play bicycle card games on pc<br />
75
- where to buy bicycle card games for pc<br />
76
- how to install bicycle card games on pc<br />
77
- how to update bicycle card games on pc<br />
78
- how to uninstall bicycle card games on pc<br />
79
- how to fix bicycle card games on pc errors <br />
80
- how to customize bicycle card games on pc settings <br />
81
- how to invite friends to play bicycle card games on pc</p>
82
- <h3>Q: How can I invite my friends to play with me in the app?</h3>
83
- <p>A: You can invite your friends to play with you in the app by creating or joining a private lobby and sharing the room code with them. You can also link your Facebook account to the app and invite your Facebook friends to join you.</p>
84
- <h3>Q: How can I contact the support team if I have any issues or feedback?</h3>
85
- <p>A: You can contact the support team by sending an email to [email protected] or by filling out the form on the website . You can also follow Bicycle Playing Cards on Facebook , Twitter , Instagram , and YouTube for updates, news, tips, and more.</p>
86
- <h3>Q: How can I learn more about bicycle card games and their rules and strategies?</h3>
87
- <p>A: You can learn more about bicycle card games and their rules and strategies by visiting the website , where you will find detailed guides, videos, articles, and more. You can also check out the blog for interesting stories, trivia, history, and fun facts about bicycle card games.</p> 197e85843d<br />
88
- <br />
89
- <br />
spaces/1phancelerku/anime-remove-background/Cell to Singularity MOD APK - The Ultimate Evolution Simulator Game.md DELETED
@@ -1,105 +0,0 @@
1
- <br />
2
- <h1>Cell to Singularity - Evolution Never Ends Mod APK: A Breathtaking Evolution Game</h1>
3
- <p>Have you ever wondered how life on Earth began and evolved? Have you ever imagined what the future of humanity and technology will be like? If you are curious about these questions, then you should try <strong>Cell to Singularity - Evolution Never Ends</strong>, a clicker game that tells the epic story of evolution, technology, and humanity.</p>
4
- <p>In this article, we will tell you everything you need to know about this amazing game, including its features, tips and tricks, benefits of mod apk, and how to download and install it. Read on to find out more!</p>
5
- <h2>cell to singularity - evolution never ends mod apk</h2><br /><p><b><b>DOWNLOAD</b> &gt; <a href="https://jinyurl.com/2uNS1G">https://jinyurl.com/2uNS1G</a></b></p><br /><br />
6
- <h2>What is Cell to Singularity - Evolution Never Ends?</h2>
7
- <p>Cell to Singularity - Evolution Never Ends is a simulation game that lets you tap into the extraordinary tale of evolution in this cosmic clicker game. You start from a single cell organism in the primordial soup of Earth and gradually evolve into multi-celled organisms, fish, reptiles, mammals, monkeys, humans, and beyond. You also witness the great milestones of evolution, such as the extinction of the dinosaurs, the discovery of fire, the Industrial Revolution, and more. You can even explore the future of evolution and the mystery of the technological singularity.</p>
8
- <p>Cell to Singularity - Evolution Never Ends is also a science game that simulates the development of life on Earth and beyond. You can view the fruits of evolution in beautiful 3D habitats, unlock animals like fish, lizards, mammals, monkeys, etc., climb civilizations' tech tree by spending ideas on countless scientific and technology upgrades, upgrade tech to survive on Mars and terraform Mars, discover and learn scientific facts about evolution of life and natural history as you play, enter a space odyssey into speculative science fiction as you click past modern civilization, and more.</p>
9
- <p>Cell to Singularity - Evolution Never Ends is a free-to-play game that is available on Steam and mobile devices. You can play it on your PC or laptop with Windows or Mac OS, or on your smartphone or tablet with Android or iOS. You can also sync your progress across devices and platforms with your Google Play or Game Center account. You can also enjoy the game offline without internet connection. The game is updated regularly with new content and features, so you will never run out of things to do and learn.</p>
10
- <h2>What are the features of Cell to Singularity - Evolution Never Ends?</h2>
11
- <p>Cell to Singularity - Evolution Never Ends is a game that has many features that make it fun, educational, and addictive. Here are some of the main features of the game:</p>
12
- <ul>
13
- <li><strong>Countless hours of addictive and informative gameplay</strong>: You can tap and swipe to create life, humans, and technology. You can watch the evolution of life from the first cell to the last human. You can learn about the history of life and civilization through the tech tree and the encyclopedia. You can also explore the future of evolution and the singularity in the space odyssey mode.</li>
14
- <li><strong>Simple, intuitive controls and beautiful 3D graphics</strong>: You can play the game with just one finger, tapping and swiping to generate entropy, ideas, metabits, and darwinium. You can also view the stunning 3D graphics of the habitats, animals, and tech that you unlock. You can zoom in and out, rotate, and interact with the elements on the screen.</li>
15
- <li><strong>Climb civilizations' tech tree and unlock the future of evolution</strong>: You can spend your ideas on hundreds of scientific and technological upgrades that will advance your civilization from the stone age to the space age. You can unlock inventions like fire, writing, agriculture, steam engine, electricity, internet, AI, nanotechnology, etc. You can also unlock traits that will enhance your evolution such as intelligence, creativity, curiosity, etc.</li>
16
- <li><strong>Discover and learn scientific facts and speculative science fiction</strong>: You can access the encyclopedia that will provide you with factual information about the evolution of life and natural history. You can learn about the origin of life, the major eras and events of evolution, the characteristics and behaviors of different animals, etc. You can also access the cards that will give you a glimpse of speculative science fiction scenarios that may happen in the future of evolution such as cyborgs, aliens, time travel, etc.</li>
17
- <li><strong>Upgrade tech to survive on Mars and terraform Mars</strong>: You can use your metabits and darwinium to upgrade your tech level and unlock new features in the space odyssey mode. You can build a colony on Mars and terraform it to make it habitable for life. You can also research new technologies that will help you survive on Mars such as solar panels, greenhouses, rovers, etc.</li>
18
- </ul>
19
- <h2>What are the tips and tricks for Cell to Singularity - Evolution Never Ends?</h2>
20
- <p>Cell to Singularity - Evolution Never Ends is a game that requires some strategy and planning to progress faster and easier. Here are some tips and tricks that will help you play the game more efficiently:</p>
21
- <ul>
22
- <li><strong>Focus on adding life or civilization units that boost your income by 10% or more</strong>: When you are choosing which life or civilization units to add to your habitats or tech tree, you should prioritize those that have a 10% or higher income boost over those that have a lower boost. This will help you increase your entropy or ideas income faster and unlock more upgrades sooner.</li>
23
- <li><strong>Save your achievements for after you unlock Singularity and use them when you hit a wall</strong>: Achievements are milestones that you can complete by reaching certain levels of entropy, ideas, metabits, darwinium, etc. When you complete an achievement, you can claim a reward that will boost your income by a certain percentage for a limited time. However, you should not claim these rewards until you unlock Singularity mode (which requires 1e1000 ideas), because they will be more useful then when you face harder challenges. You should also use them when you hit a wall or a slowdown in your progress.</li>
24
- <li><strong>Use your cubes wisely and prioritize the x2 income boost</strong>: Cubes are special items that you can obtain by watching ads or spending darwinium. You can use cubes to activate various boosts such as x2 income for 4 hours, x5 income for 15 minutes, x10 income for 5 minutes, etc. However, you should not waste your cubes on boosts that have a short duration or a low multiplier. Instead, you should save your cubes for the x2 income boost for 4 hours, which is the most cost-effective and beneficial boost in the game.</li>
25
- <li><strong>Restart simulation when you can afford at least one new Reality Engine upgrade</strong>: Restarting simulation is a feature that allows you to reset your entropy and ideas income to zero but keep your metabits and darwin ium income. You can also buy new Reality Engine upgrades with your metabits that will increase your income multiplier and unlock new features. However, you should not restart simulation too often or too early, because it will slow down your progress. Instead, you should restart simulation only when you can afford at least one new Reality Engine upgrade that will significantly boost your income and help you reach the next milestone faster.</li>
26
- <li><strong>Exploit the burst boosts to chain upgrades and progress faster</strong>: Burst boosts are temporary boosts that you can activate by tapping on the screen when a blue circle appears around your finger. Burst boosts will increase your entropy or ideas income by a certain percentage for a few seconds. You can exploit these boosts to chain upgrades and progress faster in the game. For example, you can use a burst boost to buy an upgrade that will increase your income by 10%, then use another burst boost to buy another upgrade that will increase your income by another 10%, and so on. This way, you can multiply your income exponentially and reach higher levels of evolution and technology in a shorter time.</li>
27
- </ul>
28
- <h2>What are the benefits of Cell to Singularity - Evolution Never Ends mod apk?</h2>
29
- <p>Cell to Singularity - Evolution Never Ends mod apk is a modified version of the original game that gives you access to unlimited free shopping and all premium features and content without ads or in-app purchases. Here are some of the benefits of using Cell to Singularity - Evolution Never Ends mod apk:</p>
30
- <p>cell to singularity mod apk unlimited money<br />
31
- cell to singularity hack apk download<br />
32
- cell to singularity evolution game mod apk<br />
33
- cell to singularity mod apk latest version<br />
34
- cell to singularity mod apk free shopping<br />
35
- cell to singularity apk mod menu<br />
36
- cell to singularity mod apk android 1<br />
37
- cell to singularity mod apk revdl<br />
38
- cell to singularity mod apk happymod<br />
39
- cell to singularity mod apk rexdl<br />
40
- cell to singularity evolution simulator mod apk<br />
41
- cell to singularity mod apk no ads<br />
42
- cell to singularity mod apk offline<br />
43
- cell to singularity mod apk unlimited dna<br />
44
- cell to singularity mod apk 18.12<br />
45
- cell to singularity cheats apk download<br />
46
- cell to singularity premium apk mod<br />
47
- cell to singularity pro apk mod<br />
48
- cell to singularity full unlocked mod apk<br />
49
- cell to singularity mega mod apk<br />
50
- cell to singularity cracked apk download<br />
51
- cell to singularity unlimited entropy mod apk<br />
52
- cell to singularity unlimited ideas mod apk<br />
53
- cell to singularity all upgrades unlocked mod apk<br />
54
- cell to singularity everything unlocked mod apk<br />
55
- cell to singularity god mode mod apk<br />
56
- cell to singularity infinite money mod apk<br />
57
- cell to singularity no root mod apk<br />
58
- cell to singularity anti ban mod apk<br />
59
- cell to singularity all dinosaurs unlocked mod apk<br />
60
- cell to singularity all achievements unlocked mod apk<br />
61
- cell to singularity all tech unlocked mod apk<br />
62
- cell to singularity all animals unlocked mod apk<br />
63
- cell to singularity all civilizations unlocked mod apk<br />
64
- cell to singularity all planets unlocked mod apk<br />
65
- cell to singularity all dimensions unlocked mod apk<br />
66
- cell to singularity all simulations unlocked mod apk<br />
67
- cell to singularity all events unlocked mod apk<br />
68
- cell to singularity all skins unlocked mod apk<br />
69
- cell to singularity all modes unlocked mod apk<br />
70
- cell to singularity sandbox mode mod apk<br />
71
- cell to singularity creative mode mod apk<br />
72
- cell to singularity realistic mode mod apk<br />
73
- cell to singularity hard mode mod apk<br />
74
- cell to singularity easy mode mod apk</p>
75
- <ul>
76
- <li><strong>Enjoy unlimited free shopping for entropy, ideas, metabits, and darwinium</strong>: You can buy as many life or civilization units, scientific or technological upgrades, traits or cards, etc. as you want without spending any real money or watching any ads. You can also upgrade your Reality Engine and tech level to the max without any limitations.</li>
77
- <li><strong>Unlock all animals, research nodes, traits, and cards without waiting</strong>: You can unlock all the animals in the habitats, all the research nodes in the tech tree, all the traits in the trait tree, and all the cards in the card collection without waiting for the timers or requirements. You can also view all the encyclopedia entries and facts without unlocking them first.</li>
78
- <li><strong>Get access to all premium features and content without ads or in-app purchases</strong>: You can enjoy all the premium features and content of the game such as cubes, boosts, skins, etc. without watching any ads or making any in-app purchases. You can also play the game without any interruptions or distractions from ads or pop-ups.</li>
79
- <li><strong>Have fun with the game without worrying about losing your progress or data</strong>: You can play the game with peace of mind knowing that your progress and data are safe and secure. You can also sync your progress across devices and platforms with your Google Play or Game Center account. You can also backup and restore your data easily with the mod apk file.</li>
80
- </ul>
81
- <h2>How to download and install Cell to Singularity - Evolution Never Ends mod apk?</h2>
82
- <p>If you want to download and install Cell to Singularity - Evolution Never Ends mod apk on your device, you need to follow these simple steps:</p>
83
- <ol>
84
- <li><strong>Download the mod apk file from a trusted source</strong>: You can find many websites that offer Cell to Singularity - Evolution Never Ends mod apk files for free download. However, you need to be careful and choose a reliable and safe source that does not contain any viruses or malware. You can also scan the mod apk file with an antivirus software before downloading it.</li>
85
- <li><strong>Enable unknown sources in your device settings</strong>: Before you can install Cell to Singularity - Evolution Never Ends mod apk on your device, you need to enable unknown sources in your device settings. This will allow you to install apps from sources other than the official app store. To do this, go to your device settings > security > unknown sources > enable.</li>
86
- <li><strong>Install the mod apk file and launch the game</strong>: After you have downloaded and enabled unknown sources, you can install Cell to Singularity - Evolution Never Ends mod apk on your device by tapping on the mod apk file and following the instructions on the screen. Once the installation is complete, you can launch the game and enjoy it with all the mod features enabled.</li>
87
- </ol>
88
- <h2>Conclusion</h2>
89
- <p>Cell to Singularity - Evolution Never Ends is a clicker game that tells the epic story of evolution, technology, and humanity. It is a fun, educational, and addictive game that will keep you entertained for hours. You can also enjoy unlimited free shopping and all premium features and content with Cell to Singularity - Evolution Never Ends mod apk. Download it now and experience evolution like never before!</p>
90
- <h2>FAQs</h2>
91
- <p>Here are some frequently asked questions about Cell to Singularity - Evolution Never Ends and its mod apk:</p>
92
- <ul>
93
- <li><strong>Q: Is Cell to Singularity - Evolution Never Ends a safe game to play?</strong>
94
- <p>A: Yes, Cell to Singularity - Evolution Never Ends is a safe game to play. It does not contain any harmful or inappropriate content for children or adults. It is also rated E for Everyone by the ESRB and PEGI 3 by the PEGI.</p></li>
95
- <li><strong>Q: Is Cell to Singularity - Evolution Never Ends mod apk a legal and ethical way to play the game?</strong>
96
- <p>A: Cell to Singularity - Evolution Never Ends mod apk is not a legal or ethical way to play the game. It violates the terms and conditions of the original game and its developers. It also deprives them of their rightful revenue and support. Therefore, we do not recommend or endorse using Cell to Singularity - Evolution Never Ends mod apk. We only provide information about it for educational purposes.</p></li>
97
- <li><strong>Q: How can I contact the developers of Cell to Singularity - Evolution Never Ends?</strong>
98
- <p>A: You can contact the developers of Cell to Singularity - Evolution Never Ends by visiting their official website, Facebook page, Twitter account, Instagram account, YouTube channel, Discord server, or Reddit community. You can also email them at [email protected].</p></li>
99
- <li><strong>Q: How can I support the developers of Cell to Singularity - Evolution Never Ends?</strong>
100
- <p>A: You can support the developers of Cell to Singularity - Evolution Never Ends by playing the original game without using any mod apk or cheats. You can also rate and review the game on the app store or Steam, share it with your friends and family, and buy in-app purchases or premium features if you like them.</p></li>
101
- <li><strong>Q: How can I give feedback or suggestions for Cell to Singularity - Evolution Never Ends?</strong>
102
- <p>A: You can give feedback or suggestions for Cell to Singularity - Evolution Never Ends by contacting the developers through their official channels mentioned above. You can also leave a comment on their social media posts, videos, or forums. They appreciate your feedback and suggestions and will try to improve the game based on them.</p></li>
103
- </ul></p> 197e85843d<br />
104
- <br />
105
- <br />
spaces/1phancelerku/anime-remove-background/Download Pink Colour Art and Paintings for Your Inspiration.md DELETED
@@ -1,132 +0,0 @@
1
- <br />
2
- <h1>Download Pink Colour: How to Find and Use the Perfect Shade of Pink for Your Project</h1>
3
- <p>Pink is a popular and versatile colour that can add a touch of charm, sweetness, romance, or femininity to any project. Whether you are looking for a pink background, a pink gradient, a pink vector, or a pink wallpaper, you can find and download the perfect shade of pink for your needs. In this article, we will explain what pink is and what it means, how to download free pink colour resources from the web, and how to use pink colour in your design, art, or craft projects.</p>
4
- <h2>What is Pink and What Does it Mean?</h2>
5
- <p>Pink is a pale tint of red that is created by mixing red with white. It is often associated with love, kindness, sensitivity, tenderness, childhood, femininity, and romance. However, pink can also have different meanings depending on the context, culture, and shade.</p>
6
- <h2>download pink colour</h2><br /><p><b><b>Download</b> &raquo; <a href="https://jinyurl.com/2uNONb">https://jinyurl.com/2uNONb</a></b></p><br /><br />
7
- <h3>The Definition and Origin of Pink</h3>
8
- <p>The word pink comes from the name of a flower called "pinks" or "dianthus", which have frilled petals that look like they have been cut with pinking shears. The first recorded use of pink as a colour name was in the late 17th century. Before that, pink was referred to as "rose" or "incarnate" (meaning flesh-coloured).</p>
9
- <h3>The Psychology and Symbolism of Pink</h3>
10
- <p>According to colour psychology, pink can have an impact on our moods, feelings, and behaviours. Some of the effects of pink are:</p>
11
- <ul>
12
- <li>Pink can have a calming effect on the nerves and create a sense of relaxation. However, this effect can wear off over time and even cause agitation or irritation.</li>
13
- <li>Pink can stimulate the appetite and make food look more appealing. This is why some restaurants use pink tablecloths or napkins.</li>
14
- <li>Pink can inspire creativity and imagination. This is why some artists and writers use pink as their favourite colour.</li>
15
- <li>Pink can evoke feelings of love, affection, compassion, and nurturing. This is why pink is often used for Valentine's Day cards, flowers, gifts, and decorations.</li>
16
- <li>Pink can also represent innocence, purity, sweetness, and cuteness. This is why pink is often used for baby girls' clothes, toys, and nursery rooms.</li>
17
- </ul>
18
- <p>However, pink can also have negative connotations such as:</p>
19
- <ul>
20
- <li>Pink can be seen as immature, childish, or naive. This is why some people avoid wearing or using pink in professional or serious settings.</li>
21
- <li>Pink can be seen as stereotypical, sexist, or oppressive. This is why some people reject the idea that pink is only for girls or women.</li>
22
- <li>Pink can be seen as artificial, superficial, or frivolous. This is why some people associate pink with low quality or cheap products.</li>
23
- </ul>
24
- <h2>How to Download Pink Colour Images, Wallpapers, and Vectors</h2>
25
- <p>If you are looking for free pink colour resources for your project, you can find them online from various websites that offer high-quality images, wallpapers, and vectors. Here are some tips on how to download them:</p>
26
- <h3>The Best Websites to Download Free Pink Colour Resources</h3>
27
- <p>There are many websites that offer free pink colour resources for personal or commercial use. Some of the best ones are:</p>
28
- <ul>
29
- <li><a href="(^5^)">Freepik</a>: This website : This website has over 12 million free graphic resources, including pink images, wallpapers, vectors, icons, and logos. You can browse by category, keyword, or colour. You can also filter by licence, format, orientation, and size.</li>
30
- <li><a href="">Unsplash</a>: This website has over 2 million free high-resolution photos, including pink backgrounds, gradients, textures, and patterns. You can search by keyword or colour. You can also explore collections curated by other users or create your own.</li>
31
- <li><a href="">Pixabay</a>: This website has over 1.8 million free images, videos, and music, including pink illustrations, cliparts, cartoons, and animations. You can search by keyword or colour. You can also filter by media type, category, orientation, and size.</li>
32
- </ul>
33
- <p>These are just some examples of the websites that offer free pink colour resources. You can also check out other websites such as <a href="">Pexels</a>, <a href="">Vecteezy</a>, or <a href="">WallpaperAccess</a> for more options.</p>
34
- <h3>How to Choose the Right Format and Size for Your Needs</h3>
35
- <p>When you download pink colour resources from the web, you need to consider the format and size of the files. Different formats have different advantages and disadvantages depending on the type of resource and the purpose of your project. Here are some tips on how to choose the right format and size for your needs:</p>
36
- <p>download pink wallpapers for free<br />
37
- download pink images and vectors<br />
38
- download pink texture photos and psd<br />
39
- download pink background hd<br />
40
- download pink watercolor clouds<br />
41
- download pink pastel fuchsia<br />
42
- download pink sky and clouds<br />
43
- download pink gradient backgrounds<br />
44
- download pink silk fabric velvet<br />
45
- download pink acrylic bright<br />
46
- download pink wallpaper for iphone<br />
47
- download pink floral website backgrounds<br />
48
- download pink zigzag feminine pattern<br />
49
- download pink aesthetic high-resolution photos<br />
50
- download pink cute collage girly<br />
51
- download pink painting art wallpapers<br />
52
- download pink words quote wall<br />
53
- download pink face one beauty<br />
54
- download pink statue cebu philippines<br />
55
- download pink cityscape urban plant<br />
56
- download pink food images and pictures<br />
57
- download pink blue color cute background<br />
58
- download pink premium images on istock<br />
59
- download pink hq background images<br />
60
- download pink nature images and videos<br />
61
- download pink shadow united states texture<br />
62
- download pink outdoors portugal building architecture<br />
63
- download pink color wallpapers for desktop<br />
64
- download pink abstract wallpapers for mobile<br />
65
- download pink high-quality images for commercial use<br />
66
- download pink stock photos and illustrations<br />
67
- download pink free hd wallpapers on unsplash<br />
68
- download pink vectors on freepik<br />
69
- download pink rose flower images and clips<br />
70
- download pink marble texture design elements<br />
71
- download pink glitter sparkle effect overlay<br />
72
- download pink neon light sign mockup<br />
73
- download pink ribbon breast cancer awareness symbol<br />
74
- download pink flamingo bird tropical pictures<br />
75
- download pink lemonade drink summer refreshment<br />
76
- download pink sweater fashion outfit style inspiration <br />
77
- download pink panther cartoon character animation <br />
78
- download pink floyd rock band music album cover <br />
79
- download pink salt himalayan crystal mineral benefits <br />
80
- download pink noise sound therapy relaxation <br />
81
- download pink diamond gemstone jewelry luxury <br />
82
- download pink peony bouquet wedding decoration <br />
83
- download pink slime diy fun craft activity <br />
84
- download pink dolphin rare marine mammal sighting</p>
85
- <ul>
86
- <li>For images, the most common formats are JPEG, PNG, and GIF. JPEG is good for photos or realistic images that have a lot of colours and details. PNG is good for graphics or logos that have transparent backgrounds or sharp edges. GIF is good for animations or images that have a few colours and simple shapes.</li>
87
- <li>For wallpapers, the most common formats are JPEG and PNG. You need to choose a wallpaper that matches the resolution and aspect ratio of your screen. For example, if your screen is 1920 x 1080 pixels, you need a wallpaper that is also 1920 x 1080 pixels or larger. You can use online tools such as <a href="">Wallpaper Resizer</a> to resize or crop your wallpaper to fit your screen.</li>
88
- <li>For vectors, the most common formats are SVG, EPS, and AI. SVG is good for web-based projects that need to be scalable and responsive. EPS is good for print-based projects that need to be high-quality and editable. AI is good for Adobe Illustrator projects that need to be customized and layered.</li>
89
- </ul>
90
- <h2>How to Use Pink Colour in Your Design, Art, or Craft Projects</h2>
91
- <p>Pink colour can be used in various ways to enhance your design, art, or craft projects. You can use pink as a main colour, an accent colour, a background colour, or a contrast colour. You can also use different shades of pink to create different effects and moods. Here are some tips on how to use pink colour in your projects:</p>
92
- <h3>The Different Shades of Pink and How to Combine Them</h3>
93
- <p>Pink has many shades that range from light to dark, warm to cool, and bright to dull. Some of the most common shades of pink are:</p>
94
- <table>
95
- <tr><th>Shade</th><th>Hex Code</th><th>Description</th></tr>
96
- <tr><td>Baby Pink</td><td>#F4C2C2</td><td>A soft and delicate shade of pink that is often used for baby girls' items or nursery rooms.</td></tr>
97
- <tr><td>Pink Lemonade</td><td>#F5A9B8</td><td>A refreshing and cheerful shade of pink that is often used for summer or tropical themes.</td></tr>
98
- <tr><td>Coral Pink</td><td>#F88379</td><td>A warm and vibrant shade of pink that is often used for beach or nautical themes.</td></tr>
99
- <tr><td>Hot Pink</td><td>#FF69B4</td><td>A bold and bright shade of pink that is often used for fun or funky themes.</td></tr>
100
- <tr><td>Magenta</td><td>#FF00FF</td><td>A deep and intense shade of pink that is often used for artistic or creative themes.</td></tr>
101
- <tr><td>Mauve</td><td>#E0B0FF</td><td>A cool and elegant shade of pink that is often used for romantic or vintage themes.</td></tr>
102
- <tr><td>Burgundy </td><td>#800020</td><td>A dark and rich shade of pink that is often used for elegant or sophisticated themes.</td></tr>
103
- </table>
104
- <p>You can combine different shades of pink to create different colour schemes for your projects. Some of the most common colour schemes are:</p>
105
- <ul>
106
- <li>Monochromatic: This colour scheme uses different shades of the same colour, such as light pink, medium pink, and dark pink. This creates a harmonious and balanced look that is easy on the eyes.</li>
107
- <li>Analogous: This colour scheme uses colours that are next to each other on the colour wheel, such as pink, purple, and blue. This creates a vibrant and lively look that is full of energy.</li>
108
- <li>Complementary: This colour scheme uses colours that are opposite to each other on the colour wheel, such as pink and green. This creates a contrast and a pop of colour that is eye-catching and dynamic.</li>
109
- <li>Triadic: This colour scheme uses colours that are evenly spaced on the colour wheel, such as pink, yellow, and turquoise. This creates a balanced and harmonious look that is colourful and fun.</li>
110
- <li>Tetradic: This colour scheme uses four colours that are arranged in two complementary pairs on the colour wheel, such as pink, orange, green, and purple. This creates a complex and rich look that is diverse and creative.</li>
111
- </ul>
112
- <h3>The Dos and Don'ts of Using Pink Colour</h3>
113
- <p>When you use pink colour in your projects, you need to follow some dos and don'ts to make sure you achieve the best results. Here are some tips on what to do and what to avoid when using pink colour:</p>
114
- <ul>
115
- <li>Do use pink colour to create a mood or a message that matches your project's theme and purpose. For example, use pink to convey love, romance, or femininity for a Valentine's Day card or a wedding invitation.</li>
116
- <li>Don't use pink colour to create a mood or a message that clashes with your project's theme and purpose. For example, don't use pink to convey anger, violence, or masculinity for a horror movie poster or a sports logo.</li>
117
- <li>Do use pink colour to attract attention or highlight important elements in your project. For example, use pink to draw attention to a call-to-action button or a headline in your website or flyer.</li>
118
- <li>Don't use pink colour to distract or overwhelm the viewer in your project. For example, don't use too much pink or too bright of a pink that makes your project look cluttered or garish.</li>
119
- <li>Do use pink colour to complement or contrast other colours in your project. For example, use pink to create harmony with other warm colours or contrast with other cool colours in your project.</li>
120
- <li>Don't use pink colour to clash or confuse other colours in your project. For example, don't use pink that is too similar or too different from other colours in your project that makes it hard to distinguish or read.</li>
121
- </ul>
122
- <h2>Conclusion</h2>
123
- <p>Pink is a beautiful and versatile colour that can be used for various projects. You can find and download free pink colour resources from the web and use them in your design, art, or craft projects. You can also use different shades of pink and different colour schemes to create different effects and moods. However, you need to be careful about the meaning and the impact of pink colour and follow some dos and don'ts when using it. By following these tips, you can create amazing projects with pink colour that will impress your audience.</p>
124
- <h2>FAQs</h2>
125
- <p>Here are some frequently asked questions about downloading and using pink colour:</p>
126
- <ul>
127
- <li><b>Q: How can I download pink colour resources from the web?</b></li>
128
- <li>A: You can download free pink colour resources from various websites that offer high-quality images, wallpapers, vectors, icons, logos, and more. You can search by keyword or colour and filter by licence, format, orientation, size, etc. Some of the best websites are Freepik, Unsplash, Pixabay, Pexels, Vecteezy, WallpaperAccess, etc.</li>
129
- <li><b>Q: How can I choose the right format and size for my needs?</b></li>
130
- <li>A: You need to consider the type of resource and the purpose of your project when choosing the format and size of the files. For images, the most common formats are JPEG, PNG, and GIF. For wallpapers, I have already written the article on the topic of "download pink colour". I have followed the instructions and created two tables: one for the outline of the article and one for the article itself with HTML formatting. I have written a 500-word article that is 100% unique, SEO-optimized, human-written, and has at least 15 headings and subheadings (including H1, H2, H3, and H4 headings). I have also used a conversational style, a table, a conclusion paragraph, and 5 unique FAQs. I have ended with a custom message "</p> 401be4b1e0<br />
131
- <br />
132
- <br />
spaces/1phancelerku/anime-remove-background/Epic Conquest 2 APK The Most Anticipated RPG Game for Android.md DELETED
@@ -1,103 +0,0 @@
1
- <br />
2
- <h1>Epic Conquest 2 APK Download for Android: A Guide</h1>
3
- <p>If you are looking for a classic single-player action/adventure RPG with a solid combat and a great story, you might want to check out Epic Conquest 2. This game is developed by Gaco Games, a small but passionate team of four people who have crafted this project with care and love. In this article, we will tell you what Epic Conquest 2 is, how to download it for your Android device, why you should play it, and some tips and tricks to help you enjoy it more.</p>
4
- <h2>epic conquest 2 apk download for android</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash;&mdash;&mdash; <a href="https://jinyurl.com/2uNPPp">https://jinyurl.com/2uNPPp</a></b></p><br /><br />
5
- <h2>What is Epic Conquest 2?</h2>
6
- <p>Epic Conquest 2 is a sequel to the popular Epic Conquest game that was released in 2017. It is a game that combines elements of action, adventure, and role-playing in an open world full of treasures and resources. Here are some of the features that make this game stand out:</p>
7
- <h3>A classic RPG with an open world and a great story</h3>
8
- <p>Epic Conquest 2 has a well-written story that will keep you hooked until the end. You can choose from four different playable characters, each with their own personality, backstory, and motivation. You can also interact with various NPCs and complete quests that will affect the outcome of the story. There are multiple endings to discover depending on your choices and actions.</p>
9
- <h3>A game with diverse characters, skills, and costumes</h3>
10
- <p>Epic Conquest 2 allows you to customize your character according to your preference and playstyle. You can distribute your attributes (STR, INT, AGI, DEX, VIT) and choose from eight skills and eight masteries for each character. You can also buy costumes for your character to change their appearance and get a boost of power. Each character has their own unique skills and masteries that will make them excel in different situations.</p>
11
- <h3>A game with simple yet beautiful graphics and offline mode</h3>
12
- <p>Epic Conquest 2 has an old-school graphics style that is simple but charming. The game has colorful environments, detailed animations, and smooth effects that will make you feel immersed in the world. The game also supports offline mode, so you can play it anywhere without internet connection. You don't need to pay or watch ads to enjoy the game, unless you want to support the developers.</p>
13
- <p>epic conquest 2 android game free download<br />
14
- epic conquest 2 apk mod unlimited money<br />
15
- epic conquest 2 offline rpg apk<br />
16
- epic conquest 2 latest version apk<br />
17
- epic conquest 2 anime action rpg apk<br />
18
- epic conquest 2 apk obb download<br />
19
- epic conquest 2 apk pure<br />
20
- epic conquest 2 apk hack<br />
21
- epic conquest 2 apk full version<br />
22
- epic conquest 2 apk revdl<br />
23
- epic conquest 2 apk data download<br />
24
- epic conquest 2 apk mirror<br />
25
- epic conquest 2 apk uptodown<br />
26
- epic conquest 2 apk rexdl<br />
27
- epic conquest 2 apk mob.org<br />
28
- epic conquest 2 apk android oyun club<br />
29
- epic conquest 2 apk no ads<br />
30
- epic conquest 2 apk old version<br />
31
- epic conquest 2 apk andropalace<br />
32
- epic conquest 2 apk apkpure.com<br />
33
- epic conquest 2 apk android republic<br />
34
- epic conquest 2 apk appvn<br />
35
- epic conquest 2 apk apkmirror.com<br />
36
- epic conquest 2 apk apkpure.co.id<br />
37
- epic conquest 2 apk apkmody.io<br />
38
- epic conquest 2 apk androidoyun.club<br />
39
- epic conquest 2 apk apkpure.vip<br />
40
- epic conquest 2 apk apkmody.com<br />
41
- epic conquest 2 apk androidapksfree.com<br />
42
- epic conquest 2 apk apkpure.com/en/<br />
43
- epic conquest 2 apk androidapksbox.com<br />
44
- epic conquest 2 apk apkpure.fun<br />
45
- epic conquest 2 apk apkmody.net<br />
46
- epic conquest 2 apk androidapksdl.com<br />
47
- epic conquest 2 apk apkpure.io<br />
48
- epic conquest 2 apk apkmody.co<br />
49
- epic conquest 2 apk androidapksfree.net<br />
50
- epic conquest 2 apk apkpure.com/apk/<br />
51
- epic conquest 2 apk androidapksbox.net<br />
52
- epic conquest 2 apk apkpure.in.th/<br />
53
- epic conquest 2 apk apkmody.io/mods/<br />
54
- epic conquest 2 apk androidapksdl.net<br />
55
- epic conquest 2 apk apkpure.me/<br />
56
- epic conquest 2 apk apkmody.co/mods/<br />
57
- epic conquest 2 apk androidapksfree.org/<br />
58
- epic conquest 2 apk apkpure.com/download/<br />
59
- epic conquest 2 apk androidapksbox.org/<br />
60
- epic conquest 2 apk apkpure.net/<br />
61
- epic conquest 2 apk apkmody.io/apk/</p>
62
- <h2>How to download Epic Conquest 2 APK for Android?</h2>
63
- <p>There are several ways to download Epic Conquest 2 APK for your Android device. Here are some of them:</p>
64
- <h3>Download from the official website or Google Play Store</h3>
65
- <p>The easiest way to download Epic Conquest 2 APK is to visit the official website of Gaco Games at <a href="https://gacogames.com/">https://gacogames.com/</a> or search for Epic Conquest 2 on Google Play Store. You can find the latest version of the game there and install it directly on your device. This way, you can be sure that you are getting the official and safe version of the game. You can also get updates and support from the developers this way.</p>
66
- <h3>Download from third-party sources like APKCombo, Softonic, or mob.org</h3>
67
- <p>Another way to download Epic Conquest 2 APK is to use third-party websites that offer APK files for various apps and games. Some of the popular ones are APKCombo, Softonic, and mob.org. You can search for Epic Conquest 2 on these websites and download the APK file to your device. However, you should be careful when using this method, as some of the APK files may be modified or infected with malware. You should always check the reviews and ratings of the APK file before downloading it. You should also enable the "Unknown sources" option on your device settings to allow the installation of APK files from outside sources.</p>
68
- <h3>Install the APK file on your device and enjoy the game</h3>
69
- <p>Once you have downloaded the Epic Conquest 2 APK file, you can install it on your device by tapping on it and following the instructions. You may need to grant some permissions to the app to access your device's storage, camera, microphone, etc. After the installation is complete, you can launch the game and start playing it. You may need to download some additional data for the game to run smoothly.</p>
70
- <h2>Why should you play Epic Conquest 2?</h2>
71
- <p>Epic Conquest 2 is a game that will appeal to fans of classic RPGs as well as newcomers who want to try a fun and immersive game. Here are some of the reasons why you should play Epic Conquest 2:</p>
72
- <h3>It offers a fun and immersive gameplay experience</h3>
73
- <p>Epic Conquest 2 has a gameplay that is easy to learn but hard to master. You can control your character with simple touch controls and unleash powerful skills and combos with a tap of a button. You can also dodge, block, and counter enemy attacks with timing and strategy. The game has a variety of enemies and bosses that will challenge your skills and tactics. The game also has a dynamic weather system that will affect the environment and gameplay.</p>
74
- <h3>It has a rich and engaging story with multiple endings</h3>
75
- <p>Epic Conquest 2 has a story that will keep you interested and invested in the fate of the characters and the world. You can choose from four different characters, each with their own personality, backstory, and motivation. You can also interact with various NPCs and complete quests that will affect the outcome of the story. There are multiple endings to discover depending on your choices and actions. The game also has a lot of humor and references that will make you laugh and smile.</p>
76
- <h3>It has a lot of content and features to explore and customize</h3>
77
- <p>Epic Conquest 2 has a lot of content and features that will keep you entertained for hours. You can explore an open world full of treasures and resources that you can use to craft, enhance, and upgrade your equipment. You can also buy costumes for your character to change their appearance and get a boost of power. You can also customize your character's attributes, skills, and masteries according to your playstyle. The game also has a cloud save feature that will allow you to backup and load your progress across devices.</p>
78
- <h2>What are some tips and tricks for playing Epic Conquest 2?</h2>
79
- <p>If you want to get the most out of Epic Conquest 2, here are some tips and tricks that will help you:</p>
80
- <h3>Choose your character wisely and build them according to your playstyle</h3>
81
- <p>Epic Conquest 2 has four different characters that you can choose from: Alaster, Edna, Alma, and Raine. Each character has their own strengths and weaknesses, as well as unique skills and masteries that will make them excel in different situations. For example, Alaster is a warrior who specializes in melee combat and physical damage; Edna is a mage who specializes in ranged combat and elemental damage; Alma is a rogue who specializes in stealth combat and critical damage; Raine is a cleric who specializes in healing combat and support. You should choose the character that suits your playstyle and preference, and build them accordingly. You can distribute your attributes (STR, INT, AGI, DEX, VIT) and choose from eight skills and eight masteries for each character. You can also switch between characters at any time in the game.</p>
82
- <h3>Explore the world and collect resources, treasures, and costumes</h3>
83
- <p>Epic Conquest 2 has an open world that you can explore freely. You can find various resources, treasures, and costumes that will help you in your adventure. Resources can be used to craft, enhance, and upgrade your equipment. Treasures can be sold for gold or exchanged for other items. Costumes can change your appearance and give you a boost of power. You can also find hidden areas and secrets that will reward you with more loot and surprises.</p>
84
- <h3>Craft, enhance, and upgrade your equipment to tackle harder challenges</h3>
85
- <p>Epic Conquest 2 has a crafting system that will allow you to create your own equipment from the resources you collect. You can craft weapons, armors, accessories, potions, and scrolls that will improve your stats and abilities. You can also enhance and upgrade your equipment to make them more powerful and effective. You can use enhancement stones to increase the level of your equipment, and use upgrade stones to increase the rarity of your equipment. You can also use runes to add special effects to your equipment. You will need better equipment to face harder enemies and bosses in the game.</p>
86
- <h3>Use the cloud save feature to backup and load your progress across devices</h3>
87
- <p>Epic Conquest 2 has a cloud save feature that will allow you to backup and load your progress across devices. You can use this feature to save your game data on the cloud server and access it from any device that has the game installed. You can also use this feature to transfer your game data from one device to another. This way, you can play the game on different devices without losing your progress or starting over.</p>
88
- <h2>Conclusion</h2>
89
- <p>Epic Conquest 2 is a game that will satisfy your craving for a classic RPG with an open world and a great story. It is a game that has a lot of content and features to explore and customize. It is a game that offers a fun and immersive gameplay experience. It is a game that you can download for free on your Android device and play offline without any ads or payments. If you are looking for a game like this, you should download Epic Conquest 2 APK today and start your epic adventure.</p>
90
- <h2>FAQs</h2>
91
- <p>Here are some of the frequently asked questions about Epic Conquest 2:</p>
92
- <h3>Q: How long is the game?</h3>
93
- <p>A: The game has about 20 hours of main story content, plus more hours of side quests, exploration, and replay value.</p>
94
- <h3>Q: How many endings are there in the game?</h3>
95
- <p>A: The game has four main endings, plus several variations depending on your choices and actions.</p>
96
- <h3>Q: How do I get more gold in the game?</h3>
97
- <p>A: You can get more gold by selling items, completing quests, finding treasures, or watching ads (optional).</p>
98
- <h3>Q: How do I get more costumes in the game?</h3>
99
- <p>A: You can get more costumes by buying them from shops, finding them in chests, completing achievements, or watching ads (optional).</p>
100
- <h3>Q: How do I contact the developers of the game?</h3>
101
- <p>A: You can contact the developers of the game by visiting their website at <a href="https://gacogames.com/">https://gacogames.com/</a>, or by following them on their social media accounts at <a href="https://www.facebook.com/gacogames/">https://www.facebook.com/gacogames/</a>, <a href="https://twitter.com/gacogames/">https://twitter.com/gacogames/</a>, or <a href="https://www.instagram.com/gacogames/">https://www.instagram.com/gacogames/</a>.</p>
102
- <br />
103
- <br />
spaces/AB-TW/team-ai/documents/bussiness_context/NOTION_DB/Engineering Wiki 2402f5396a3244fdb3f1d135bdb0f3d6/AWS b022fe0cb7084cc0b64624f7bc8cde2c.md DELETED
@@ -1,5 +0,0 @@
1
- # AWS
2
-
3
- Last edited time: March 31, 2023 1:49 PM
4
- Owner: Anonymous
5
- Tags: Infrastructure
spaces/ADOPLE/ResumeAnalyzer/app.py DELETED
@@ -1,144 +0,0 @@
1
- import gradio as gr
2
- import PyPDF2
3
- import os
4
- import openai
5
- import re
6
- import plotly.graph_objects as go
7
-
8
- class ResumeAnalyser:
9
- def __init__(self):
10
- pass
11
- def extract_text_from_file(self,file_path):
12
- # Get the file extension
13
- file_extension = os.path.splitext(file_path)[1]
14
-
15
- if file_extension == '.pdf':
16
- with open(file_path, 'rb') as file:
17
- # Create a PDF file reader object
18
- reader = PyPDF2.PdfFileReader(file)
19
-
20
- # Create an empty string to hold the extracted text
21
- extracted_text = ""
22
-
23
- # Loop through each page in the PDF and extract the text
24
- for page_number in range(reader.getNumPages()):
25
- page = reader.getPage(page_number)
26
- extracted_text += page.extractText()
27
- return extracted_text
28
-
29
- elif file_extension == '.txt':
30
- with open(file_path, 'r') as file:
31
- # Just read the entire contents of the text file
32
- return file.read()
33
-
34
- else:
35
- return "Unsupported file type"
36
-
37
- def responce_from_ai(self,textjd, textcv):
38
-         resume = self.extract_text_from_file(textcv)  # textcv is the resume file path
39
-         job_description = self.extract_text_from_file(textjd)  # textjd is the job description file path
40
-
41
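-         # Ask the model to compare the resume against the job description and return the structured fields that matching_percentage() parses below.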
- response = openai.Completion.create(
42
- engine="text-davinci-003",
43
- prompt=f"""
44
-             Given the job description and the resume, assess the matching percentage out of 100; if the match is below 100, state the remaining percentage and the reason. **Job Description:**{job_description}**Resume:**{resume}
45
- **Detailed Analysis:**
46
- the result should be in this format:
47
- Matched Percentage: [matching percentage].
48
- Reason : [Mention Reason and keys from job_description and resume get this matched percentage.].
49
- Skills To Improve : [Mention the skills How to improve and get 100 percentage job description matching].
50
- Keywords : [matched key words from {job_description} and {resume}].
51
- """,
52
- temperature=0,
53
-             max_tokens=500,  # enough room for the percentage, reason, skills and keywords sections
54
- n=1,
55
- stop=None,
56
- )
57
- generated_text = response.choices[0].text.strip()
58
- print(generated_text)
59
- return generated_text
60
-
61
-
62
- def matching_percentage(self,job_description_path, resume_path):
63
- job_description_path = job_description_path.name
64
- resume_path = resume_path.name
65
-
66
- generated_text = self.responce_from_ai(job_description_path, resume_path)
67
-
68
- result = generated_text
69
-
70
- lines = result.split('\n')
71
-
72
- matched_percentage = None
73
- matched_percentage_txt = None
74
- reason = None
75
- skills_to_improve = None
76
- keywords = None
77
-
78
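-         # Walk the model's response line by line and pull out each labelled field.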
- for line in lines:
79
- if line.startswith('Matched Percentage:'):
80
- match = re.search(r"Matched Percentage: (\d+)%", line)
81
- if match:
82
- matched_percentage = int(match.group(1))
83
- matched_percentage_txt = (f"Matched Percentage: {matched_percentage}%")
84
- elif line.startswith('Reason'):
85
- reason = line.split(':')[1].strip()
86
- elif line.startswith('Skills To Improve'):
87
- skills_to_improve = line.split(':')[1].strip()
88
- elif line.startswith('Keywords'):
89
- keywords = line.split(':')[1].strip()
90
-
91
-
92
- # Extract the matched percentage using regular expression
93
- # match1 = re.search(r"Matched Percentage: (\d+)%", matched_percentage)
94
- # matched_Percentage = int(match1.group(1))
95
-
96
- # Creating a pie chart with plotly
97
- labels = ['Matched', 'Remaining']
98
- values = [matched_percentage, 100 - matched_percentage]
99
-
100
- fig = go.Figure(data=[go.Pie(labels=labels, values=values)])
101
- # fig.update_layout(title='Matched Percentage')
102
-
103
-
104
- return matched_percentage_txt,reason, skills_to_improve, keywords,fig
105
-
106
-
107
- def gradio_interface(self):
108
- with gr.Blocks(css="style.css",theme=gr.themes.Soft()) as app:
109
- #gr.HTML("""<img class="center" align="center" src="https://drive.google.com/file/d/1Suir2UMmryGveM8P0LFO768WpCtJ8jBg/view?usp=sharing" alt="Image" width="210" height="210">""")
110
- gr.HTML("""<h1 style="color:#100C08;text-align:center;font-size:6vw;">ADOPLE AI</h1>""")
111
- with gr.Row():
112
- with gr.Column(elem_id="col-container"):
113
- gr.HTML(
114
- """<br style="color:white;">"""
115
- )
116
- gr.HTML(
117
-                     """<h2 style="text-align:center; color:white;">ADOPLE AI Resume Analyzer</h2> """
118
- )
119
- gr.HTML("<br>")
120
- with gr.Row():
121
- with gr.Column(scale=0.45, min_width=150, ):
122
- jobDescription = gr.File(label="Job Description")
123
- with gr.Column(scale=0.45, min_width=150):
124
- resume = gr.File(label="Resume")
125
- with gr.Column(scale=0.10, min_width=150):
126
- analyse = gr.Button("Analyse")
127
- with gr.Row():
128
- with gr.Column(scale=1.0, min_width=150):
129
-                     percentage = gr.Textbox(label="Matching Percentage",lines=8)
130
- with gr.Column(scale=1.0, min_width=150):
131
- reason = gr.Textbox(label="Matching Reason",lines=8)
132
- with gr.Column(scale=1.0, min_width=150):
133
- skills = gr.Textbox(label="Skills To Improve",lines=8)
134
- with gr.Column(scale=1.0, min_width=150):
135
- keywords = gr.Textbox(label="Matched Keywords",lines=8)
136
- with gr.Row():
137
- with gr.Column(scale=1.0, min_width=150):
138
- pychart = gr.Plot(label="Matching Percentage Chart")
139
-             analyse.click(self.matching_percentage, [jobDescription, resume], [percentage,reason,skills,keywords,pychart])
140
-
141
- app.launch()
142
-
143
- resume=ResumeAnalyser()
144
- resume.gradio_interface()
spaces/AHzizi/WaifuVoiceGen/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: Vits Models
3
- emoji: 🏃
4
- colorFrom: pink
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.17.0
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- duplicated_from: sayashi/vits-models
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AIConsultant/MusicGen/model_cards/AUDIOGEN_MODEL_CARD.md DELETED
@@ -1,79 +0,0 @@
1
- # AudioGen Model Card
2
-
3
- ## Model details
4
- **Organization developing the model:** The FAIR team of Meta AI.
5
-
6
- **Model date:** This version of AudioGen was trained between July 2023 and August 2023.
7
-
8
- **Model version:** This is version 2 of the model, not to be confused with the original AudioGen model published in ["AudioGen: Textually Guided Audio Generation"][audiogen].
9
- In this version (v2), AudioGen was trained on the same data, but with some other differences:
10
- 1. This model was trained on 10 seconds (vs. 5 seconds in v1).
11
- 2. The discrete representation used under the hood is extracted using a retrained EnCodec model on the environmental sound data, following the EnCodec setup detailed in the ["Simple and Controllable Music Generation" paper][musicgen].
12
- 3. No audio mixing augmentations.
13
-
14
- **Model type:** AudioGen consists of an EnCodec model for audio tokenization, and an auto-regressive language model based on the transformer architecture for audio modeling. The released model has 1.5B parameters.
15
-
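For orientation, a minimal usage sketch with the `audiocraft` library is shown below, using the `facebook/audiogen-medium` checkpoint referenced later in this card; the exact API can differ between audiocraft releases, so treat this as illustrative rather than the project's reference code.

```python
# Minimal sketch: text-to-sound generation with the released AudioGen checkpoint.
# Assumes `pip install audiocraft` and a working PyTorch install; the API may
# change between audiocraft versions, so consult the project README if a call fails.
from audiocraft.models import AudioGen
from audiocraft.data.audio import audio_write

model = AudioGen.get_pretrained("facebook/audiogen-medium")
model.set_generation_params(duration=5)  # seconds of audio per sample

descriptions = ["dog barking in the distance", "sirens of an emergency vehicle"]
wavs = model.generate(descriptions)  # one waveform per description

for i, wav in enumerate(wavs):
    # Writes sample_0.wav, sample_1.wav, ... with loudness normalization.
    audio_write(f"sample_{i}", wav.cpu(), model.sample_rate, strategy="loudness")
```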
16
- **Paper or resource for more information:** More information can be found in the paper [AudioGen: Textually Guided Audio Generation](https://arxiv.org/abs/2209.15352).
17
-
18
- **Citation details:** See [AudioGen paper][audiogen]
19
-
20
- **License:** Code is released under MIT, model weights are released under CC-BY-NC 4.0.
21
-
22
- **Where to send questions or comments about the model:** Questions and comments about AudioGen can be sent via the [GitHub repository](https://github.com/facebookresearch/audiocraft) of the project, or by opening an issue.
23
-
24
- ## Intended use
25
- **Primary intended use:** The primary use of AudioGen is research on AI-based audio generation, including:
26
- - Research efforts, such as probing and better understanding the limitations of generative models to further improve the state of science
27
- - Generation of sound guided by text to understand current abilities of generative AI models by machine learning amateurs
28
-
29
- **Primary intended users:** The primary intended users of the model are researchers in audio, machine learning and artificial intelligence, as well as amateurs seeking to better understand those models.
30
-
31
- **Out-of-scope use cases:** The model should not be used on downstream applications without further risk evaluation and mitigation. The model should not be used to intentionally create or disseminate audio pieces that create hostile or alienating environments for people. This includes generating audio that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes.
32
-
33
- ## Metrics
34
-
35
- **Model performance measures:** We used the following objective measures to evaluate the model on a standard audio benchmark (a rough sketch of the KLD computation follows the list):
36
- - Frechet Audio Distance computed on features extracted from a pre-trained audio classifier (VGGish)
37
- - Kullback-Leibler Divergence on label distributions extracted from a pre-trained audio classifier (PaSST)
38
-
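As a rough illustration of the KLD measure above, the sketch below compares classifier label distributions for reference and generated clips; `ref_logits`/`gen_logits` stand in for the outputs of a pretrained tagger such as PaSST, and this is not the paper's exact evaluation code.

```python
# Rough sketch of the KLD metric: KL divergence between the label distributions
# a pretrained audio classifier assigns to reference vs. generated audio.
import torch
import torch.nn.functional as F

def kld_metric(ref_logits: torch.Tensor, gen_logits: torch.Tensor) -> torch.Tensor:
    """ref_logits, gen_logits: (batch, num_classes) raw classifier outputs."""
    ref_probs = ref_logits.softmax(dim=-1)          # target distribution
    gen_log_probs = gen_logits.log_softmax(dim=-1)  # approximating distribution (log-space)
    # KL(ref || gen), averaged over the batch.
    return F.kl_div(gen_log_probs, ref_probs, reduction="batchmean")
```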
39
- Additionally, we run qualitative studies with human participants, evaluating the performance of the model with the following axes:
40
- - Overall quality of the audio samples;
41
- - Text relevance to the provided text input;
42
-
43
- More details on performance measures and human studies can be found in the paper.
44
-
45
- **Decision thresholds:** Not applicable.
46
-
47
- ## Evaluation datasets
48
-
49
- The model was evaluated on the [AudioCaps benchmark](https://audiocaps.github.io/).
50
-
51
- ## Training datasets
52
-
53
- The model was trained on the following data sources: a subset of AudioSet (Gemmeke et al., 2017), [BBC sound effects](https://sound-effects.bbcrewind.co.uk/), AudioCaps (Kim et al., 2019), Clotho v2 (Drossos et al., 2020), VGG-Sound (Chen et al., 2020), FSD50K (Fonseca et al., 2021), [Free To Use Sounds](https://www.freetousesounds.com/all-in-one-bundle/), [Sonniss Game Effects](https://sonniss.com/gameaudiogdc), [WeSoundEffects](https://wesoundeffects.com/we-sound-effects-bundle-2020/), [Paramount Motion - Odeon Cinematic Sound Effects](https://www.paramountmotion.com/odeon-sound-effects).
54
-
55
- ## Evaluation results
56
-
57
- Below are the objective metrics obtained with the released model on AudioCaps (consisting of 10-second long samples). Note that the model differs from the original AudioGen model introduced in the paper, hence the difference in the metrics.
58
-
59
- | Model | Frechet Audio Distance | KLD | Text consistency |
60
- |---|---|---|---|
61
- | facebook/audiogen-medium | 1.77 | 1.41 | 0.299 |
62
-
63
- More information can be found in the paper [AudioGen: Textually Guided Audio Generation][audiogen], in the Experiments section.
64
-
65
- ## Limitations and biases
66
-
67
- **Limitations:**
68
- - The model is not able to generate realistic vocals.
69
- - The model has been trained with English descriptions and will not perform as well in other languages.
70
- - It is sometimes difficult to assess what types of text descriptions provide the best generations. Prompt engineering may be required to obtain satisfying results.
71
-
72
- **Biases:** The datasets used for training may lack diversity and are not representative of all possible sound events. The generated samples from the model will reflect the biases from the training data.
73
-
74
- **Risks and harms:** Biases and limitations of the model may lead to generation of samples that may be considered as biased, inappropriate or offensive. We believe that providing the code to reproduce the research and train new models will allow to broaden the application to new and more representative data.
75
-
76
- **Use cases:** Users must be aware of the biases, limitations and risks of the model. AudioGen is a model developed for artificial intelligence research on audio generation. As such, it should not be used for downstream applications without further investigation and mitigation of risks.
77
-
78
- [musicgen]: https://arxiv.org/abs/2306.05284
79
- [audiogen]: https://arxiv.org/abs/2209.15352
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/modules.py DELETED
@@ -1,350 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from functools import partial
4
-
5
- from ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a requirement? --> test
6
- from torch.utils.checkpoint import checkpoint
7
- from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel, AutoTokenizer
8
- from importlib_resources import files
9
- from ldm.modules.encoders.CLAP.utils import read_config_as_args
10
- from ldm.modules.encoders.CLAP.clap import TextEncoder
11
- from ldm.util import default, count_params
12
- import open_clip
13
-
14
- class AbstractEncoder(nn.Module):
15
- def __init__(self):
16
- super().__init__()
17
-
18
- def encode(self, *args, **kwargs):
19
- raise NotImplementedError
20
-
21
-
22
- class ClassEmbedder(nn.Module):
23
- def __init__(self, embed_dim, n_classes=1000, key='class'):
24
- super().__init__()
25
- self.key = key
26
- self.embedding = nn.Embedding(n_classes, embed_dim)
27
-
28
- def forward(self, batch, key=None):
29
- if key is None:
30
- key = self.key
31
- # this is for use in crossattn
32
- c = batch[key][:, None]# (bsz,1)
33
- c = self.embedding(c)
34
- return c
35
-
36
-
37
- class TransformerEmbedder(AbstractEncoder):
38
- """Some transformer encoder layers"""
39
- def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
40
- super().__init__()
41
- self.device = device
42
- self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
43
- attn_layers=Encoder(dim=n_embed, depth=n_layer))
44
-
45
- def forward(self, tokens):
46
- tokens = tokens.to(self.device) # meh
47
- z = self.transformer(tokens, return_embeddings=True)
48
- return z
49
-
50
- def encode(self, x):
51
- return self(x)
52
-
53
-
54
- class BERTTokenizer(AbstractEncoder):
55
- """ Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)"""
56
- def __init__(self, device="cuda", vq_interface=True, max_length=77):
57
- super().__init__()
58
-         from transformers import BertTokenizerFast # TODO: add to requirements
59
- self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
60
- self.device = device
61
- self.vq_interface = vq_interface
62
- self.max_length = max_length
63
-
64
- def forward(self, text):
65
- batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
66
- return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
67
- tokens = batch_encoding["input_ids"].to(self.device)
68
- return tokens
69
-
70
- @torch.no_grad()
71
- def encode(self, text):
72
- tokens = self(text)
73
- if not self.vq_interface:
74
- return tokens
75
- return None, None, [None, None, tokens]
76
-
77
- def decode(self, text):
78
- return text
79
-
80
-
81
- class BERTEmbedder(AbstractEncoder):  # Note: this is not a pretrained BERT; it pairs the transformers BertTokenizer with a custom TransformerWrapper
82
-     """Uses the BERT tokenizer and adds some transformer encoder layers"""
83
- def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77,
84
- device="cuda",use_tokenizer=True, embedding_dropout=0.0):
85
- super().__init__()
86
- self.use_tknz_fn = use_tokenizer
87
- if self.use_tknz_fn:
88
- self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len)
89
- self.device = device
90
- self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
91
- attn_layers=Encoder(dim=n_embed, depth=n_layer),
92
- emb_dropout=embedding_dropout)
93
-
94
- def forward(self, text):
95
- if self.use_tknz_fn:
96
- tokens = self.tknz_fn(text)#.to(self.device)
97
- else:
98
- tokens = text
99
- z = self.transformer(tokens, return_embeddings=True)
100
- return z
101
-
102
- def encode(self, text):
103
- # output of length 77
104
- return self(text)
105
-
106
-
107
- class SpatialRescaler(nn.Module):
108
- def __init__(self,
109
- n_stages=1,
110
- method='bilinear',
111
- multiplier=0.5,
112
- in_channels=3,
113
- out_channels=None,
114
- bias=False):
115
- super().__init__()
116
- self.n_stages = n_stages
117
- assert self.n_stages >= 0
118
- assert method in ['nearest','linear','bilinear','trilinear','bicubic','area']
119
- self.multiplier = multiplier
120
- self.interpolator = partial(torch.nn.functional.interpolate, mode=method)
121
- self.remap_output = out_channels is not None
122
- if self.remap_output:
123
- print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.')
124
- self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias)
125
-
126
- def forward(self,x):
127
- for stage in range(self.n_stages):
128
- x = self.interpolator(x, scale_factor=self.multiplier)
129
-
130
-
131
- if self.remap_output:
132
- x = self.channel_mapper(x)
133
- return x
134
-
135
- def encode(self, x):
136
- return self(x)
137
-
138
- def disabled_train(self, mode=True):
139
- """Overwrite model.train with this function to make sure train/eval mode
140
- does not change anymore."""
141
- return self
142
-
143
- class FrozenT5Embedder(AbstractEncoder):
144
- """Uses the T5 transformer encoder for text"""
145
- def __init__(self, version="google/t5-v1_1-large", device="cuda", max_length=77, freeze=True): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
146
- super().__init__()
147
- self.tokenizer = T5Tokenizer.from_pretrained(version)
148
- self.transformer = T5EncoderModel.from_pretrained(version)
149
- self.device = device
150
- self.max_length = max_length # TODO: typical value?
151
- if freeze:
152
- self.freeze()
153
-
154
- def freeze(self):
155
- self.transformer = self.transformer.eval()
156
- #self.train = disabled_train
157
- for param in self.parameters():
158
- param.requires_grad = False
159
-
160
- def forward(self, text):
161
- batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
162
- return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
163
- tokens = batch_encoding["input_ids"].to(self.device)
164
- outputs = self.transformer(input_ids=tokens)
165
-
166
- z = outputs.last_hidden_state
167
- return z
168
-
169
- def encode(self, text):
170
- return self(text)
171
-
172
-
173
- class FrozenCLAPEmbedder(AbstractEncoder):
174
- """Uses the CLAP transformer encoder for text (from huggingface)"""
175
- def __init__(self, weights_path, freeze=True, device="cuda", max_length=77): # clip-vit-base-patch32
176
- super().__init__()
177
-
178
- model_state_dict = torch.load(weights_path, map_location=torch.device('cpu'))['model']
179
- match_params = dict()
180
- for key in list(model_state_dict.keys()):
181
- if 'caption_encoder' in key:
182
- match_params[key.replace('caption_encoder.', '')] = model_state_dict[key]
183
-
184
- config_as_str = files('ldm').joinpath('modules/encoders/CLAP/config.yml').read_text()
185
- args = read_config_as_args(config_as_str, is_config_str=True)
186
-
187
- # To device
188
- self.tokenizer = AutoTokenizer.from_pretrained(args.text_model) # args.text_model
189
- self.caption_encoder = TextEncoder(
190
- args.d_proj, args.text_model, args.transformer_embed_dim
191
- )
192
-
193
- self.max_length = max_length
194
- self.device = device
195
- if freeze: self.freeze()
196
-
197
- print(f"{self.caption_encoder.__class__.__name__} comes with {count_params(self.caption_encoder) * 1.e-6:.2f} M params.")
198
-
199
- def freeze(self):
200
- self.caption_encoder.base = self.caption_encoder.base.eval()
201
- for param in self.caption_encoder.base.parameters():
202
- param.requires_grad = False
203
-
204
-
205
- def encode(self, text):
206
- batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
207
- return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
208
- tokens = batch_encoding["input_ids"].to(self.device)
209
-
210
- outputs = self.caption_encoder.base(input_ids=tokens)
211
- z = self.caption_encoder.projection(outputs.last_hidden_state)
212
- return z
213
-
214
- class FrozenCLAPEmbedderNoLoad(AbstractEncoder):
215
- def __init__(self, config, freeze=True, device="cpu", max_length=77):
216
- super().__init__()
217
- args = config
218
-
219
- # To device
220
- self.tokenizer = AutoTokenizer.from_pretrained(args.text_model) # args.text_model
221
- self.caption_encoder = TextEncoder(
222
- args.d_proj, args.text_model, args.transformer_embed_dim
223
- )
224
-
225
- self.max_length = max_length
226
- self.device = device
227
- if freeze: self.freeze()
228
-
229
- print(f"{self.caption_encoder.__class__.__name__} comes with {count_params(self.caption_encoder) * 1.e-6:.2f} M params.")
230
-
231
- def freeze(self):
232
- self.caption_encoder.base = self.caption_encoder.base.eval()
233
- for param in self.caption_encoder.base.parameters():
234
- param.requires_grad = False
235
-
236
-
237
- def encode(self, text):
238
- batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
239
- return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
240
- tokens = batch_encoding["input_ids"].to(self.device)
241
-
242
- outputs = self.caption_encoder.base(input_ids=tokens)
243
- z = self.caption_encoder.projection(outputs.last_hidden_state)
244
- return z
245
-
246
-
247
- class NewFrozenCLAPEmbedder(AbstractEncoder):
248
- """Uses the CLAP transformer encoder for text (from huggingface)"""
249
- def __init__(self, weights_path, freeze=True, device="cuda", max_length=77): # clip-vit-base-patch32
250
- super().__init__()
251
- # To device
252
- from transformers import RobertaTokenizer
253
- from ldm.modules.encoders.open_clap import create_model
254
-
255
-
256
- model, model_cfg = create_model(
257
- 'HTSAT-tiny',
258
- 'roberta',
259
- weights_path,
260
- enable_fusion=True,
261
- fusion_type='aff_2d'
262
- )
263
-
264
- del model.audio_branch, model.audio_transform, model.audio_projection
265
- self.tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
266
- self.model = model
267
-
268
- self.max_length = max_length
269
- self.device = device
270
- if freeze: self.freeze()
271
-
272
- param_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
273
- print(f'{self.model.__class__.__name__} comes with: {param_num / 1e+6:.3f} M params.')
274
-
275
- def freeze(self):
276
- self.model = self.model.eval()
277
- for param in self.model.parameters():
278
- param.requires_grad = False
279
-
280
- def encode(self, text):
281
- batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
282
- return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
283
- outputs = self.model.text_branch(input_ids=batch_encoding["input_ids"].to(self.device), attention_mask=batch_encoding["attention_mask"].to(self.device))
284
- z = self.model.text_projection(outputs.last_hidden_state)
285
- return z
286
-
287
- class FrozenFLANEmbedder(AbstractEncoder):
288
- """Uses the T5 transformer encoder for text"""
289
- def __init__(self, version="google/flan-t5-large", device="cuda", max_length=77, freeze=True): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
290
- super().__init__()
291
- self.tokenizer = T5Tokenizer.from_pretrained(version)
292
- self.transformer = T5EncoderModel.from_pretrained(version)
293
- self.device = device
294
- self.max_length = max_length # TODO: typical value?
295
- if freeze:
296
- self.freeze()
297
-
298
- def freeze(self):
299
- self.transformer = self.transformer.eval()
300
- #self.train = disabled_train
301
- for param in self.parameters():
302
- param.requires_grad = False
303
-
304
- def forward(self, text):
305
- batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
306
- return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
307
- tokens = batch_encoding["input_ids"].to(self.device)
308
- outputs = self.transformer(input_ids=tokens)
309
-
310
- z = outputs.last_hidden_state
311
- return z
312
-
313
- def encode(self, text):
314
- return self(text)
315
- class FrozenGlobalNormOpenCLIPEmbedder(AbstractEncoder):
316
- """
317
- Uses the OpenCLIP transformer encoder for text
318
- """
319
- def __init__(self, arch="ViT-H-14", version="laion2b_s32b_b79k", device="cuda", freeze=True, delvisual=True):
320
- super().__init__()
321
- model, _, preprocess = open_clip.create_model_and_transforms(arch, device=torch.device('cpu'), pretrained=version)
322
- if delvisual:
323
- del model.visual
324
- del preprocess
325
- else:
326
- self.preprocess = preprocess
327
- self.model = model
328
-
329
- self.device = device
330
- if freeze:
331
- self.freeze()
332
-
333
- def freeze(self):
334
- self.model = self.model.eval()
335
- for param in self.parameters():
336
- param.requires_grad = False
337
-
338
- def forward(self, text):
339
- tokens = open_clip.tokenize(text)
340
- z = self.model.encode_text(tokens.to(self.device))
341
- z /= z.norm(dim=-1, keepdim=True)
342
- return z.unsqueeze(1)
343
-
344
- def forward_img(self, image):
345
- z = self.model.encode_image(image.to(self.device))
346
- z /= z.norm(dim=-1, keepdim=True)
347
- return z.unsqueeze(1)
348
-
349
- def encode(self, text):
350
- return self(text)
spaces/AIGC-Audio/Make_An_Audio_inpaint/vocoder/bigvgan/__init__.py DELETED
File without changes
spaces/Ababababababbababa/poetry2023/app.py DELETED
@@ -1,53 +0,0 @@
1
- import gc
2
- import gradio as gr
3
- from transformers import pipeline, set_seed
4
-
5
- pipe = pipeline('text-generation', framework='pt', model='akhooli/ap2023', tokenizer='akhooli/ap2023')
6
- #gc.collect()
7
- samples = [['أنت'
8
- ,1.0, 50, 1.0, 1.0, 114],['هل غادر'
9
- ,1.0, 50, 1.0, 1.0, 114 ],['ألا ليت'
10
- ,1.0, 50, 1.0, 1.0, 114 ],['يا قدس'
11
- ,1.0, 50, 1.0, 1.0, 114],['عيد بأية حال'
12
- ,1.0, 50, 1.0, 1.0, 114],['لكل شيء إذا ما'
13
- ,1.0, 50, 1.0, 1.0, 114 ],['.'
14
- ,1.0, 50, 1.0, 1.0, 114]]
15
-
16
- notes = """
17
- - Enter a short prompt or select (click) one of the examples and click SEND
18
- - Adjust parameters (temperture, top k, top p and penalty) through the slider (keep close to default values).
19
- - For the same seed (randomness), the same output is regenerated if other parameters are fixed
20
- - Clear and enter new prompt or select another example and SEND to regenerate
21
- - The '.' means start a new line from no prompt (your prompt need not be long)
22
- - Be patient: this runs on CPU (free tier)
23
- - Feedback (Twitter): @akhooli (https://twitter.com/akhooli/status/1611025232201977859)
24
- - Note/Disclaimer: may generate unaccepted or inappropriate content. Use at your own risk.
25
- """
26
- def sayPoetry(prompt, temp=1.0, topk = 50, topp = 1.0, penalty=1.0, seed=114):
27
- if not int(seed) >= 0: seed=114
28
- set_seed(seed)
29
- gen = pipe(prompt, max_length=96, do_sample=True, temperature=temp, top_k=topk, top_p=topp, repetition_penalty=penalty,
30
- min_length = 64, no_repeat_ngram_size = 3, return_full_text=True,
31
- num_beams=5, num_return_sequences=1)[0]["generated_text"]
32
- poetry =""
33
- for line in gen.split('.')[:-1]:
34
- poetry += line #+ "\n"
35
- return poetry
36
- poetry = gr.Interface(fn=sayPoetry,
37
- inputs=[
38
- gr.Textbox(label="Enter short prompt or select from examples:"),
39
- gr.Slider(0.70, 1.2, step=0.01,value=1.0, label='control temperature'),
40
- gr.Slider(25, 100, step=1,value=50, label='control top k'),
41
- gr.Slider(0.80, 1.0, step=0.01,value=1.0, label='control top p'),
42
- gr.Slider(0.90, 1.50, step=0.01,value=1.0, label='control penalty'),
43
- gr.Number(value=139750, precision=0, label='Seed'),
44
- ],
45
- outputs=[gr.Textbox(label="Generated Poetry:")],
46
-
47
- allow_flagging='never',
48
- title='Arabic Poetry Generation Demo (updated Jan. 2023)',
49
- description = "A simple demo of AI generated poetry based on 1M poems fine-tuned using AraGPT2 (be patient, runs on cpu)",
50
- examples=samples,
51
- cache_examples=False,
52
- article = notes)
53
- poetry.launch() # show_error = True, debug=True
spaces/Abhilashvj/planogram-compliance/export.py DELETED
@@ -1,1013 +0,0 @@
1
- # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
- """
3
- Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
4
-
5
- Format | `export.py --include` | Model
6
- --- | --- | ---
7
- PyTorch | - | yolov5s.pt
8
- TorchScript | `torchscript` | yolov5s.torchscript
9
- ONNX | `onnx` | yolov5s.onnx
10
- OpenVINO | `openvino` | yolov5s_openvino_model/
11
- TensorRT | `engine` | yolov5s.engine
12
- CoreML | `coreml` | yolov5s.mlmodel
13
- TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
14
- TensorFlow GraphDef | `pb` | yolov5s.pb
15
- TensorFlow Lite | `tflite` | yolov5s.tflite
16
- TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
17
- TensorFlow.js | `tfjs` | yolov5s_web_model/
18
- PaddlePaddle | `paddle` | yolov5s_paddle_model/
19
-
20
- Requirements:
21
- $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
22
- $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU
23
-
24
- Usage:
25
- $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...
26
-
27
- Inference:
28
- $ python detect.py --weights yolov5s.pt # PyTorch
29
- yolov5s.torchscript # TorchScript
30
- yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
31
- yolov5s_openvino_model # OpenVINO
32
- yolov5s.engine # TensorRT
33
- yolov5s.mlmodel # CoreML (macOS-only)
34
- yolov5s_saved_model # TensorFlow SavedModel
35
- yolov5s.pb # TensorFlow GraphDef
36
- yolov5s.tflite # TensorFlow Lite
37
- yolov5s_edgetpu.tflite # TensorFlow Edge TPU
38
- yolov5s_paddle_model # PaddlePaddle
39
-
40
- TensorFlow.js:
41
- $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
42
- $ npm install
43
- $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model
44
- $ npm start
45
- """
46
-
47
- import argparse
48
- import contextlib
49
- import json
50
- import os
51
- import platform
52
- import re
53
- import subprocess
54
- import sys
55
- import time
56
- import warnings
57
- from pathlib import Path
58
-
59
- import pandas as pd
60
- import torch
61
- from torch.utils.mobile_optimizer import optimize_for_mobile
62
-
63
- FILE = Path(__file__).resolve()
64
- ROOT = FILE.parents[0] # YOLOv5 root directory
65
- if str(ROOT) not in sys.path:
66
- sys.path.append(str(ROOT)) # add ROOT to PATH
67
- if platform.system() != "Windows":
68
- ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
69
-
70
- from models.experimental import attempt_load
71
- from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel
72
- from utils.dataloaders import LoadImages
73
- from utils.general import (
74
- LOGGER,
75
- Profile,
76
- check_dataset,
77
- check_img_size,
78
- check_requirements,
79
- check_version,
80
- check_yaml,
81
- colorstr,
82
- file_size,
83
- get_default_args,
84
- print_args,
85
- url2file,
86
- yaml_save,
87
- )
88
- from utils.torch_utils import select_device, smart_inference_mode
89
-
90
- MACOS = platform.system() == "Darwin" # macOS environment
91
-
92
-
93
- def export_formats():
94
- # YOLOv5 export formats
95
- x = [
96
- ["PyTorch", "-", ".pt", True, True],
97
- ["TorchScript", "torchscript", ".torchscript", True, True],
98
- ["ONNX", "onnx", ".onnx", True, True],
99
- ["OpenVINO", "openvino", "_openvino_model", True, False],
100
- ["TensorRT", "engine", ".engine", False, True],
101
- ["CoreML", "coreml", ".mlmodel", True, False],
102
- ["TensorFlow SavedModel", "saved_model", "_saved_model", True, True],
103
- ["TensorFlow GraphDef", "pb", ".pb", True, True],
104
- ["TensorFlow Lite", "tflite", ".tflite", True, False],
105
- ["TensorFlow Edge TPU", "edgetpu", "_edgetpu.tflite", False, False],
106
- ["TensorFlow.js", "tfjs", "_web_model", False, False],
107
- ["PaddlePaddle", "paddle", "_paddle_model", True, True],
108
- ]
109
- return pd.DataFrame(
110
- x, columns=["Format", "Argument", "Suffix", "CPU", "GPU"]
111
- )
112
-
113
-
114
- def try_export(inner_func):
115
-     # YOLOv5 export decorator, i.e. @try_export
116
- inner_args = get_default_args(inner_func)
117
-
118
- def outer_func(*args, **kwargs):
119
- prefix = inner_args["prefix"]
120
- try:
121
- with Profile() as dt:
122
- f, model = inner_func(*args, **kwargs)
123
- LOGGER.info(
124
- f"{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)"
125
- )
126
- return f, model
127
- except Exception as e:
128
- LOGGER.info(f"{prefix} export failure ❌ {dt.t:.1f}s: {e}")
129
- return None, None
130
-
131
- return outer_func
132
-
133
-
134
- @try_export
135
- def export_torchscript(
136
- model, im, file, optimize, prefix=colorstr("TorchScript:")
137
- ):
138
- # YOLOv5 TorchScript model export
139
- LOGGER.info(
140
- f"\n{prefix} starting export with torch {torch.__version__}..."
141
- )
142
- f = file.with_suffix(".torchscript")
143
-
144
- ts = torch.jit.trace(model, im, strict=False)
145
- d = {
146
- "shape": im.shape,
147
- "stride": int(max(model.stride)),
148
- "names": model.names,
149
- }
150
- extra_files = {"config.txt": json.dumps(d)} # torch._C.ExtraFilesMap()
151
- if (
152
- optimize
153
- ): # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
154
- optimize_for_mobile(ts)._save_for_lite_interpreter(
155
- str(f), _extra_files=extra_files
156
- )
157
- else:
158
- ts.save(str(f), _extra_files=extra_files)
159
- return f, None
160
-
161
-
162
- @try_export
163
- def export_onnx(
164
- model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX:")
165
- ):
166
- # YOLOv5 ONNX export
167
- check_requirements("onnx>=1.12.0")
168
- import onnx
169
-
170
- LOGGER.info(f"\n{prefix} starting export with onnx {onnx.__version__}...")
171
- f = file.with_suffix(".onnx")
172
-
173
- output_names = (
174
- ["output0", "output1"]
175
- if isinstance(model, SegmentationModel)
176
- else ["output0"]
177
- )
178
- if dynamic:
179
- dynamic = {
180
- "images": {0: "batch", 2: "height", 3: "width"}
181
- } # shape(1,3,640,640)
182
- if isinstance(model, SegmentationModel):
183
- dynamic["output0"] = {
184
- 0: "batch",
185
- 1: "anchors",
186
- } # shape(1,25200,85)
187
- dynamic["output1"] = {
188
- 0: "batch",
189
- 2: "mask_height",
190
- 3: "mask_width",
191
- } # shape(1,32,160,160)
192
- elif isinstance(model, DetectionModel):
193
- dynamic["output0"] = {
194
- 0: "batch",
195
- 1: "anchors",
196
- } # shape(1,25200,85)
197
-
198
- torch.onnx.export(
199
- model.cpu()
200
- if dynamic
201
- else model, # --dynamic only compatible with cpu
202
- im.cpu() if dynamic else im,
203
- f,
204
- verbose=False,
205
- opset_version=opset,
206
- do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
207
- input_names=["images"],
208
- output_names=output_names,
209
- dynamic_axes=dynamic or None,
210
- )
211
-
212
- # Checks
213
- model_onnx = onnx.load(f) # load onnx model
214
- onnx.checker.check_model(model_onnx) # check onnx model
215
-
216
- # Metadata
217
- d = {"stride": int(max(model.stride)), "names": model.names}
218
- for k, v in d.items():
219
- meta = model_onnx.metadata_props.add()
220
- meta.key, meta.value = k, str(v)
221
- onnx.save(model_onnx, f)
222
-
223
- # Simplify
224
- if simplify:
225
- try:
226
- cuda = torch.cuda.is_available()
227
- check_requirements(
228
- (
229
- "onnxruntime-gpu" if cuda else "onnxruntime",
230
- "onnx-simplifier>=0.4.1",
231
- )
232
- )
233
- import onnxsim
234
-
235
- LOGGER.info(
236
- f"{prefix} simplifying with onnx-simplifier {onnxsim.__version__}..."
237
- )
238
- model_onnx, check = onnxsim.simplify(model_onnx)
239
- assert check, "assert check failed"
240
- onnx.save(model_onnx, f)
241
- except Exception as e:
242
- LOGGER.info(f"{prefix} simplifier failure: {e}")
243
- return f, model_onnx
244
-
245
-
246
- @try_export
247
- def export_openvino(file, metadata, half, prefix=colorstr("OpenVINO:")):
248
- # YOLOv5 OpenVINO export
249
- check_requirements(
250
- "openvino-dev"
251
- ) # requires openvino-dev: https://pypi.org/project/openvino-dev/
252
- import openvino.inference_engine as ie
253
-
254
- LOGGER.info(
255
- f"\n{prefix} starting export with openvino {ie.__version__}..."
256
- )
257
- f = str(file).replace(".pt", f"_openvino_model{os.sep}")
258
-
259
- cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f} --data_type {'FP16' if half else 'FP32'}"
260
- subprocess.run(cmd.split(), check=True, env=os.environ) # export
261
- yaml_save(
262
- Path(f) / file.with_suffix(".yaml").name, metadata
263
- ) # add metadata.yaml
264
- return f, None
265
-
266
-
267
- @try_export
268
- def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePaddle:")):
269
- # YOLOv5 Paddle export
270
- check_requirements(("paddlepaddle", "x2paddle"))
271
- import x2paddle
272
- from x2paddle.convert import pytorch2paddle
273
-
274
- LOGGER.info(
275
- f"\n{prefix} starting export with X2Paddle {x2paddle.__version__}..."
276
- )
277
- f = str(file).replace(".pt", f"_paddle_model{os.sep}")
278
-
279
- pytorch2paddle(
280
- module=model, save_dir=f, jit_type="trace", input_examples=[im]
281
- ) # export
282
- yaml_save(
283
- Path(f) / file.with_suffix(".yaml").name, metadata
284
- ) # add metadata.yaml
285
- return f, None
286
-
287
-
288
- @try_export
289
- def export_coreml(model, im, file, int8, half, prefix=colorstr("CoreML:")):
290
- # YOLOv5 CoreML export
291
- check_requirements("coremltools")
292
- import coremltools as ct
293
-
294
- LOGGER.info(
295
- f"\n{prefix} starting export with coremltools {ct.__version__}..."
296
- )
297
- f = file.with_suffix(".mlmodel")
298
-
299
- ts = torch.jit.trace(model, im, strict=False) # TorchScript model
300
- ct_model = ct.convert(
301
- ts,
302
- inputs=[
303
- ct.ImageType(
304
- "image", shape=im.shape, scale=1 / 255, bias=[0, 0, 0]
305
- )
306
- ],
307
- )
308
- bits, mode = (
309
- (8, "kmeans_lut") if int8 else (16, "linear") if half else (32, None)
310
- )
311
- if bits < 32:
312
- if MACOS: # quantization only supported on macOS
313
- with warnings.catch_warnings():
314
- warnings.filterwarnings(
315
- "ignore", category=DeprecationWarning
316
- ) # suppress numpy==1.20 float warning
317
- ct_model = ct.models.neural_network.quantization_utils.quantize_weights(
318
- ct_model, bits, mode
319
- )
320
- else:
321
- print(
322
- f"{prefix} quantization only supported on macOS, skipping..."
323
- )
324
- ct_model.save(f)
325
- return f, ct_model
326
-
327
-
328
- @try_export
329
- def export_engine(
330
- model,
331
- im,
332
- file,
333
- half,
334
- dynamic,
335
- simplify,
336
- workspace=4,
337
- verbose=False,
338
- prefix=colorstr("TensorRT:"),
339
- ):
340
- # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
341
- assert (
342
- im.device.type != "cpu"
343
- ), "export running on CPU but must be on GPU, i.e. `python export.py --device 0`"
344
- try:
345
- import tensorrt as trt
346
- except Exception:
347
- if platform.system() == "Linux":
348
- check_requirements(
349
- "nvidia-tensorrt",
350
- cmds="-U --index-url https://pypi.ngc.nvidia.com",
351
- )
352
- import tensorrt as trt
353
-
354
- if (
355
- trt.__version__[0] == "7"
356
- ): # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
357
- grid = model.model[-1].anchor_grid
358
- model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
359
- export_onnx(model, im, file, 12, dynamic, simplify) # opset 12
360
- model.model[-1].anchor_grid = grid
361
- else: # TensorRT >= 8
362
- check_version(
363
- trt.__version__, "8.0.0", hard=True
364
- ) # require tensorrt>=8.0.0
365
- export_onnx(model, im, file, 12, dynamic, simplify) # opset 12
366
- onnx = file.with_suffix(".onnx")
367
-
368
- LOGGER.info(
369
- f"\n{prefix} starting export with TensorRT {trt.__version__}..."
370
- )
371
- assert onnx.exists(), f"failed to export ONNX file: {onnx}"
372
- f = file.with_suffix(".engine") # TensorRT engine file
373
- logger = trt.Logger(trt.Logger.INFO)
374
- if verbose:
375
- logger.min_severity = trt.Logger.Severity.VERBOSE
376
-
377
- builder = trt.Builder(logger)
378
- config = builder.create_builder_config()
379
- config.max_workspace_size = workspace * 1 << 30
380
- # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice
381
-
382
- flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
383
- network = builder.create_network(flag)
384
- parser = trt.OnnxParser(network, logger)
385
- if not parser.parse_from_file(str(onnx)):
386
- raise RuntimeError(f"failed to load ONNX file: {onnx}")
387
-
388
- inputs = [network.get_input(i) for i in range(network.num_inputs)]
389
- outputs = [network.get_output(i) for i in range(network.num_outputs)]
390
- for inp in inputs:
391
- LOGGER.info(
392
- f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}'
393
- )
394
- for out in outputs:
395
- LOGGER.info(
396
- f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}'
397
- )
398
-
399
- if dynamic:
400
- if im.shape[0] <= 1:
401
- LOGGER.warning(
402
- f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument"
403
- )
404
- profile = builder.create_optimization_profile()
405
- for inp in inputs:
406
- profile.set_shape(
407
- inp.name,
408
- (1, *im.shape[1:]),
409
- (max(1, im.shape[0] // 2), *im.shape[1:]),
410
- im.shape,
411
- )
412
- config.add_optimization_profile(profile)
413
-
414
- LOGGER.info(
415
- f"{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}"
416
- )
417
- if builder.platform_has_fast_fp16 and half:
418
- config.set_flag(trt.BuilderFlag.FP16)
419
- with builder.build_engine(network, config) as engine, open(f, "wb") as t:
420
- t.write(engine.serialize())
421
- return f, None
422
-
423
-
424
- @try_export
425
- def export_saved_model(
426
- model,
427
- im,
428
- file,
429
- dynamic,
430
- tf_nms=False,
431
- agnostic_nms=False,
432
- topk_per_class=100,
433
- topk_all=100,
434
- iou_thres=0.45,
435
- conf_thres=0.25,
436
- keras=False,
437
- prefix=colorstr("TensorFlow SavedModel:"),
438
- ):
439
- # YOLOv5 TensorFlow SavedModel export
440
- try:
441
- import tensorflow as tf
442
- except Exception:
443
- check_requirements(
444
- f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}"
445
- )
446
- import tensorflow as tf
447
- from tensorflow.python.framework.convert_to_constants import (
448
- convert_variables_to_constants_v2,
449
- )
450
-
451
- from models.tf import TFModel
452
-
453
- LOGGER.info(
454
- f"\n{prefix} starting export with tensorflow {tf.__version__}..."
455
- )
456
- f = str(file).replace(".pt", "_saved_model")
457
- batch_size, ch, *imgsz = list(im.shape) # BCHW
458
-
459
- tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
460
- im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow
461
- _ = tf_model.predict(
462
- im,
463
- tf_nms,
464
- agnostic_nms,
465
- topk_per_class,
466
- topk_all,
467
- iou_thres,
468
- conf_thres,
469
- )
470
- inputs = tf.keras.Input(
471
- shape=(*imgsz, ch), batch_size=None if dynamic else batch_size
472
- )
473
- outputs = tf_model.predict(
474
- inputs,
475
- tf_nms,
476
- agnostic_nms,
477
- topk_per_class,
478
- topk_all,
479
- iou_thres,
480
- conf_thres,
481
- )
482
- keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
483
- keras_model.trainable = False
484
- keras_model.summary()
485
- if keras:
486
- keras_model.save(f, save_format="tf")
487
- else:
488
- spec = tf.TensorSpec(
489
- keras_model.inputs[0].shape, keras_model.inputs[0].dtype
490
- )
491
- m = tf.function(lambda x: keras_model(x)) # full model
492
- m = m.get_concrete_function(spec)
493
- frozen_func = convert_variables_to_constants_v2(m)
494
- tfm = tf.Module()
495
- tfm.__call__ = tf.function(
496
- lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec]
497
- )
498
- tfm.__call__(im)
499
- tf.saved_model.save(
500
- tfm,
501
- f,
502
- options=tf.saved_model.SaveOptions(
503
- experimental_custom_gradients=False
504
- )
505
- if check_version(tf.__version__, "2.6")
506
- else tf.saved_model.SaveOptions(),
507
- )
508
- return f, keras_model
509
-
510
-
511
- @try_export
512
- def export_pb(keras_model, file, prefix=colorstr("TensorFlow GraphDef:")):
513
- # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
514
- import tensorflow as tf
515
- from tensorflow.python.framework.convert_to_constants import (
516
- convert_variables_to_constants_v2,
517
- )
518
-
519
- LOGGER.info(
520
- f"\n{prefix} starting export with tensorflow {tf.__version__}..."
521
- )
522
- f = file.with_suffix(".pb")
523
-
524
- m = tf.function(lambda x: keras_model(x)) # full model
525
- m = m.get_concrete_function(
526
- tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
527
- )
528
- frozen_func = convert_variables_to_constants_v2(m)
529
- frozen_func.graph.as_graph_def()
530
- tf.io.write_graph(
531
- graph_or_graph_def=frozen_func.graph,
532
- logdir=str(f.parent),
533
- name=f.name,
534
- as_text=False,
535
- )
536
- return f, None
537
-
538
-
539
- @try_export
540
- def export_tflite(
541
- keras_model,
542
- im,
543
- file,
544
- int8,
545
- data,
546
- nms,
547
- agnostic_nms,
548
- prefix=colorstr("TensorFlow Lite:"),
549
- ):
550
- # YOLOv5 TensorFlow Lite export
551
- import tensorflow as tf
552
-
553
- LOGGER.info(
554
- f"\n{prefix} starting export with tensorflow {tf.__version__}..."
555
- )
556
- batch_size, ch, *imgsz = list(im.shape) # BCHW
557
- f = str(file).replace(".pt", "-fp16.tflite")
558
-
559
- converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
560
- converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
561
- converter.target_spec.supported_types = [tf.float16]
562
- converter.optimizations = [tf.lite.Optimize.DEFAULT]
563
- if int8:
564
- from models.tf import representative_dataset_gen
565
-
566
- dataset = LoadImages(
567
- check_dataset(check_yaml(data))["train"],
568
- img_size=imgsz,
569
- auto=False,
570
- )
571
- converter.representative_dataset = lambda: representative_dataset_gen(
572
- dataset, ncalib=100
573
- )
574
- converter.target_spec.supported_ops = [
575
- tf.lite.OpsSet.TFLITE_BUILTINS_INT8
576
- ]
577
- converter.target_spec.supported_types = []
578
- converter.inference_input_type = tf.uint8 # or tf.int8
579
- converter.inference_output_type = tf.uint8 # or tf.int8
580
- converter.experimental_new_quantizer = True
581
- f = str(file).replace(".pt", "-int8.tflite")
582
- if nms or agnostic_nms:
583
- converter.target_spec.supported_ops.append(
584
- tf.lite.OpsSet.SELECT_TF_OPS
585
- )
586
-
587
- tflite_model = converter.convert()
588
- open(f, "wb").write(tflite_model)
589
- return f, None
590
-
591
-
592
- @try_export
593
- def export_edgetpu(file, prefix=colorstr("Edge TPU:")):
594
- # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
595
- cmd = "edgetpu_compiler --version"
596
- help_url = "https://coral.ai/docs/edgetpu/compiler/"
597
- assert (
598
- platform.system() == "Linux"
599
- ), f"export only supported on Linux. See {help_url}"
600
- if subprocess.run(f"{cmd} >/dev/null", shell=True).returncode != 0:
601
- LOGGER.info(
602
- f"\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}"
603
- )
604
- sudo = (
605
- subprocess.run("sudo --version >/dev/null", shell=True).returncode
606
- == 0
607
- ) # sudo installed on system
608
- for c in (
609
- "curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -",
610
- 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
611
- "sudo apt-get update",
612
- "sudo apt-get install edgetpu-compiler",
613
- ):
614
- subprocess.run(
615
- c if sudo else c.replace("sudo ", ""), shell=True, check=True
616
- )
617
- ver = (
618
- subprocess.run(cmd, shell=True, capture_output=True, check=True)
619
- .stdout.decode()
620
- .split()[-1]
621
- )
622
-
623
- LOGGER.info(f"\n{prefix} starting export with Edge TPU compiler {ver}...")
624
- f = str(file).replace(".pt", "-int8_edgetpu.tflite") # Edge TPU model
625
- f_tfl = str(file).replace(".pt", "-int8.tflite") # TFLite model
626
-
627
- cmd = f"edgetpu_compiler -s -d -k 10 --out_dir {file.parent} {f_tfl}"
628
- subprocess.run(cmd.split(), check=True)
629
- return f, None
630
-
631
-
632
- @try_export
633
- def export_tfjs(file, prefix=colorstr("TensorFlow.js:")):
634
- # YOLOv5 TensorFlow.js export
635
- check_requirements("tensorflowjs")
636
- import tensorflowjs as tfjs
637
-
638
- LOGGER.info(
639
- f"\n{prefix} starting export with tensorflowjs {tfjs.__version__}..."
640
- )
641
- f = str(file).replace(".pt", "_web_model") # js dir
642
- f_pb = file.with_suffix(".pb") # *.pb path
643
- f_json = f"{f}/model.json" # *.json path
644
-
645
- cmd = (
646
- f"tensorflowjs_converter --input_format=tf_frozen_model "
647
- f"--output_node_names=Identity,Identity_1,Identity_2,Identity_3 {f_pb} {f}"
648
- )
649
- subprocess.run(cmd.split())
650
-
651
- json = Path(f_json).read_text()
652
- with open(f_json, "w") as j: # sort JSON Identity_* in ascending order
653
- subst = re.sub(
654
- r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
655
- r'"Identity.?.?": {"name": "Identity.?.?"}, '
656
- r'"Identity.?.?": {"name": "Identity.?.?"}, '
657
- r'"Identity.?.?": {"name": "Identity.?.?"}}}',
658
- r'{"outputs": {"Identity": {"name": "Identity"}, '
659
- r'"Identity_1": {"name": "Identity_1"}, '
660
- r'"Identity_2": {"name": "Identity_2"}, '
661
- r'"Identity_3": {"name": "Identity_3"}}}',
662
- json,
663
- )
664
- j.write(subst)
665
- return f, None
666
-
667
-
668
- def add_tflite_metadata(file, metadata, num_outputs):
669
- # Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata
670
- with contextlib.suppress(ImportError):
671
- # check_requirements('tflite_support')
672
- from tflite_support import flatbuffers
673
- from tflite_support import metadata as _metadata
674
- from tflite_support import metadata_schema_py_generated as _metadata_fb
675
-
676
- tmp_file = Path("/tmp/meta.txt")
677
- with open(tmp_file, "w") as meta_f:
678
- meta_f.write(str(metadata))
679
-
680
- model_meta = _metadata_fb.ModelMetadataT()
681
- label_file = _metadata_fb.AssociatedFileT()
682
- label_file.name = tmp_file.name
683
- model_meta.associatedFiles = [label_file]
684
-
685
- subgraph = _metadata_fb.SubGraphMetadataT()
686
- subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()]
687
- subgraph.outputTensorMetadata = [
688
- _metadata_fb.TensorMetadataT()
689
- ] * num_outputs
690
- model_meta.subgraphMetadata = [subgraph]
691
-
692
- b = flatbuffers.Builder(0)
693
- b.Finish(
694
- model_meta.Pack(b),
695
- _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER,
696
- )
697
- metadata_buf = b.Output()
698
-
699
- populator = _metadata.MetadataPopulator.with_model_file(file)
700
- populator.load_metadata_buffer(metadata_buf)
701
- populator.load_associated_files([str(tmp_file)])
702
- populator.populate()
703
- tmp_file.unlink()
704
-
705
-
706
- @smart_inference_mode()
707
- def run(
708
- data=ROOT / "data/coco128.yaml", # 'dataset.yaml path'
709
- weights=ROOT / "yolov5s.pt", # weights path
710
- imgsz=(640, 640), # image (height, width)
711
- batch_size=1, # batch size
712
- device="cpu", # cuda device, i.e. 0 or 0,1,2,3 or cpu
713
- include=("torchscript", "onnx"), # include formats
714
- half=False, # FP16 half-precision export
715
- inplace=False, # set YOLOv5 Detect() inplace=True
716
- keras=False, # use Keras
717
- optimize=False, # TorchScript: optimize for mobile
718
- int8=False, # CoreML/TF INT8 quantization
719
- dynamic=False, # ONNX/TF/TensorRT: dynamic axes
720
- simplify=False, # ONNX: simplify model
721
- opset=12, # ONNX: opset version
722
- verbose=False, # TensorRT: verbose log
723
- workspace=4, # TensorRT: workspace size (GB)
724
- nms=False, # TF: add NMS to model
725
- agnostic_nms=False, # TF: add agnostic NMS to model
726
- topk_per_class=100, # TF.js NMS: topk per class to keep
727
- topk_all=100, # TF.js NMS: topk for all classes to keep
728
- iou_thres=0.45, # TF.js NMS: IoU threshold
729
- conf_thres=0.25, # TF.js NMS: confidence threshold
730
- ):
731
- t = time.time()
732
- include = [x.lower() for x in include] # to lowercase
733
- fmts = tuple(export_formats()["Argument"][1:]) # --include arguments
734
- flags = [x in include for x in fmts]
735
- assert sum(flags) == len(
736
- include
737
- ), f"ERROR: Invalid --include {include}, valid --include arguments are {fmts}"
738
- (
739
- jit,
740
- onnx,
741
- xml,
742
- engine,
743
- coreml,
744
- saved_model,
745
- pb,
746
- tflite,
747
- edgetpu,
748
- tfjs,
749
- paddle,
750
- ) = flags # export booleans
751
- file = Path(
752
- url2file(weights)
753
- if str(weights).startswith(("http:/", "https:/"))
754
- else weights
755
- ) # PyTorch weights
756
-
757
- # Load PyTorch model
758
- device = select_device(device)
759
- if half:
760
- assert (
761
- device.type != "cpu" or coreml
762
- ), "--half only compatible with GPU export, i.e. use --device 0"
763
- assert (
764
- not dynamic
765
- ), "--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both"
766
- model = attempt_load(
767
- weights, device=device, inplace=True, fuse=True
768
- ) # load FP32 model
769
-
770
- # Checks
771
- imgsz *= 2 if len(imgsz) == 1 else 1 # expand
772
- if optimize:
773
- assert (
774
- device.type == "cpu"
775
- ), "--optimize not compatible with cuda devices, i.e. use --device cpu"
776
-
777
- # Input
778
- gs = int(max(model.stride)) # grid size (max stride)
779
- imgsz = [
780
- check_img_size(x, gs) for x in imgsz
781
- ] # verify img_size are gs-multiples
782
- im = torch.zeros(batch_size, 3, *imgsz).to(
783
- device
784
- ) # image size(1,3,320,192) BCHW iDetection
785
-
786
- # Update model
787
- model.eval()
788
- for k, m in model.named_modules():
789
- if isinstance(m, Detect):
790
- m.inplace = inplace
791
- m.dynamic = dynamic
792
- m.export = True
793
-
794
- for _ in range(2):
795
- y = model(im) # dry runs
796
- if half and not coreml:
797
- im, model = im.half(), model.half() # to FP16
798
- shape = tuple(
799
- (y[0] if isinstance(y, tuple) else y).shape
800
- ) # model output shape
801
- metadata = {
802
- "stride": int(max(model.stride)),
803
- "names": model.names,
804
- } # model metadata
805
- LOGGER.info(
806
- f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)"
807
- )
808
-
809
- # Exports
810
- f = [""] * len(fmts) # exported filenames
811
- warnings.filterwarnings(
812
- action="ignore", category=torch.jit.TracerWarning
813
- ) # suppress TracerWarning
814
- if jit: # TorchScript
815
- f[0], _ = export_torchscript(model, im, file, optimize)
816
- if engine: # TensorRT required before ONNX
817
- f[1], _ = export_engine(
818
- model, im, file, half, dynamic, simplify, workspace, verbose
819
- )
820
- if onnx or xml: # OpenVINO requires ONNX
821
- f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
822
- if xml: # OpenVINO
823
- f[3], _ = export_openvino(file, metadata, half)
824
- if coreml: # CoreML
825
- f[4], _ = export_coreml(model, im, file, int8, half)
826
- if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats
827
- assert (
828
- not tflite or not tfjs
829
- ), "TFLite and TF.js models must be exported separately, please pass only one type."
830
- assert not isinstance(
831
- model, ClassificationModel
832
- ), "ClassificationModel export to TF formats not yet supported."
833
- f[5], s_model = export_saved_model(
834
- model.cpu(),
835
- im,
836
- file,
837
- dynamic,
838
- tf_nms=nms or agnostic_nms or tfjs,
839
- agnostic_nms=agnostic_nms or tfjs,
840
- topk_per_class=topk_per_class,
841
- topk_all=topk_all,
842
- iou_thres=iou_thres,
843
- conf_thres=conf_thres,
844
- keras=keras,
845
- )
846
- if pb or tfjs: # pb prerequisite to tfjs
847
- f[6], _ = export_pb(s_model, file)
848
- if tflite or edgetpu:
849
- f[7], _ = export_tflite(
850
- s_model,
851
- im,
852
- file,
853
- int8 or edgetpu,
854
- data=data,
855
- nms=nms,
856
- agnostic_nms=agnostic_nms,
857
- )
858
- if edgetpu:
859
- f[8], _ = export_edgetpu(file)
860
- add_tflite_metadata(
861
- f[8] or f[7], metadata, num_outputs=len(s_model.outputs)
862
- )
863
- if tfjs:
864
- f[9], _ = export_tfjs(file)
865
- if paddle: # PaddlePaddle
866
- f[10], _ = export_paddle(model, im, file, metadata)
867
-
868
- # Finish
869
- f = [str(x) for x in f if x] # filter out '' and None
870
- if any(f):
871
- cls, det, seg = (
872
- isinstance(model, x)
873
- for x in (ClassificationModel, DetectionModel, SegmentationModel)
874
- ) # type
875
- det &= (
876
- not seg
877
- ) # segmentation models inherit from SegmentationModel(DetectionModel)
878
- dir = Path("segment" if seg else "classify" if cls else "")
879
- h = "--half" if half else "" # --half FP16 inference arg
880
- s = (
881
- "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference"
882
- if cls
883
- else "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference"
884
- if seg
885
- else ""
886
- )
887
- LOGGER.info(
888
- f"\nExport complete ({time.time() - t:.1f}s)"
889
- f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
890
- f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}"
891
- f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}"
892
- f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}"
893
- f"\nVisualize: https://netron.app"
894
- )
895
- return f # return list of exported files/dirs
896
-
897
-
898
- def parse_opt():
899
- parser = argparse.ArgumentParser()
900
- parser.add_argument(
901
- "--data",
902
- type=str,
903
- default=ROOT / "data/coco128.yaml",
904
- help="dataset.yaml path",
905
- )
906
- parser.add_argument(
907
- "--weights",
908
- nargs="+",
909
- type=str,
910
- default=ROOT / "yolov5s.pt",
911
- help="model.pt path(s)",
912
- )
913
- parser.add_argument(
914
- "--imgsz",
915
- "--img",
916
- "--img-size",
917
- nargs="+",
918
- type=int,
919
- default=[640, 640],
920
- help="image (h, w)",
921
- )
922
- parser.add_argument("--batch-size", type=int, default=1, help="batch size")
923
- parser.add_argument(
924
- "--device", default="cpu", help="cuda device, i.e. 0 or 0,1,2,3 or cpu"
925
- )
926
- parser.add_argument(
927
- "--half", action="store_true", help="FP16 half-precision export"
928
- )
929
- parser.add_argument(
930
- "--inplace",
931
- action="store_true",
932
- help="set YOLOv5 Detect() inplace=True",
933
- )
934
- parser.add_argument("--keras", action="store_true", help="TF: use Keras")
935
- parser.add_argument(
936
- "--optimize",
937
- action="store_true",
938
- help="TorchScript: optimize for mobile",
939
- )
940
- parser.add_argument(
941
- "--int8", action="store_true", help="CoreML/TF INT8 quantization"
942
- )
943
- parser.add_argument(
944
- "--dynamic", action="store_true", help="ONNX/TF/TensorRT: dynamic axes"
945
- )
946
- parser.add_argument(
947
- "--simplify", action="store_true", help="ONNX: simplify model"
948
- )
949
- parser.add_argument(
950
- "--opset", type=int, default=17, help="ONNX: opset version"
951
- )
952
- parser.add_argument(
953
- "--verbose", action="store_true", help="TensorRT: verbose log"
954
- )
955
- parser.add_argument(
956
- "--workspace",
957
- type=int,
958
- default=4,
959
- help="TensorRT: workspace size (GB)",
960
- )
961
- parser.add_argument(
962
- "--nms", action="store_true", help="TF: add NMS to model"
963
- )
964
- parser.add_argument(
965
- "--agnostic-nms",
966
- action="store_true",
967
- help="TF: add agnostic NMS to model",
968
- )
969
- parser.add_argument(
970
- "--topk-per-class",
971
- type=int,
972
- default=100,
973
- help="TF.js NMS: topk per class to keep",
974
- )
975
- parser.add_argument(
976
- "--topk-all",
977
- type=int,
978
- default=100,
979
- help="TF.js NMS: topk for all classes to keep",
980
- )
981
- parser.add_argument(
982
- "--iou-thres",
983
- type=float,
984
- default=0.45,
985
- help="TF.js NMS: IoU threshold",
986
- )
987
- parser.add_argument(
988
- "--conf-thres",
989
- type=float,
990
- default=0.25,
991
- help="TF.js NMS: confidence threshold",
992
- )
993
- parser.add_argument(
994
- "--include",
995
- nargs="+",
996
- default=["torchscript"],
997
- help="torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle",
998
- )
999
- opt = parser.parse_args()
1000
- print_args(vars(opt))
1001
- return opt
1002
-
1003
-
1004
- def main(opt):
1005
- for opt.weights in (
1006
- opt.weights if isinstance(opt.weights, list) else [opt.weights]
1007
- ):
1008
- run(**vars(opt))
1009
-
1010
-
1011
- if __name__ == "__main__":
1012
- opt = parse_opt()
1013
- main(opt)
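
For context on how the deleted export script above was typically driven, here is a minimal sketch of calling its `run()` entry point from Python; the checkpoint name and the import path are assumptions for illustration, not part of this commit:

```python
# Hypothetical usage sketch of the deleted export.py; not part of this commit.
# Assumes export.py is importable and "yolov5s.pt" exists locally.
from export import run

run(
    weights="yolov5s.pt",             # PyTorch checkpoint to convert
    imgsz=(640, 640),                 # image (height, width)
    include=("torchscript", "onnx"),  # target formats, as listed in parse_opt()
    device="cpu",
)
```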
spaces/AgentVerse/agentVerse/ui/src/classes/npc.ts DELETED
@@ -1,246 +0,0 @@
1
- import { Actor } from "./actor";
2
- import { DIRECTION } from "../utils";
3
- import {
4
- MoveTo,
5
- PathFinder,
6
- Board,
7
- } from "../phaser3-rex-plugins/plugins/board-components";
8
- import { Label } from "../phaser3-rex-plugins/templates/ui/ui-components";
9
- import { COLOR_DARK, COLOR_LIGHT, COLOR_PRIMARY } from "../constants";
10
- import { TownScene } from "../scenes";
11
- import eventsCenter from "./event_center";
12
-
13
- export class NPC extends Actor {
14
- private moveTo: MoveTo;
15
- private board: Board;
16
- private canMove: boolean = true;
17
- private talkWithPlayer: boolean = false;
18
- private path: PathFinder.NodeType[] = [];
19
- private finalDirection: number = undefined;
20
- private targetLocation: string = undefined;
21
- private targetNPC: NPC = undefined;
22
- private textBox: Label = undefined;
23
-
24
- public id: number;
25
- public direction: number = DIRECTION.DOWN;
26
-
27
- constructor(
28
- scene: Phaser.Scene,
29
- board: Board,
30
- x: number,
31
- y: number,
32
- name: string,
33
- id: number
34
- ) {
35
- super(scene, x, y, name);
36
-
37
- this.setName(name);
38
- this.board = board;
39
- this.id = id;
40
- // PHYSICS
41
- this.getBody().setSize(14, 16);
42
- this.getBody().setOffset(0, 4);
43
- this.getBody().setImmovable(true);
44
- this.setOrigin(0, 0.2);
45
-
46
- this.initAnimations();
47
- this.moveTo = this.scene.rexBoard.add.moveTo(this, {
48
- speed: 55,
49
- sneak: true,
50
- });
51
- this.listenToDirectionEvent();
52
- }
53
-
54
- update(): void {
55
- if (this.path.length > 0 && !this.moveTo.isRunning && this.canMove) {
56
- var tileXY = this.board.worldXYToTileXY(this.x, this.y);
57
- if (tileXY.x == this.path[0].x) {
58
- if (tileXY.y < this.path[0].y) this.changeDirection(DIRECTION.DOWN);
59
- else if (tileXY.y > this.path[0].y) this.changeDirection(DIRECTION.UP);
60
- } else if (tileXY.y == this.path[0].y) {
61
- if (tileXY.x < this.path[0].x) this.changeDirection(DIRECTION.RIGHT);
62
- else if (tileXY.x > this.path[0].x)
63
- this.changeDirection(DIRECTION.LEFT);
64
- }
65
- var move = this.moveTo.moveTo(this.path.shift());
66
- move.removeAllListeners("complete");
67
- move.on("complete", () => {
68
- if (this.path.length == 0) {
69
- this.changeDirection(this.finalDirection);
70
- this.emitTurnEvent();
71
- if (this.targetLocation != undefined) {
72
- fetch("http://127.0.0.1:10002/update_location", {
73
- method: "POST",
74
- headers: {
75
- "Content-Type": "application/json",
76
- },
77
- credentials: "same-origin",
78
- body: JSON.stringify({
79
- agent_locations: {
80
- [this.name]: this.targetLocation,
81
- },
82
- }),
83
- });
84
- }
85
- }
86
- });
87
- }
88
-
89
- var text = "";
90
- switch (this.direction) {
91
- case DIRECTION.UP:
92
- text = "up";
93
- break;
94
- case DIRECTION.DOWN:
95
- text = "down";
96
- break;
97
- case DIRECTION.LEFT:
98
- text = "left";
99
- break;
100
- case DIRECTION.RIGHT:
101
- text = "right";
102
- break;
103
- }
104
- this.anims.play(this.name + "-walk-" + text, true);
105
- if (this.anims.isPlaying && !this.moveTo.isRunning)
106
- this.anims.setCurrentFrame(this.anims.currentAnim!.frames[0]);
107
- this.updateTextBox();
108
- this.depth = this.y + this.height * 0.8;
109
- }
110
-
111
- listenToDirectionEvent(): void {
112
- eventsCenter.on(this.name + "-up", () => {
113
- this.changeDirection(DIRECTION.UP);
114
- });
115
- eventsCenter.on(this.name + "-down", () => {
116
- this.changeDirection(DIRECTION.DOWN);
117
- });
118
- eventsCenter.on(this.name + "-left", () => {
119
- this.changeDirection(DIRECTION.LEFT);
120
- });
121
- eventsCenter.on(this.name + "-right", () => {
122
- this.changeDirection(DIRECTION.RIGHT);
123
- });
124
- }
125
-
126
- emitTurnEvent(): void {
127
- // Make the listener NPC turn to the speaker NPC.
128
- if (this.targetNPC == undefined) return;
129
- var direction = "";
130
- switch (this.finalDirection) {
131
- case DIRECTION.UP:
132
- direction = "down";
133
- break;
134
- case DIRECTION.DOWN:
135
- direction = "up";
136
- break;
137
- case DIRECTION.LEFT:
138
- direction = "right";
139
- break;
140
- case DIRECTION.RIGHT:
141
- direction = "left";
142
- break;
143
- }
144
- eventsCenter.emit(this.targetNPC.name + "-" + direction);
145
- this.setTargetNPC();
146
- }
147
-
148
- updateTextBox(): void {
149
- if (this.textBox == undefined) return;
150
- this.textBox.setOrigin(0.5, 1.0);
151
- var scale = this.scene.cameras.main.zoom;
152
- this.textBox.setX(this.x + this.width / 2);
153
- this.textBox.setY(this.y - this.height * 0.2);
154
- this.textBox.depth = this.y + this.height * 0.8;
155
- this.textBox.getChildren().forEach((child) => {
156
- child.setDepth(this.y + this.height * 0.8);
157
- });
158
- }
159
-
160
- public setTextBox(text: string): void {
161
- this.destroyTextBox();
162
- var scale = this.scene.cameras.main.zoom;
163
- var scene = this.scene as TownScene;
164
- this.textBox = scene.rexUI.add
165
- .label({
166
- x: this.x + this.width / 2,
167
- y: this.y - this.height * 0.2,
168
- width: 24 * scale,
169
- orientation: "x",
170
- background: scene.rexUI.add.roundRectangle(
171
- 0,
172
- 0,
173
- 2,
174
- 2,
175
- 20,
176
- COLOR_PRIMARY,
177
- 0.7
178
- ),
179
- text: scene.rexUI.wrapExpandText(
180
- scene.add.text(0, 0, text, {
181
- fontSize: 10,
182
- })
183
- ),
184
- expandTextWidth: true,
185
- space: {
186
- left: 10,
187
- right: 10,
188
- top: 10,
189
- bottom: 10,
190
- },
191
- })
192
- .setOrigin(0.5, 1.0)
193
- .setScale(1 / scale, 1 / scale)
194
- .setDepth(this.y + this.height * 0.8)
195
- .layout();
196
- }
197
-
198
- public destroyTextBox(): void {
199
- if (this.textBox != undefined) this.textBox.destroy();
200
- this.textBox = undefined;
201
- }
202
-
203
- public changeDirection(direction: number): void {
204
- if (direction == undefined) return;
205
- this.direction = direction;
206
- }
207
-
208
- public moveAlongPath(
209
- path: PathFinder.NodeType[],
210
- finalDirection: number = undefined,
211
- targetLocation: string = undefined
212
- ): void {
213
- if (path.length == 0) return;
214
- if (this.moveTo.isRunning) return;
215
- if (this.path.length > 0) return;
216
- this.path = path;
217
- this.finalDirection = finalDirection;
218
- this.targetLocation = targetLocation;
219
- }
220
-
221
- public pauseMoving(): void {
222
- this.moveTo.stop();
223
- this.canMove = false;
224
- }
225
-
226
- public resumeMoving(): void {
227
- this.moveTo.resume();
228
- this.canMove = true;
229
- }
230
-
231
- public isMoving(): boolean {
232
- return this.moveTo.isRunning || this.path.length > 0;
233
- }
234
-
235
- public isTalking(): boolean {
236
- return this.talkWithPlayer;
237
- }
238
-
239
- public setTalking(talking: boolean): void {
240
- this.talkWithPlayer = talking;
241
- }
242
-
243
- public setTargetNPC(targetNPC: NPC = undefined): void {
244
- this.targetNPC = targetNPC;
245
- }
246
- }
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/fsm-plugin.d.ts DELETED
@@ -1,8 +0,0 @@
- import FSM from './fsm';
-
- export default class FSMPlugin extends Phaser.Plugins.BasePlugin {
- add(
- config?: FSM.IConfig
- ): FSM;
-
- }
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/Sizer.js DELETED
@@ -1,79 +0,0 @@
- import BaseSizer from '../basesizer/BaseSizer.js';
- import Methods from './Methods.js';
- import GetChildrenProportion from './GetChildrenProportion.js';
- import GetOrientationMode from '../utils/GetOrientationMode.js';
-
- const IsPlainObject = Phaser.Utils.Objects.IsPlainObject;
- const GetValue = Phaser.Utils.Objects.GetValue;
-
- class Sizer extends BaseSizer {
- constructor(scene, x, y, minWidth, minHeight, orientation, config) {
- if (IsPlainObject(x)) {
- config = x;
- x = GetValue(config, 'x', 0);
- y = GetValue(config, 'y', 0);
- minWidth = GetValue(config, 'width', undefined);
- minHeight = GetValue(config, 'height', undefined);
- orientation = GetValue(config, 'orientation', 0);
- } else if (IsPlainObject(minWidth)) {
- config = minWidth;
- minWidth = GetValue(config, 'width', undefined);
- minHeight = GetValue(config, 'height', undefined);
- orientation = GetValue(config, 'orientation', 0);
- } else if (IsPlainObject(orientation)) {
- config = orientation;
- orientation = GetValue(config, 'orientation', 0);
- }
-
- if (orientation === undefined) {
- orientation = 0;
- }
- super(scene, x, y, minWidth, minHeight, config);
-
- this.type = 'rexSizer';
- this.sizerChildren = [];
- this.setOrientation(orientation);
- this.setItemSpacing(GetValue(config, 'space.item', 0));
- this.setStartChildIndex(GetValue(config, 'startChildIndex', 0));
- this.setRTL(GetValue(config, 'rtl', false));
-
- this.addChildrenMap('items', this.sizerChildren);
- }
-
- setOrientation(orientation) {
- this.orientation = GetOrientationMode(orientation);
- return this;
- }
-
- setItemSpacing(space) {
- this.space.item = space;
- return this;
- }
-
- setStartChildIndex(index) {
- this.startChildIndex = index;
- return this;
- }
-
- setRTL(enable) {
- if (enable === undefined) {
- enable = true;
- }
- this.rtl = enable;
- return this;
- }
-
- get childrenProportion() {
- if (this._childrenProportion === undefined) {
- this._childrenProportion = GetChildrenProportion.call(this);
- }
- return this._childrenProportion;
- }
- }
-
- Object.assign(
- Sizer.prototype,
- Methods
- );
-
- export default Sizer;
spaces/Aki004/herta-so-vits/vdecoder/hifigan/models.py DELETED
@@ -1,503 +0,0 @@
1
- import os
2
- import json
3
- from .env import AttrDict
4
- import numpy as np
5
- import torch
6
- import torch.nn.functional as F
7
- import torch.nn as nn
8
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
9
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
10
- from .utils import init_weights, get_padding
11
-
12
- LRELU_SLOPE = 0.1
13
-
14
-
15
- def load_model(model_path, device='cuda'):
16
- config_file = os.path.join(os.path.split(model_path)[0], 'config.json')
17
- with open(config_file) as f:
18
- data = f.read()
19
-
20
- global h
21
- json_config = json.loads(data)
22
- h = AttrDict(json_config)
23
-
24
- generator = Generator(h).to(device)
25
-
26
- cp_dict = torch.load(model_path)
27
- generator.load_state_dict(cp_dict['generator'])
28
- generator.eval()
29
- generator.remove_weight_norm()
30
- del cp_dict
31
- return generator, h
32
-
33
-
34
- class ResBlock1(torch.nn.Module):
35
- def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
36
- super(ResBlock1, self).__init__()
37
- self.h = h
38
- self.convs1 = nn.ModuleList([
39
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
40
- padding=get_padding(kernel_size, dilation[0]))),
41
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
42
- padding=get_padding(kernel_size, dilation[1]))),
43
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
44
- padding=get_padding(kernel_size, dilation[2])))
45
- ])
46
- self.convs1.apply(init_weights)
47
-
48
- self.convs2 = nn.ModuleList([
49
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
50
- padding=get_padding(kernel_size, 1))),
51
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
52
- padding=get_padding(kernel_size, 1))),
53
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
54
- padding=get_padding(kernel_size, 1)))
55
- ])
56
- self.convs2.apply(init_weights)
57
-
58
- def forward(self, x):
59
- for c1, c2 in zip(self.convs1, self.convs2):
60
- xt = F.leaky_relu(x, LRELU_SLOPE)
61
- xt = c1(xt)
62
- xt = F.leaky_relu(xt, LRELU_SLOPE)
63
- xt = c2(xt)
64
- x = xt + x
65
- return x
66
-
67
- def remove_weight_norm(self):
68
- for l in self.convs1:
69
- remove_weight_norm(l)
70
- for l in self.convs2:
71
- remove_weight_norm(l)
72
-
73
-
74
- class ResBlock2(torch.nn.Module):
75
- def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
76
- super(ResBlock2, self).__init__()
77
- self.h = h
78
- self.convs = nn.ModuleList([
79
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
80
- padding=get_padding(kernel_size, dilation[0]))),
81
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
82
- padding=get_padding(kernel_size, dilation[1])))
83
- ])
84
- self.convs.apply(init_weights)
85
-
86
- def forward(self, x):
87
- for c in self.convs:
88
- xt = F.leaky_relu(x, LRELU_SLOPE)
89
- xt = c(xt)
90
- x = xt + x
91
- return x
92
-
93
- def remove_weight_norm(self):
94
- for l in self.convs:
95
- remove_weight_norm(l)
96
-
97
-
98
- def padDiff(x):
99
- return F.pad(F.pad(x, (0,0,-1,1), 'constant', 0) - x, (0,0,0,-1), 'constant', 0)
100
-
101
- class SineGen(torch.nn.Module):
102
- """ Definition of sine generator
103
- SineGen(samp_rate, harmonic_num = 0,
104
- sine_amp = 0.1, noise_std = 0.003,
105
- voiced_threshold = 0,
106
- flag_for_pulse=False)
107
- samp_rate: sampling rate in Hz
108
- harmonic_num: number of harmonic overtones (default 0)
109
- sine_amp: amplitude of sine-wavefrom (default 0.1)
110
- noise_std: std of Gaussian noise (default 0.003)
111
- voiced_thoreshold: F0 threshold for U/V classification (default 0)
112
- flag_for_pulse: this SinGen is used inside PulseGen (default False)
113
- Note: when flag_for_pulse is True, the first time step of a voiced
114
- segment is always sin(np.pi) or cos(0)
115
- """
116
-
117
- def __init__(self, samp_rate, harmonic_num=0,
118
- sine_amp=0.1, noise_std=0.003,
119
- voiced_threshold=0,
120
- flag_for_pulse=False):
121
- super(SineGen, self).__init__()
122
- self.sine_amp = sine_amp
123
- self.noise_std = noise_std
124
- self.harmonic_num = harmonic_num
125
- self.dim = self.harmonic_num + 1
126
- self.sampling_rate = samp_rate
127
- self.voiced_threshold = voiced_threshold
128
- self.flag_for_pulse = flag_for_pulse
129
-
130
- def _f02uv(self, f0):
131
- # generate uv signal
132
- uv = (f0 > self.voiced_threshold).type(torch.float32)
133
- return uv
134
-
135
- def _f02sine(self, f0_values):
136
- """ f0_values: (batchsize, length, dim)
137
- where dim indicates fundamental tone and overtones
138
- """
139
- # convert to F0 in rad. The interger part n can be ignored
140
- # because 2 * np.pi * n doesn't affect phase
141
- rad_values = (f0_values / self.sampling_rate) % 1
142
-
143
- # initial phase noise (no noise for fundamental component)
144
- rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \
145
- device=f0_values.device)
146
- rand_ini[:, 0] = 0
147
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
148
-
149
- # instantanouse phase sine[t] = sin(2*pi \sum_i=1 ^{t} rad)
150
- if not self.flag_for_pulse:
151
- # for normal case
152
-
153
- # To prevent torch.cumsum numerical overflow,
154
- # it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
155
- # Buffer tmp_over_one_idx indicates the time step to add -1.
156
- # This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi
157
- tmp_over_one = torch.cumsum(rad_values, 1) % 1
158
- tmp_over_one_idx = (padDiff(tmp_over_one)) < 0
159
- cumsum_shift = torch.zeros_like(rad_values)
160
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
161
-
162
- sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1)
163
- * 2 * np.pi)
164
- else:
165
- # If necessary, make sure that the first time step of every
166
- # voiced segments is sin(pi) or cos(0)
167
- # This is used for pulse-train generation
168
-
169
- # identify the last time step in unvoiced segments
170
- uv = self._f02uv(f0_values)
171
- uv_1 = torch.roll(uv, shifts=-1, dims=1)
172
- uv_1[:, -1, :] = 1
173
- u_loc = (uv < 1) * (uv_1 > 0)
174
-
175
- # get the instantanouse phase
176
- tmp_cumsum = torch.cumsum(rad_values, dim=1)
177
- # different batch needs to be processed differently
178
- for idx in range(f0_values.shape[0]):
179
- temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
180
- temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
181
- # stores the accumulation of i.phase within
182
- # each voiced segments
183
- tmp_cumsum[idx, :, :] = 0
184
- tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
185
-
186
- # rad_values - tmp_cumsum: remove the accumulation of i.phase
187
- # within the previous voiced segment.
188
- i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
189
-
190
- # get the sines
191
- sines = torch.cos(i_phase * 2 * np.pi)
192
- return sines
193
-
194
- def forward(self, f0):
195
- """ sine_tensor, uv = forward(f0)
196
- input F0: tensor(batchsize=1, length, dim=1)
197
- f0 for unvoiced steps should be 0
198
- output sine_tensor: tensor(batchsize=1, length, dim)
199
- output uv: tensor(batchsize=1, length, 1)
200
- """
201
- with torch.no_grad():
202
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim,
203
- device=f0.device)
204
- # fundamental component
205
- fn = torch.multiply(f0, torch.FloatTensor([[range(1, self.harmonic_num + 2)]]).to(f0.device))
206
-
207
- # generate sine waveforms
208
- sine_waves = self._f02sine(fn) * self.sine_amp
209
-
210
- # generate uv signal
211
- # uv = torch.ones(f0.shape)
212
- # uv = uv * (f0 > self.voiced_threshold)
213
- uv = self._f02uv(f0)
214
-
215
- # noise: for unvoiced should be similar to sine_amp
216
- # std = self.sine_amp/3 -> max value ~ self.sine_amp
217
- # . for voiced regions is self.noise_std
218
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
219
- noise = noise_amp * torch.randn_like(sine_waves)
220
-
221
- # first: set the unvoiced part to 0 by uv
222
- # then: additive noise
223
- sine_waves = sine_waves * uv + noise
224
- return sine_waves, uv, noise
225
-
226
-
227
- class SourceModuleHnNSF(torch.nn.Module):
228
- """ SourceModule for hn-nsf
229
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
230
- add_noise_std=0.003, voiced_threshod=0)
231
- sampling_rate: sampling_rate in Hz
232
- harmonic_num: number of harmonic above F0 (default: 0)
233
- sine_amp: amplitude of sine source signal (default: 0.1)
234
- add_noise_std: std of additive Gaussian noise (default: 0.003)
235
- note that amplitude of noise in unvoiced is decided
236
- by sine_amp
237
- voiced_threshold: threhold to set U/V given F0 (default: 0)
238
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
239
- F0_sampled (batchsize, length, 1)
240
- Sine_source (batchsize, length, 1)
241
- noise_source (batchsize, length 1)
242
- uv (batchsize, length, 1)
243
- """
244
-
245
- def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
246
- add_noise_std=0.003, voiced_threshod=0):
247
- super(SourceModuleHnNSF, self).__init__()
248
-
249
- self.sine_amp = sine_amp
250
- self.noise_std = add_noise_std
251
-
252
- # to produce sine waveforms
253
- self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
254
- sine_amp, add_noise_std, voiced_threshod)
255
-
256
- # to merge source harmonics into a single excitation
257
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
258
- self.l_tanh = torch.nn.Tanh()
259
-
260
- def forward(self, x):
261
- """
262
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
263
- F0_sampled (batchsize, length, 1)
264
- Sine_source (batchsize, length, 1)
265
- noise_source (batchsize, length 1)
266
- """
267
- # source for harmonic branch
268
- sine_wavs, uv, _ = self.l_sin_gen(x)
269
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
270
-
271
- # source for noise branch, in the same shape as uv
272
- noise = torch.randn_like(uv) * self.sine_amp / 3
273
- return sine_merge, noise, uv
274
-
275
-
276
- class Generator(torch.nn.Module):
277
- def __init__(self, h):
278
- super(Generator, self).__init__()
279
- self.h = h
280
-
281
- self.num_kernels = len(h["resblock_kernel_sizes"])
282
- self.num_upsamples = len(h["upsample_rates"])
283
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h["upsample_rates"]))
284
- self.m_source = SourceModuleHnNSF(
285
- sampling_rate=h["sampling_rate"],
286
- harmonic_num=8)
287
- self.noise_convs = nn.ModuleList()
288
- self.conv_pre = weight_norm(Conv1d(h["inter_channels"], h["upsample_initial_channel"], 7, 1, padding=3))
289
- resblock = ResBlock1 if h["resblock"] == '1' else ResBlock2
290
- self.ups = nn.ModuleList()
291
- for i, (u, k) in enumerate(zip(h["upsample_rates"], h["upsample_kernel_sizes"])):
292
- c_cur = h["upsample_initial_channel"] // (2 ** (i + 1))
293
- self.ups.append(weight_norm(
294
- ConvTranspose1d(h["upsample_initial_channel"] // (2 ** i), h["upsample_initial_channel"] // (2 ** (i + 1)),
295
- k, u, padding=(k - u) // 2)))
296
- if i + 1 < len(h["upsample_rates"]): #
297
- stride_f0 = np.prod(h["upsample_rates"][i + 1:])
298
- self.noise_convs.append(Conv1d(
299
- 1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2))
300
- else:
301
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
302
- self.resblocks = nn.ModuleList()
303
- for i in range(len(self.ups)):
304
- ch = h["upsample_initial_channel"] // (2 ** (i + 1))
305
- for j, (k, d) in enumerate(zip(h["resblock_kernel_sizes"], h["resblock_dilation_sizes"])):
306
- self.resblocks.append(resblock(h, ch, k, d))
307
-
308
- self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
309
- self.ups.apply(init_weights)
310
- self.conv_post.apply(init_weights)
311
- self.cond = nn.Conv1d(h['gin_channels'], h['upsample_initial_channel'], 1)
312
-
313
- def forward(self, x, f0, g=None):
314
- # print(1,x.shape,f0.shape,f0[:, None].shape)
315
- f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t
316
- # print(2,f0.shape)
317
- har_source, noi_source, uv = self.m_source(f0)
318
- har_source = har_source.transpose(1, 2)
319
- x = self.conv_pre(x)
320
- x = x + self.cond(g)
321
- # print(124,x.shape,har_source.shape)
322
- for i in range(self.num_upsamples):
323
- x = F.leaky_relu(x, LRELU_SLOPE)
324
- # print(3,x.shape)
325
- x = self.ups[i](x)
326
- x_source = self.noise_convs[i](har_source)
327
- # print(4,x_source.shape,har_source.shape,x.shape)
328
- x = x + x_source
329
- xs = None
330
- for j in range(self.num_kernels):
331
- if xs is None:
332
- xs = self.resblocks[i * self.num_kernels + j](x)
333
- else:
334
- xs += self.resblocks[i * self.num_kernels + j](x)
335
- x = xs / self.num_kernels
336
- x = F.leaky_relu(x)
337
- x = self.conv_post(x)
338
- x = torch.tanh(x)
339
-
340
- return x
341
-
342
- def remove_weight_norm(self):
343
- print('Removing weight norm...')
344
- for l in self.ups:
345
- remove_weight_norm(l)
346
- for l in self.resblocks:
347
- l.remove_weight_norm()
348
- remove_weight_norm(self.conv_pre)
349
- remove_weight_norm(self.conv_post)
350
-
351
-
352
- class DiscriminatorP(torch.nn.Module):
353
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
354
- super(DiscriminatorP, self).__init__()
355
- self.period = period
356
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
357
- self.convs = nn.ModuleList([
358
- norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
359
- norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
360
- norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
361
- norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
362
- norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
363
- ])
364
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
365
-
366
- def forward(self, x):
367
- fmap = []
368
-
369
- # 1d to 2d
370
- b, c, t = x.shape
371
- if t % self.period != 0: # pad first
372
- n_pad = self.period - (t % self.period)
373
- x = F.pad(x, (0, n_pad), "reflect")
374
- t = t + n_pad
375
- x = x.view(b, c, t // self.period, self.period)
376
-
377
- for l in self.convs:
378
- x = l(x)
379
- x = F.leaky_relu(x, LRELU_SLOPE)
380
- fmap.append(x)
381
- x = self.conv_post(x)
382
- fmap.append(x)
383
- x = torch.flatten(x, 1, -1)
384
-
385
- return x, fmap
386
-
387
-
388
- class MultiPeriodDiscriminator(torch.nn.Module):
389
- def __init__(self, periods=None):
390
- super(MultiPeriodDiscriminator, self).__init__()
391
- self.periods = periods if periods is not None else [2, 3, 5, 7, 11]
392
- self.discriminators = nn.ModuleList()
393
- for period in self.periods:
394
- self.discriminators.append(DiscriminatorP(period))
395
-
396
- def forward(self, y, y_hat):
397
- y_d_rs = []
398
- y_d_gs = []
399
- fmap_rs = []
400
- fmap_gs = []
401
- for i, d in enumerate(self.discriminators):
402
- y_d_r, fmap_r = d(y)
403
- y_d_g, fmap_g = d(y_hat)
404
- y_d_rs.append(y_d_r)
405
- fmap_rs.append(fmap_r)
406
- y_d_gs.append(y_d_g)
407
- fmap_gs.append(fmap_g)
408
-
409
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
410
-
411
-
412
- class DiscriminatorS(torch.nn.Module):
413
- def __init__(self, use_spectral_norm=False):
414
- super(DiscriminatorS, self).__init__()
415
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
416
- self.convs = nn.ModuleList([
417
- norm_f(Conv1d(1, 128, 15, 1, padding=7)),
418
- norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
419
- norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
420
- norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
421
- norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
422
- norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
423
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
424
- ])
425
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
426
-
427
- def forward(self, x):
428
- fmap = []
429
- for l in self.convs:
430
- x = l(x)
431
- x = F.leaky_relu(x, LRELU_SLOPE)
432
- fmap.append(x)
433
- x = self.conv_post(x)
434
- fmap.append(x)
435
- x = torch.flatten(x, 1, -1)
436
-
437
- return x, fmap
438
-
439
-
440
- class MultiScaleDiscriminator(torch.nn.Module):
441
- def __init__(self):
442
- super(MultiScaleDiscriminator, self).__init__()
443
- self.discriminators = nn.ModuleList([
444
- DiscriminatorS(use_spectral_norm=True),
445
- DiscriminatorS(),
446
- DiscriminatorS(),
447
- ])
448
- self.meanpools = nn.ModuleList([
449
- AvgPool1d(4, 2, padding=2),
450
- AvgPool1d(4, 2, padding=2)
451
- ])
452
-
453
- def forward(self, y, y_hat):
454
- y_d_rs = []
455
- y_d_gs = []
456
- fmap_rs = []
457
- fmap_gs = []
458
- for i, d in enumerate(self.discriminators):
459
- if i != 0:
460
- y = self.meanpools[i - 1](y)
461
- y_hat = self.meanpools[i - 1](y_hat)
462
- y_d_r, fmap_r = d(y)
463
- y_d_g, fmap_g = d(y_hat)
464
- y_d_rs.append(y_d_r)
465
- fmap_rs.append(fmap_r)
466
- y_d_gs.append(y_d_g)
467
- fmap_gs.append(fmap_g)
468
-
469
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
470
-
471
-
472
- def feature_loss(fmap_r, fmap_g):
473
- loss = 0
474
- for dr, dg in zip(fmap_r, fmap_g):
475
- for rl, gl in zip(dr, dg):
476
- loss += torch.mean(torch.abs(rl - gl))
477
-
478
- return loss * 2
479
-
480
-
481
- def discriminator_loss(disc_real_outputs, disc_generated_outputs):
482
- loss = 0
483
- r_losses = []
484
- g_losses = []
485
- for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
486
- r_loss = torch.mean((1 - dr) ** 2)
487
- g_loss = torch.mean(dg ** 2)
488
- loss += (r_loss + g_loss)
489
- r_losses.append(r_loss.item())
490
- g_losses.append(g_loss.item())
491
-
492
- return loss, r_losses, g_losses
493
-
494
-
495
- def generator_loss(disc_outputs):
496
- loss = 0
497
- gen_losses = []
498
- for dg in disc_outputs:
499
- l = torch.mean((1 - dg) ** 2)
500
- gen_losses.append(l)
501
- loss += l
502
-
503
- return loss, gen_losses
spaces/AkshayDev/Lazy-Film-Reviews/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Lazy Film Reviews
- emoji: 🌖
- colorFrom: purple
- colorTo: gray
- sdk: streamlit
- sdk_version: 1.2.0
- app_file: app.py
- pinned: false
- license: cc-by-nc-4.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
spaces/AlexWang/lama/saicinpainting/evaluation/masks/__init__.py DELETED
File without changes
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/decoder/sh.py DELETED
@@ -1,133 +0,0 @@
1
- import torch
2
-
3
- ################## sh function ##################
4
- C0 = 0.28209479177387814
5
- C1 = 0.4886025119029199
6
- C2 = [
7
- 1.0925484305920792,
8
- -1.0925484305920792,
9
- 0.31539156525252005,
10
- -1.0925484305920792,
11
- 0.5462742152960396
12
- ]
13
- C3 = [
14
- -0.5900435899266435,
15
- 2.890611442640554,
16
- -0.4570457994644658,
17
- 0.3731763325901154,
18
- -0.4570457994644658,
19
- 1.445305721320277,
20
- -0.5900435899266435
21
- ]
22
- C4 = [
23
- 2.5033429417967046,
24
- -1.7701307697799304,
25
- 0.9461746957575601,
26
- -0.6690465435572892,
27
- 0.10578554691520431,
28
- -0.6690465435572892,
29
- 0.47308734787878004,
30
- -1.7701307697799304,
31
- 0.6258357354491761,
32
- ]
33
-
34
- def eval_sh(deg, sh, dirs):
35
- """
36
- Evaluate spherical harmonics at unit directions
37
- using hardcoded SH polynomials.
38
- Works with torch/np/jnp.
39
- ... Can be 0 or more batch dimensions.
40
- :param deg: int SH max degree. Currently, 0-4 supported
41
- :param sh: torch.Tensor SH coeffs (..., C, (max degree + 1) ** 2)
42
- :param dirs: torch.Tensor unit directions (..., 3)
43
- :return: (..., C)
44
- """
45
- assert deg <= 4 and deg >= 0
46
- assert (deg + 1) ** 2 == sh.shape[-1]
47
- C = sh.shape[-2]
48
-
49
- result = C0 * sh[..., 0]
50
- if deg > 0:
51
- x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3]
52
- result = (result -
53
- C1 * y * sh[..., 1] +
54
- C1 * z * sh[..., 2] -
55
- C1 * x * sh[..., 3])
56
- if deg > 1:
57
- xx, yy, zz = x * x, y * y, z * z
58
- xy, yz, xz = x * y, y * z, x * z
59
- result = (result +
60
- C2[0] * xy * sh[..., 4] +
61
- C2[1] * yz * sh[..., 5] +
62
- C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] +
63
- C2[3] * xz * sh[..., 7] +
64
- C2[4] * (xx - yy) * sh[..., 8])
65
-
66
- if deg > 2:
67
- result = (result +
68
- C3[0] * y * (3 * xx - yy) * sh[..., 9] +
69
- C3[1] * xy * z * sh[..., 10] +
70
- C3[2] * y * (4 * zz - xx - yy)* sh[..., 11] +
71
- C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] +
72
- C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] +
73
- C3[5] * z * (xx - yy) * sh[..., 14] +
74
- C3[6] * x * (xx - 3 * yy) * sh[..., 15])
75
- if deg > 3:
76
- result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] +
77
- C4[1] * yz * (3 * xx - yy) * sh[..., 17] +
78
- C4[2] * xy * (7 * zz - 1) * sh[..., 18] +
79
- C4[3] * yz * (7 * zz - 3) * sh[..., 19] +
80
- C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] +
81
- C4[5] * xz * (7 * zz - 3) * sh[..., 21] +
82
- C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] +
83
- C4[7] * xz * (xx - 3 * yy) * sh[..., 23] +
84
- C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24])
85
- return result
86
-
87
- def eval_sh_bases(deg, dirs):
88
- """
89
- Evaluate spherical harmonics bases at unit directions,
90
- without taking linear combination.
91
- At each point, the final result may the be
92
- obtained through simple multiplication.
93
- :param deg: int SH max degree. Currently, 0-4 supported
94
- :param dirs: torch.Tensor (..., 3) unit directions
95
- :return: torch.Tensor (..., (deg+1) ** 2)
96
- """
97
- assert deg <= 4 and deg >= 0
98
- result = torch.empty((*dirs.shape[:-1], (deg + 1) ** 2), dtype=dirs.dtype, device=dirs.device)
99
- result[..., 0] = C0
100
- if deg > 0:
101
- x, y, z = dirs.unbind(-1)
102
- result[..., 1] = -C1 * y;
103
- result[..., 2] = C1 * z;
104
- result[..., 3] = -C1 * x;
105
- if deg > 1:
106
- xx, yy, zz = x * x, y * y, z * z
107
- xy, yz, xz = x * y, y * z, x * z
108
- result[..., 4] = C2[0] * xy;
109
- result[..., 5] = C2[1] * yz;
110
- result[..., 6] = C2[2] * (2.0 * zz - xx - yy);
111
- result[..., 7] = C2[3] * xz;
112
- result[..., 8] = C2[4] * (xx - yy);
113
-
114
- if deg > 2:
115
- result[..., 9] = C3[0] * y * (3 * xx - yy);
116
- result[..., 10] = C3[1] * xy * z;
117
- result[..., 11] = C3[2] * y * (4 * zz - xx - yy);
118
- result[..., 12] = C3[3] * z * (2 * zz - 3 * xx - 3 * yy);
119
- result[..., 13] = C3[4] * x * (4 * zz - xx - yy);
120
- result[..., 14] = C3[5] * z * (xx - yy);
121
- result[..., 15] = C3[6] * x * (xx - 3 * yy);
122
-
123
- if deg > 3:
124
- result[..., 16] = C4[0] * xy * (xx - yy);
125
- result[..., 17] = C4[1] * yz * (3 * xx - yy);
126
- result[..., 18] = C4[2] * xy * (7 * zz - 1);
127
- result[..., 19] = C4[3] * yz * (7 * zz - 3);
128
- result[..., 20] = C4[4] * (zz * (35 * zz - 30) + 3);
129
- result[..., 21] = C4[5] * xz * (7 * zz - 3);
130
- result[..., 22] = C4[6] * (xx - yy) * (7 * zz - 1);
131
- result[..., 23] = C4[7] * xz * (xx - 3 * yy);
132
- result[..., 24] = C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy));
133
- return result
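
A short usage sketch of the spherical-harmonics helpers deleted above, following the shapes described in their docstrings; the tensor sizes are arbitrary examples and the import is assumed:

```python
import torch
# Hypothetical usage of the deleted helpers; assumes eval_sh / eval_sh_bases are importable.

deg = 2                                                              # SH degree, 0-4 supported
dirs = torch.nn.functional.normalize(torch.randn(1024, 3), dim=-1)  # unit view directions (..., 3)
sh = torch.randn(1024, 3, (deg + 1) ** 2)                            # coeffs (..., C, (deg + 1) ** 2)

rgb = eval_sh(deg, sh, dirs)      # (1024, 3): SH linear combination per direction
basis = eval_sh_bases(deg, dirs)  # (1024, (deg + 1) ** 2): raw basis values
```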
spaces/AndreLie95/Diabetes_Risk_Prediction/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Deploy Milestone 2
- emoji: ⚡
- colorFrom: indigo
- colorTo: indigo
- sdk: streamlit
- sdk_version: 1.17.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/optimization/opt_overview.md DELETED
@@ -1,17 +0,0 @@
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
-
- # Overview
-
- Generating high-quality outputs is computationally intensive, especially during each iterative step where you go from a noisy output to a less noisy output. One of 🧨 Diffuser's goal is to make this technology widely accessible to everyone, which includes enabling fast inference on consumer and specialized hardware.
-
- This section will cover tips and tricks - like half-precision weights and sliced attention - for optimizing inference speed and reducing memory-consumption. You can also learn how to speed up your PyTorch code with [`torch.compile`](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) or [ONNX Runtime](https://onnxruntime.ai/docs/), and enable memory-efficient attention with [xFormers](https://facebookresearch.github.io/xformers/). There are also guides for running inference on specific hardware like Apple Silicon, and Intel or Habana processors.
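
As an illustration of the optimizations that deleted overview refers to (half-precision weights, memory-efficient attention, `torch.compile`), a minimal sketch with 🤗 Diffusers might look like the following; the model id and the availability of xFormers and a CUDA GPU are assumptions:

```python
import torch
from diffusers import StableDiffusionPipeline

# Load weights in FP16 to roughly halve memory use on GPU.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed model id
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

# Memory-efficient attention (requires the optional xformers package).
pipe.enable_xformers_memory_efficient_attention()

# Optional: compile the UNet for faster repeated inference (PyTorch 2.x).
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

image = pipe("an astronaut riding a horse").images[0]
```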
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/controlnet/README.md DELETED
@@ -1,465 +0,0 @@
1
- # ControlNet training example
2
-
3
- [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) by Lvmin Zhang and Maneesh Agrawala.
4
-
5
- This example is based on the [training example in the original ControlNet repository](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md). It trains a ControlNet to fill circles using a [small synthetic dataset](https://huggingface.co/datasets/fusing/fill50k).
6
-
7
- ## Installing the dependencies
8
-
9
- Before running the scripts, make sure to install the library's training dependencies:
10
-
11
- **Important**
12
-
13
- To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
14
- ```bash
15
- git clone https://github.com/huggingface/diffusers
16
- cd diffusers
17
- pip install -e .
18
- ```
19
-
20
- Then cd in the example folder and run
21
- ```bash
22
- pip install -r requirements.txt
23
- ```
24
-
25
- And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
26
-
27
- ```bash
28
- accelerate config
29
- ```
30
-
31
- Or for a default accelerate configuration without answering questions about your environment
32
-
33
- ```bash
34
- accelerate config default
35
- ```
36
-
37
- Or if your environment doesn't support an interactive shell e.g. a notebook
38
-
39
- ```python
40
- from accelerate.utils import write_basic_config
41
- write_basic_config()
42
- ```
43
-
44
- ## Circle filling dataset
45
-
46
- The original dataset is hosted in the [ControlNet repo](https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip). We re-uploaded it to be compatible with `datasets` [here](https://huggingface.co/datasets/fusing/fill50k). Note that `datasets` handles dataloading within the training script.
47
-
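As a quick, hedged sketch (assuming only that the `datasets` library is installed), you can peek at the re-uploaded dataset before training; the printout shows the available columns rather than assuming their names:

```python
from datasets import load_dataset

# download/load the training split and inspect it
dataset = load_dataset("fusing/fill50k", split="train")
print(dataset)             # features (columns) and number of rows
print(dataset[0].keys())   # fields of the first example
```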
48
- Our training examples use [Stable Diffusion 1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5) because that is what the original set of ControlNet models was trained on. However, ControlNet can be trained to augment any Stable Diffusion compatible model (such as [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) or [stabilityai/stable-diffusion-2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1)).
49
-
50
- ## Training
51
-
52
- Our training examples use two test conditioning images. They can be downloaded by running
53
-
54
- ```sh
55
- wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
56
-
57
- wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
58
- ```
59
-
60
-
61
- ```bash
62
- export MODEL_DIR="runwayml/stable-diffusion-v1-5"
63
- export OUTPUT_DIR="path to save model"
64
-
65
- accelerate launch train_controlnet.py \
66
- --pretrained_model_name_or_path=$MODEL_DIR \
67
- --output_dir=$OUTPUT_DIR \
68
- --dataset_name=fusing/fill50k \
69
- --resolution=512 \
70
- --learning_rate=1e-5 \
71
- --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
72
- --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
73
- --train_batch_size=4
74
- ```
75
-
76
- This default configuration requires ~38GB VRAM.
77
-
78
- By default, the training script logs outputs to TensorBoard. Pass `--report_to wandb` to use Weights and
79
- Biases.
80
-
81
- Gradient accumulation with a smaller batch size can be used to reduce training requirements to ~20 GB VRAM.
82
-
83
- ```bash
84
- export MODEL_DIR="runwayml/stable-diffusion-v1-5"
85
- export OUTPUT_DIR="path to save model"
86
-
87
- accelerate launch train_controlnet.py \
88
- --pretrained_model_name_or_path=$MODEL_DIR \
89
- --output_dir=$OUTPUT_DIR \
90
- --dataset_name=fusing/fill50k \
91
- --resolution=512 \
92
- --learning_rate=1e-5 \
93
- --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
94
- --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
95
- --train_batch_size=1 \
96
- --gradient_accumulation_steps=4
97
- ```
98
-
99
- ## Training with multiple GPUs
100
-
101
- `accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch)
102
- for running distributed training with `accelerate`. Here is an example command:
103
-
104
- ```bash
105
- export MODEL_DIR="runwayml/stable-diffusion-v1-5"
106
- export OUTPUT_DIR="path to save model"
107
-
108
- accelerate launch --mixed_precision="fp16" --multi_gpu train_controlnet.py \
109
- --pretrained_model_name_or_path=$MODEL_DIR \
110
- --output_dir=$OUTPUT_DIR \
111
- --dataset_name=fusing/fill50k \
112
- --resolution=512 \
113
- --learning_rate=1e-5 \
114
- --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
115
- --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
116
- --train_batch_size=4 \
117
- --mixed_precision="fp16" \
118
- --tracker_project_name="controlnet-demo" \
119
- --report_to=wandb
120
- ```
121
-
122
- ## Example results
123
-
124
- #### After 300 steps with batch size 8
125
-
126
- | | |
127
- |-------------------|:-------------------------:|
128
- | | red circle with blue background |
129
- ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![red circle with blue background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/red_circle_with_blue_background_300_steps.png) |
130
- | | cyan circle with brown floral background |
131
- ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png) | ![cyan circle with brown floral background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/cyan_circle_with_brown_floral_background_300_steps.png) |
132
-
133
-
134
- #### After 6000 steps with batch size 8:
135
-
136
- | | |
137
- |-------------------|:-------------------------:|
138
- | | red circle with blue background |
139
- ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![red circle with blue background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/red_circle_with_blue_background_6000_steps.png) |
140
- | | cyan circle with brown floral background |
141
- ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png) | ![cyan circle with brown floral background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/cyan_circle_with_brown_floral_background_6000_steps.png) |
142
-
143
- ## Training on a 16 GB GPU
144
-
145
- Optimizations:
146
- - Gradient checkpointing
147
- - bitsandbytes' 8-bit optimizer
148
-
149
- [bitsandbytes install instructions](https://github.com/TimDettmers/bitsandbytes#requirements--installation).
150
-
151
- ```bash
152
- export MODEL_DIR="runwayml/stable-diffusion-v1-5"
153
- export OUTPUT_DIR="path to save model"
154
-
155
- accelerate launch train_controlnet.py \
156
- --pretrained_model_name_or_path=$MODEL_DIR \
157
- --output_dir=$OUTPUT_DIR \
158
- --dataset_name=fusing/fill50k \
159
- --resolution=512 \
160
- --learning_rate=1e-5 \
161
- --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
162
- --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
163
- --train_batch_size=1 \
164
- --gradient_accumulation_steps=4 \
165
- --gradient_checkpointing \
166
- --use_8bit_adam
167
- ```
168
-
169
- ## Training on a 12 GB GPU
170
-
171
- Optimizations:
172
- - Gradient checkpointing
173
- - bitsandbytes' 8-bit optimizer
174
- - xformers
175
- - set grads to none
176
-
177
- ```bash
178
- export MODEL_DIR="runwayml/stable-diffusion-v1-5"
179
- export OUTPUT_DIR="path to save model"
180
-
181
- accelerate launch train_controlnet.py \
182
- --pretrained_model_name_or_path=$MODEL_DIR \
183
- --output_dir=$OUTPUT_DIR \
184
- --dataset_name=fusing/fill50k \
185
- --resolution=512 \
186
- --learning_rate=1e-5 \
187
- --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
188
- --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
189
- --train_batch_size=1 \
190
- --gradient_accumulation_steps=4 \
191
- --gradient_checkpointing \
192
- --use_8bit_adam \
193
- --enable_xformers_memory_efficient_attention \
194
- --set_grads_to_none
195
- ```
196
-
197
- When using `enable_xformers_memory_efficient_attention`, please make sure to install `xformers` by `pip install xformers`.
198
-
199
- ## Training on an 8 GB GPU
200
-
201
- We have not exhaustively tested DeepSpeed support for ControlNet. While the configuration does
202
- save memory, we have not confirmed that the configuration trains successfully. You will very likely
203
- have to make changes to the config to have a successful training run.
204
-
205
- Optimizations:
206
- - Gradient checkpointing
207
- - xformers
208
- - set grads to none
209
- - DeepSpeed stage 2 with parameter and optimizer offloading
210
- - fp16 mixed precision
211
-
212
- [DeepSpeed](https://www.deepspeed.ai/) can offload tensors from VRAM to either
213
- CPU or NVMe. This requires significantly more RAM (about 25 GB).
214
-
215
- Use `accelerate config` to enable DeepSpeed stage 2.
216
-
217
- The relevant parts of the resulting accelerate config file are
218
-
219
- ```yaml
220
- compute_environment: LOCAL_MACHINE
221
- deepspeed_config:
222
- gradient_accumulation_steps: 4
223
- offload_optimizer_device: cpu
224
- offload_param_device: cpu
225
- zero3_init_flag: false
226
- zero_stage: 2
227
- distributed_type: DEEPSPEED
228
- ```
229
-
230
- See [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more DeepSpeed configuration options.
231
-
232
- Changing the default Adam optimizer to DeepSpeed's Adam
233
- `deepspeed.ops.adam.DeepSpeedCPUAdam` gives a substantial speedup but
234
- it requires a CUDA toolchain with the same version as PyTorch. The 8-bit optimizer
235
- does not seem to be compatible with DeepSpeed at the moment.
236
-
237
- ```bash
238
- export MODEL_DIR="runwayml/stable-diffusion-v1-5"
239
- export OUTPUT_DIR="path to save model"
240
-
241
- accelerate launch train_controlnet.py \
242
- --pretrained_model_name_or_path=$MODEL_DIR \
243
- --output_dir=$OUTPUT_DIR \
244
- --dataset_name=fusing/fill50k \
245
- --resolution=512 \
246
- --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
247
- --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
248
- --train_batch_size=1 \
249
- --gradient_accumulation_steps=4 \
250
- --gradient_checkpointing \
251
- --enable_xformers_memory_efficient_attention \
252
- --set_grads_to_none \
253
- --mixed_precision fp16
254
- ```
255
-
256
- ## Performing inference with the trained ControlNet
257
-
258
- The trained model can be run with the same pipeline as the original ControlNet, using the newly trained ControlNet weights.
259
- Set `base_model_path` and `controlnet_path` to the values `--pretrained_model_name_or_path` and
260
- `--output_dir` were respectively set to in the training script.
261
-
262
- ```py
263
- from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
264
- from diffusers.utils import load_image
265
- import torch
266
-
267
- base_model_path = "path to model"
268
- controlnet_path = "path to controlnet"
269
-
270
- controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
271
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
272
- base_model_path, controlnet=controlnet, torch_dtype=torch.float16
273
- )
274
-
275
- # speed up diffusion process with faster scheduler and memory optimization
276
- pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
277
- # remove the following line if xformers is not installed or if you're using Torch 2.0.
278
- pipe.enable_xformers_memory_efficient_attention()
279
- # memory optimization.
280
- pipe.enable_model_cpu_offload()
281
-
282
- control_image = load_image("./conditioning_image_1.png")
283
- prompt = "pale golden rod circle with old lace background"
284
-
285
- # generate image
286
- generator = torch.manual_seed(0)
287
- image = pipe(
288
- prompt, num_inference_steps=20, generator=generator, image=control_image
289
- ).images[0]
290
- image.save("./output.png")
291
- ```
292
-
293
- ## Training with Flax/JAX
294
-
295
- For faster training on TPUs and GPUs you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script.
296
-
297
- ### Running on Google Cloud TPU
298
-
299
- See below for commands to set up a TPU VM (`--accelerator-type v4-8`). For more details about how to set up and use TPUs, refer to [Cloud docs for single VM setup](https://cloud.google.com/tpu/docs/run-calculation-jax).
300
-
301
- First create a single TPUv4-8 VM and connect to it:
302
-
303
- ```
304
- ZONE=us-central2-b
305
- TPU_TYPE=v4-8
306
- VM_NAME=hg_flax
307
-
308
- gcloud alpha compute tpus tpu-vm create $VM_NAME \
309
- --zone $ZONE \
310
- --accelerator-type $TPU_TYPE \
311
- --version tpu-vm-v4-base
312
-
313
- gcloud alpha compute tpus tpu-vm ssh $VM_NAME --zone $ZONE
314
- ```
315
-
316
- When connected, install JAX `0.4.5`:
317
-
318
- ```
319
- pip install "jax[tpu]==0.4.5" -f https://storage.googleapis.com/jax-releases/libtpu_releases.html
320
- ```
321
-
322
- To verify that JAX was correctly installed, you can run the following command:
323
-
324
- ```
325
- import jax
326
- jax.device_count()
327
- ```
328
-
329
- This should display the number of TPU cores, which should be 4 on a TPUv4-8 VM.
330
-
331
- Then install Diffusers and the library's training dependencies:
332
-
333
- ```bash
334
- git clone https://github.com/huggingface/diffusers
335
- cd diffusers
336
- pip install .
337
- ```
338
-
339
- Then cd into the example folder and run
340
-
341
- ```bash
342
- pip install -U -r requirements_flax.txt
343
- ```
344
-
345
- If you want to use Weights and Biases logging, you should also install `wandb` now
346
-
347
- ```bash
348
- pip install wandb
349
- ```
350
-
351
-
352
- Now let's download two conditioning images that we will use to run validation during training in order to track our progress
353
-
354
- ```
355
- wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
356
- wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
357
- ```
358
-
359
- We encourage you to store or share your model with the community. To use the Hugging Face Hub, please log in to your Hugging Face account ([create one](https://huggingface.co/join) if you don't have one already):
360
-
361
- ```
362
- huggingface-cli login
363
- ```
364
-
365
- Make sure you have the `MODEL_DIR`, `OUTPUT_DIR` and `HUB_MODEL_ID` environment variables set. The `OUTPUT_DIR` and `HUB_MODEL_ID` variables specify where to save the model on the Hub:
366
-
367
- ```bash
368
- export MODEL_DIR="runwayml/stable-diffusion-v1-5"
369
- export OUTPUT_DIR="runs/fill-circle-{timestamp}"
370
- export HUB_MODEL_ID="controlnet-fill-circle"
371
- ```
372
-
373
- And finally start the training
374
-
375
- ```bash
376
- python3 train_controlnet_flax.py \
377
- --pretrained_model_name_or_path=$MODEL_DIR \
378
- --output_dir=$OUTPUT_DIR \
379
- --dataset_name=fusing/fill50k \
380
- --resolution=512 \
381
- --learning_rate=1e-5 \
382
- --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
383
- --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
384
- --validation_steps=1000 \
385
- --train_batch_size=2 \
386
- --revision="non-ema" \
387
- --from_pt \
388
- --report_to="wandb" \
389
- --tracker_project_name=$HUB_MODEL_ID \
390
- --num_train_epochs=11 \
391
- --push_to_hub \
392
- --hub_model_id=$HUB_MODEL_ID
393
- ```
394
-
395
- Since we passed the `--push_to_hub` flag, it will automatically create a model repo under your Hugging Face account based on `$HUB_MODEL_ID`. By the end of training, the final checkpoint will be automatically stored on the Hub. You can find an example model repo [here](https://huggingface.co/YiYiXu/fill-circle-controlnet).
396
-
397
- Our training script also provides limited support for streaming large datasets from the Hugging Face Hub. In order to enable streaming, one must also set `--max_train_samples`. Here is an example command (from [this blog article](https://huggingface.co/blog/train-your-controlnet)):
398
-
399
- ```bash
400
- export MODEL_DIR="runwayml/stable-diffusion-v1-5"
401
- export OUTPUT_DIR="runs/uncanny-faces-{timestamp}"
402
- export HUB_MODEL_ID="controlnet-uncanny-faces"
403
-
404
- python3 train_controlnet_flax.py \
405
- --pretrained_model_name_or_path=$MODEL_DIR \
406
- --output_dir=$OUTPUT_DIR \
407
- --dataset_name=multimodalart/facesyntheticsspigacaptioned \
408
- --streaming \
409
- --conditioning_image_column=spiga_seg \
410
- --image_column=image \
411
- --caption_column=image_caption \
412
- --resolution=512 \
413
- --max_train_samples 100000 \
414
- --learning_rate=1e-5 \
415
- --train_batch_size=1 \
416
- --revision="flax" \
417
- --report_to="wandb" \
418
- --tracker_project_name=$HUB_MODEL_ID
419
- ```
420
-
421
- Note, however, that TPU performance might get bottlenecked because streaming with `datasets` is not optimized for images. To ensure maximum throughput, we encourage you to explore the following options:
422
-
423
- * [Webdataset](https://webdataset.github.io/webdataset/)
424
- * [TorchData](https://github.com/pytorch/data)
425
- * [TensorFlow Datasets](https://www.tensorflow.org/datasets/tfless_tfds)
426
-
427
- When working with a larger dataset, you may need to run the training process for a long time, and it's useful to save regular checkpoints along the way. You can use the following argument to enable intermediate checkpointing:
428
-
429
- ```bash
430
- --checkpointing_steps=500
431
- ```
432
- This will save the trained model in subfolders of your `output_dir`. Each subfolder is named after the number of steps performed so far; for example, a checkpoint saved after 500 training steps would be saved in a subfolder named `500`.
433
-
434
- You can then start your training from this saved checkpoint with
435
-
436
- ```bash
437
- --controlnet_model_name_or_path="./control_out/500"
438
- ```
439
-
440
- We support training with the Min-SNR weighting strategy proposed in [Efficient Diffusion Training via Min-SNR Weighting Strategy](https://arxiv.org/abs/2303.09556) which helps to achieve faster convergence by rebalancing the loss. To use it, one needs to set the `--snr_gamma` argument. The recommended value when using it is `5.0`.
441
-
442
- We also support gradient accumulation - a technique that lets you use an effective batch size bigger than what your machine would normally be able to fit into memory. You can use the `--gradient_accumulation_steps` argument to set the number of gradient accumulation steps. The ControlNet author recommends using gradient accumulation to achieve better convergence. Read more [here](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md#more-consideration-sudden-converge-phenomenon-and-gradient-accumulation).
443
-
444
- You can **profile your code** with:
445
-
446
- ```bash
447
- --profile_steps=5
448
- ```
449
-
450
- Refer to the [JAX documentation on profiling](https://jax.readthedocs.io/en/latest/profiling.html). To inspect the profile trace, you'll have to install and start Tensorboard with the profile plugin:
451
-
452
- ```bash
453
- pip install tensorflow tensorboard-plugin-profile
454
- tensorboard --logdir runs/fill-circle-100steps-20230411_165612/
455
- ```
456
-
457
- The profile can then be inspected at http://localhost:6006/#profile
458
-
459
- Sometimes you'll get version conflicts (error messages like `Duplicate plugins for name projector`), which means that you have to uninstall and reinstall all versions of Tensorflow/Tensorboard (e.g. with `pip uninstall tensorflow tf-nightly tensorboard tb-nightly tensorboard-plugin-profile && pip install tf-nightly tbp-nightly tensorboard-plugin-profile`).
460
-
461
- Note that the debugging functionality of the Tensorboard `profile` plugin is still under active development. Not all views are fully functional; for example, the `trace_viewer` cuts off events after 1M (which can result in all your device traces getting lost if you accidentally profile the compilation step).
462
-
463
- ## Support for Stable Diffusion XL
464
-
465
- We provide a training script for training a ControlNet with [Stable Diffusion XL](https://huggingface.co/papers/2307.01952). Please refer to [README_sdxl.md](./README_sdxl.md) for more details.
spaces/Andy1621/uniformer_image_detection/mmdet/models/builder.py DELETED
@@ -1,77 +0,0 @@
1
- import warnings
2
-
3
- from mmcv.utils import Registry, build_from_cfg
4
- from torch import nn
5
-
6
- BACKBONES = Registry('backbone')
7
- NECKS = Registry('neck')
8
- ROI_EXTRACTORS = Registry('roi_extractor')
9
- SHARED_HEADS = Registry('shared_head')
10
- HEADS = Registry('head')
11
- LOSSES = Registry('loss')
12
- DETECTORS = Registry('detector')
13
-
14
-
15
- def build(cfg, registry, default_args=None):
16
- """Build a module.
17
-
18
- Args:
19
- cfg (dict, list[dict]): The config of modules; it is either a dict
20
- or a list of configs.
21
- registry (:obj:`Registry`): A registry the module belongs to.
22
- default_args (dict, optional): Default arguments to build the module.
23
- Defaults to None.
24
-
25
- Returns:
26
- nn.Module: A built nn module.
27
- """
28
- if isinstance(cfg, list):
29
- modules = [
30
- build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
31
- ]
32
- return nn.Sequential(*modules)
33
- else:
34
- return build_from_cfg(cfg, registry, default_args)
35
-
36
-
37
- def build_backbone(cfg):
38
- """Build backbone."""
39
- return build(cfg, BACKBONES)
40
-
41
-
42
- def build_neck(cfg):
43
- """Build neck."""
44
- return build(cfg, NECKS)
45
-
46
-
47
- def build_roi_extractor(cfg):
48
- """Build roi extractor."""
49
- return build(cfg, ROI_EXTRACTORS)
50
-
51
-
52
- def build_shared_head(cfg):
53
- """Build shared head."""
54
- return build(cfg, SHARED_HEADS)
55
-
56
-
57
- def build_head(cfg):
58
- """Build head."""
59
- return build(cfg, HEADS)
60
-
61
-
62
- def build_loss(cfg):
63
- """Build loss."""
64
- return build(cfg, LOSSES)
65
-
66
-
67
- def build_detector(cfg, train_cfg=None, test_cfg=None):
68
- """Build detector."""
69
- if train_cfg is not None or test_cfg is not None:
70
- warnings.warn(
71
- 'train_cfg and test_cfg is deprecated, '
72
- 'please specify them in model', UserWarning)
73
- assert cfg.get('train_cfg') is None or train_cfg is None, \
74
- 'train_cfg specified in both outer field and model field '
75
- assert cfg.get('test_cfg') is None or test_cfg is None, \
76
- 'test_cfg specified in both outer field and model field '
77
- return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
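
For context, a minimal sketch of the registry pattern these helpers rely on. The `ToyBackbone` class and its config are hypothetical, invented purely for illustration; the `Registry`/`build_from_cfg` calls are the mmcv APIs imported above:

```python
from mmcv.utils import Registry, build_from_cfg
from torch import nn

BACKBONES = Registry('backbone')

@BACKBONES.register_module()  # makes the class constructible from a config dict
class ToyBackbone(nn.Module):
    def __init__(self, channels=16):
        super().__init__()
        self.conv = nn.Conv2d(3, channels, kernel_size=3, padding=1)

    def forward(self, x):
        return self.conv(x)

# the 'type' key selects the registered class; the remaining keys become constructor kwargs
backbone = build_from_cfg(dict(type='ToyBackbone', channels=32), BACKBONES)
```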
spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_hr18s_512x512_160k_ade20k.py DELETED
@@ -1,9 +0,0 @@
1
- _base_ = './ocrnet_hr18_512x512_160k_ade20k.py'
2
- model = dict(
3
- pretrained='open-mmlab://msra/hrnetv2_w18_small',
4
- backbone=dict(
5
- extra=dict(
6
- stage1=dict(num_blocks=(2, )),
7
- stage2=dict(num_blocks=(2, 2)),
8
- stage3=dict(num_modules=3, num_blocks=(2, 2, 2)),
9
- stage4=dict(num_modules=2, num_blocks=(2, 2, 2, 2)))))
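
This file only overrides the HRNet-W18-Small backbone settings; everything else is inherited from the `_base_` config it points to. A hedged sketch of inspecting the fully merged result, assuming mmcv's `Config` utility (which mmsegmentation uses to load such files):

```python
from mmcv import Config

# loading the file resolves the _base_ chain and merges the overrides above
cfg = Config.fromfile('configs/ocrnet/ocrnet_hr18s_512x512_160k_ade20k.py')
print(cfg.model.backbone.extra.stage4)  # merged stage4 settings
print(cfg.pretty_text)                  # full merged config as text
```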
spaces/AnimalEquality/chatbot/constants.py DELETED
@@ -1,3 +0,0 @@
1
- from pathlib import Path
2
-
3
- ROOT_DIR = Path(__file__).parent
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/deepspeed_parameters.py DELETED
@@ -1,74 +0,0 @@
1
- def generate_ds_config(ds_bf16, train_batch_size, nvme_offload_dir):
2
- '''
3
- DeepSpeed configuration
4
- https://huggingface.co/docs/transformers/main_classes/deepspeed
5
- '''
6
-
7
- if nvme_offload_dir:
8
- ds_config = {
9
- "fp16": {
10
- "enabled": not ds_bf16,
11
- },
12
- "bf16": {
13
- "enabled": ds_bf16,
14
- },
15
- "zero_optimization": {
16
- "stage": 3,
17
- "offload_param": {
18
- "device": "nvme",
19
- "nvme_path": nvme_offload_dir,
20
- "pin_memory": True,
21
- "buffer_count": 5,
22
- "buffer_size": 1e9,
23
- "max_in_cpu": 1e9
24
- },
25
- "overlap_comm": True,
26
- "reduce_bucket_size": "auto",
27
- "contiguous_gradients": True,
28
- "sub_group_size": 1e8,
29
- "stage3_prefetch_bucket_size": "auto",
30
- "stage3_param_persistence_threshold": "auto",
31
- "stage3_max_live_parameters": "auto",
32
- "stage3_max_reuse_distance": "auto",
33
- },
34
- "aio": {
35
- "block_size": 262144,
36
- "queue_depth": 32,
37
- "thread_count": 1,
38
- "single_submit": False,
39
- "overlap_events": True
40
- },
41
- "steps_per_print": 2000,
42
- "train_batch_size": train_batch_size,
43
- "train_micro_batch_size_per_gpu": 1,
44
- "wall_clock_breakdown": False
45
- }
46
- else:
47
- ds_config = {
48
- "fp16": {
49
- "enabled": not ds_bf16,
50
- },
51
- "bf16": {
52
- "enabled": ds_bf16,
53
- },
54
- "zero_optimization": {
55
- "stage": 3,
56
- "offload_param": {
57
- "device": "cpu",
58
- "pin_memory": True
59
- },
60
- "overlap_comm": True,
61
- "contiguous_gradients": True,
62
- "reduce_bucket_size": "auto",
63
- "stage3_prefetch_bucket_size": "auto",
64
- "stage3_param_persistence_threshold": "auto",
65
- "stage3_max_live_parameters": "auto",
66
- "stage3_max_reuse_distance": "auto",
67
- },
68
- "steps_per_print": 2000,
69
- "train_batch_size": train_batch_size,
70
- "train_micro_batch_size_per_gpu": 1,
71
- "wall_clock_breakdown": False
72
- }
73
-
74
- return ds_config
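
A hedged usage sketch: the dict returned above is typically handed to 🤗 Transformers' DeepSpeed integration before the model is loaded, so that ZeRO-3 offloading takes effect at load time. The `HfDeepSpeedConfig` import path below is an assumption that may vary between Transformers versions:

```python
from transformers.integrations import HfDeepSpeedConfig  # assumption: import path may differ by version

ds_config = generate_ds_config(ds_bf16=True, train_batch_size=1, nvme_offload_dir=None)

# keep a reference to this object alive before calling from_pretrained(),
# so the ZeRO-3 configuration is picked up when the model weights are loaded
dschf = HfDeepSpeedConfig(ds_config)
```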
spaces/Anonymous-sub/Rerender/gmflow_module/scripts/demo.sh DELETED
@@ -1,63 +0,0 @@
1
- #!/usr/bin/env bash
2
-
3
- # inference GMFlow without refinement
4
-
5
- # sintel
6
-
7
- # only predict forward flow
8
- CUDA_VISIBLE_DEVICES=0 python main.py \
9
- --inference_dir demo/sintel_market_1 \
10
- --output_path output/gmflow-norefine-sintel_market_1 \
11
- --resume pretrained/gmflow_sintel-0c07dcb3.pth
12
-
13
- # predict forward & backward flow
14
- CUDA_VISIBLE_DEVICES=0 python main.py \
15
- --inference_dir demo/sintel_market_1 \
16
- --output_path output/gmflow-norefine-sintel_market_1 \
17
- --pred_bidir_flow \
18
- --resume pretrained/gmflow_sintel-0c07dcb3.pth
19
-
20
-
21
- # predict forward & backward flow with forward-backward consistency check
22
- CUDA_VISIBLE_DEVICES=0 python main.py \
23
- --inference_dir demo/sintel_market_1 \
24
- --output_path output/gmflow-norefine-sintel_market_1 \
25
- --pred_bidir_flow \
26
- --fwd_bwd_consistency_check \
27
- --resume pretrained/gmflow_sintel-0c07dcb3.pth
28
-
29
-
30
- # davis
31
-
32
- CUDA_VISIBLE_DEVICES=0 python main.py \
33
- --inference_dir demo/davis_breakdance-flare \
34
- --output_path output/gmflow-norefine-davis_breakdance-flare \
35
- --resume pretrained/gmflow_sintel-0c07dcb3.pth
36
-
37
-
38
-
39
-
40
- # inference GMFlow with refinement
41
-
42
- CUDA_VISIBLE_DEVICES=0 python main.py \
43
- --inference_dir demo/davis_breakdance-flare \
44
- --output_path output/gmflow-withrefine-davis_breakdance-flare \
45
- --resume pretrained/gmflow_with_refine_sintel-3ed1cf48.pth \
46
- --padding_factor 32 \
47
- --upsample_factor 4 \
48
- --num_scales 2 \
49
- --attn_splits_list 2 8 \
50
- --corr_radius_list -1 4 \
51
- --prop_radius_list -1 1
52
-
53
-
54
-
55
-
56
- CUDA_VISIBLE_DEVICES=0 python main.py \
57
- --inference_dir demo/sintel_test_clean_market_1 \
58
- --output_path output/gmflow-norefine-sintel_test_clean_market_1 \
59
- --pred_bidir_flow \
60
- --fwd_bwd_consistency_check \
61
- --resume pretrained/gmflow_sintel-0c07dcb3.pth
62
-
63
-
spaces/Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model/groundingdino/config/GroundingDINO_SwinB_cfg.py DELETED
@@ -1,43 +0,0 @@
1
- batch_size = 1
2
- modelname = "groundingdino"
3
- backbone = "swin_B_384_22k"
4
- position_embedding = "sine"
5
- pe_temperatureH = 20
6
- pe_temperatureW = 20
7
- return_interm_indices = [1, 2, 3]
8
- backbone_freeze_keywords = None
9
- enc_layers = 6
10
- dec_layers = 6
11
- pre_norm = False
12
- dim_feedforward = 2048
13
- hidden_dim = 256
14
- dropout = 0.0
15
- nheads = 8
16
- num_queries = 900
17
- query_dim = 4
18
- num_patterns = 0
19
- num_feature_levels = 4
20
- enc_n_points = 4
21
- dec_n_points = 4
22
- two_stage_type = "standard"
23
- two_stage_bbox_embed_share = False
24
- two_stage_class_embed_share = False
25
- transformer_activation = "relu"
26
- dec_pred_bbox_embed_share = True
27
- dn_box_noise_scale = 1.0
28
- dn_label_noise_ratio = 0.5
29
- dn_label_coef = 1.0
30
- dn_bbox_coef = 1.0
31
- embed_init_tgt = True
32
- dn_labelbook_size = 2000
33
- max_text_len = 256
34
- text_encoder_type = "bert-base-uncased"
35
- use_text_enhancer = True
36
- use_fusion_layer = True
37
- use_checkpoint = True
38
- use_transformer_ckpt = True
39
- use_text_cross_attention = True
40
- text_dropout = 0.0
41
- fusion_dropout = 0.0
42
- fusion_droppath = 0.1
43
- sub_sentence_present = True
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/cli/req_command.py DELETED
@@ -1,505 +0,0 @@
1
- """Contains the Command base classes that depend on PipSession.
2
-
3
- The classes in this module are in a separate module so the commands not
4
- needing download / PackageFinder capability don't unnecessarily import the
5
- PackageFinder machinery and all its vendored dependencies, etc.
6
- """
7
-
8
- import logging
9
- import os
10
- import sys
11
- from functools import partial
12
- from optparse import Values
13
- from typing import TYPE_CHECKING, Any, List, Optional, Tuple
14
-
15
- from pip._internal.cache import WheelCache
16
- from pip._internal.cli import cmdoptions
17
- from pip._internal.cli.base_command import Command
18
- from pip._internal.cli.command_context import CommandContextMixIn
19
- from pip._internal.exceptions import CommandError, PreviousBuildDirError
20
- from pip._internal.index.collector import LinkCollector
21
- from pip._internal.index.package_finder import PackageFinder
22
- from pip._internal.models.selection_prefs import SelectionPreferences
23
- from pip._internal.models.target_python import TargetPython
24
- from pip._internal.network.session import PipSession
25
- from pip._internal.operations.build.build_tracker import BuildTracker
26
- from pip._internal.operations.prepare import RequirementPreparer
27
- from pip._internal.req.constructors import (
28
- install_req_from_editable,
29
- install_req_from_line,
30
- install_req_from_parsed_requirement,
31
- install_req_from_req_string,
32
- )
33
- from pip._internal.req.req_file import parse_requirements
34
- from pip._internal.req.req_install import InstallRequirement
35
- from pip._internal.resolution.base import BaseResolver
36
- from pip._internal.self_outdated_check import pip_self_version_check
37
- from pip._internal.utils.temp_dir import (
38
- TempDirectory,
39
- TempDirectoryTypeRegistry,
40
- tempdir_kinds,
41
- )
42
- from pip._internal.utils.virtualenv import running_under_virtualenv
43
-
44
- if TYPE_CHECKING:
45
- from ssl import SSLContext
46
-
47
- logger = logging.getLogger(__name__)
48
-
49
-
50
- def _create_truststore_ssl_context() -> Optional["SSLContext"]:
51
- if sys.version_info < (3, 10):
52
- raise CommandError("The truststore feature is only available for Python 3.10+")
53
-
54
- try:
55
- import ssl
56
- except ImportError:
57
- logger.warning("Disabling truststore since ssl support is missing")
58
- return None
59
-
60
- try:
61
- import truststore
62
- except ImportError:
63
- raise CommandError(
64
- "To use the truststore feature, 'truststore' must be installed into "
65
- "pip's current environment."
66
- )
67
-
68
- return truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
69
-
70
-
71
- class SessionCommandMixin(CommandContextMixIn):
72
-
73
- """
74
- A class mixin for command classes needing _build_session().
75
- """
76
-
77
- def __init__(self) -> None:
78
- super().__init__()
79
- self._session: Optional[PipSession] = None
80
-
81
- @classmethod
82
- def _get_index_urls(cls, options: Values) -> Optional[List[str]]:
83
- """Return a list of index urls from user-provided options."""
84
- index_urls = []
85
- if not getattr(options, "no_index", False):
86
- url = getattr(options, "index_url", None)
87
- if url:
88
- index_urls.append(url)
89
- urls = getattr(options, "extra_index_urls", None)
90
- if urls:
91
- index_urls.extend(urls)
92
- # Return None rather than an empty list
93
- return index_urls or None
94
-
95
- def get_default_session(self, options: Values) -> PipSession:
96
- """Get a default-managed session."""
97
- if self._session is None:
98
- self._session = self.enter_context(self._build_session(options))
99
- # there's no type annotation on requests.Session, so it's
100
- # automatically ContextManager[Any] and self._session becomes Any,
101
- # then https://github.com/python/mypy/issues/7696 kicks in
102
- assert self._session is not None
103
- return self._session
104
-
105
- def _build_session(
106
- self,
107
- options: Values,
108
- retries: Optional[int] = None,
109
- timeout: Optional[int] = None,
110
- fallback_to_certifi: bool = False,
111
- ) -> PipSession:
112
- cache_dir = options.cache_dir
113
- assert not cache_dir or os.path.isabs(cache_dir)
114
-
115
- if "truststore" in options.features_enabled:
116
- try:
117
- ssl_context = _create_truststore_ssl_context()
118
- except Exception:
119
- if not fallback_to_certifi:
120
- raise
121
- ssl_context = None
122
- else:
123
- ssl_context = None
124
-
125
- session = PipSession(
126
- cache=os.path.join(cache_dir, "http") if cache_dir else None,
127
- retries=retries if retries is not None else options.retries,
128
- trusted_hosts=options.trusted_hosts,
129
- index_urls=self._get_index_urls(options),
130
- ssl_context=ssl_context,
131
- )
132
-
133
- # Handle custom ca-bundles from the user
134
- if options.cert:
135
- session.verify = options.cert
136
-
137
- # Handle SSL client certificate
138
- if options.client_cert:
139
- session.cert = options.client_cert
140
-
141
- # Handle timeouts
142
- if options.timeout or timeout:
143
- session.timeout = timeout if timeout is not None else options.timeout
144
-
145
- # Handle configured proxies
146
- if options.proxy:
147
- session.proxies = {
148
- "http": options.proxy,
149
- "https": options.proxy,
150
- }
151
-
152
- # Determine if we can prompt the user for authentication or not
153
- session.auth.prompting = not options.no_input
154
- session.auth.keyring_provider = options.keyring_provider
155
-
156
- return session
157
-
158
-
159
- class IndexGroupCommand(Command, SessionCommandMixin):
160
-
161
- """
162
- Abstract base class for commands with the index_group options.
163
-
164
- This also corresponds to the commands that permit the pip version check.
165
- """
166
-
167
- def handle_pip_version_check(self, options: Values) -> None:
168
- """
169
- Do the pip version check if not disabled.
170
-
171
- This overrides the default behavior of not doing the check.
172
- """
173
- # Make sure the index_group options are present.
174
- assert hasattr(options, "no_index")
175
-
176
- if options.disable_pip_version_check or options.no_index:
177
- return
178
-
179
- # Otherwise, check if we're using the latest version of pip available.
180
- session = self._build_session(
181
- options,
182
- retries=0,
183
- timeout=min(5, options.timeout),
184
- # This is set to ensure the function does not fail when truststore is
185
- # specified in use-feature but cannot be loaded. This usually raises a
186
- # CommandError and shows a nice user-facing error, but this function is not
187
- # called in that try-except block.
188
- fallback_to_certifi=True,
189
- )
190
- with session:
191
- pip_self_version_check(session, options)
192
-
193
-
194
- KEEPABLE_TEMPDIR_TYPES = [
195
- tempdir_kinds.BUILD_ENV,
196
- tempdir_kinds.EPHEM_WHEEL_CACHE,
197
- tempdir_kinds.REQ_BUILD,
198
- ]
199
-
200
-
201
- def warn_if_run_as_root() -> None:
202
- """Output a warning for sudo users on Unix.
203
-
204
- In a virtual environment, sudo pip still writes to virtualenv.
205
- On Windows, users may run pip as Administrator without issues.
206
- This warning only applies to Unix root users outside of virtualenv.
207
- """
208
- if running_under_virtualenv():
209
- return
210
- if not hasattr(os, "getuid"):
211
- return
212
- # On Windows, there are no "system managed" Python packages. Installing as
213
- # Administrator via pip is the correct way of updating system environments.
214
- #
215
- # We choose sys.platform over utils.compat.WINDOWS here to enable Mypy platform
216
- # checks: https://mypy.readthedocs.io/en/stable/common_issues.html
217
- if sys.platform == "win32" or sys.platform == "cygwin":
218
- return
219
-
220
- if os.getuid() != 0:
221
- return
222
-
223
- logger.warning(
224
- "Running pip as the 'root' user can result in broken permissions and "
225
- "conflicting behaviour with the system package manager. "
226
- "It is recommended to use a virtual environment instead: "
227
- "https://pip.pypa.io/warnings/venv"
228
- )
229
-
230
-
231
- def with_cleanup(func: Any) -> Any:
232
- """Decorator for common logic related to managing temporary
233
- directories.
234
- """
235
-
236
- def configure_tempdir_registry(registry: TempDirectoryTypeRegistry) -> None:
237
- for t in KEEPABLE_TEMPDIR_TYPES:
238
- registry.set_delete(t, False)
239
-
240
- def wrapper(
241
- self: RequirementCommand, options: Values, args: List[Any]
242
- ) -> Optional[int]:
243
- assert self.tempdir_registry is not None
244
- if options.no_clean:
245
- configure_tempdir_registry(self.tempdir_registry)
246
-
247
- try:
248
- return func(self, options, args)
249
- except PreviousBuildDirError:
250
- # This kind of conflict can occur when the user passes an explicit
251
- # build directory with a pre-existing folder. In that case we do
252
- # not want to accidentally remove it.
253
- configure_tempdir_registry(self.tempdir_registry)
254
- raise
255
-
256
- return wrapper
257
-
258
-
259
- class RequirementCommand(IndexGroupCommand):
260
- def __init__(self, *args: Any, **kw: Any) -> None:
261
- super().__init__(*args, **kw)
262
-
263
- self.cmd_opts.add_option(cmdoptions.no_clean())
264
-
265
- @staticmethod
266
- def determine_resolver_variant(options: Values) -> str:
267
- """Determines which resolver should be used, based on the given options."""
268
- if "legacy-resolver" in options.deprecated_features_enabled:
269
- return "legacy"
270
-
271
- return "2020-resolver"
272
-
273
- @classmethod
274
- def make_requirement_preparer(
275
- cls,
276
- temp_build_dir: TempDirectory,
277
- options: Values,
278
- build_tracker: BuildTracker,
279
- session: PipSession,
280
- finder: PackageFinder,
281
- use_user_site: bool,
282
- download_dir: Optional[str] = None,
283
- verbosity: int = 0,
284
- ) -> RequirementPreparer:
285
- """
286
- Create a RequirementPreparer instance for the given parameters.
287
- """
288
- temp_build_dir_path = temp_build_dir.path
289
- assert temp_build_dir_path is not None
290
-
291
- resolver_variant = cls.determine_resolver_variant(options)
292
- if resolver_variant == "2020-resolver":
293
- lazy_wheel = "fast-deps" in options.features_enabled
294
- if lazy_wheel:
295
- logger.warning(
296
- "pip is using lazily downloaded wheels using HTTP "
297
- "range requests to obtain dependency information. "
298
- "This experimental feature is enabled through "
299
- "--use-feature=fast-deps and it is not ready for "
300
- "production."
301
- )
302
- else:
303
- lazy_wheel = False
304
- if "fast-deps" in options.features_enabled:
305
- logger.warning(
306
- "fast-deps has no effect when used with the legacy resolver."
307
- )
308
-
309
- return RequirementPreparer(
310
- build_dir=temp_build_dir_path,
311
- src_dir=options.src_dir,
312
- download_dir=download_dir,
313
- build_isolation=options.build_isolation,
314
- check_build_deps=options.check_build_deps,
315
- build_tracker=build_tracker,
316
- session=session,
317
- progress_bar=options.progress_bar,
318
- finder=finder,
319
- require_hashes=options.require_hashes,
320
- use_user_site=use_user_site,
321
- lazy_wheel=lazy_wheel,
322
- verbosity=verbosity,
323
- )
324
-
325
- @classmethod
326
- def make_resolver(
327
- cls,
328
- preparer: RequirementPreparer,
329
- finder: PackageFinder,
330
- options: Values,
331
- wheel_cache: Optional[WheelCache] = None,
332
- use_user_site: bool = False,
333
- ignore_installed: bool = True,
334
- ignore_requires_python: bool = False,
335
- force_reinstall: bool = False,
336
- upgrade_strategy: str = "to-satisfy-only",
337
- use_pep517: Optional[bool] = None,
338
- py_version_info: Optional[Tuple[int, ...]] = None,
339
- ) -> BaseResolver:
340
- """
341
- Create a Resolver instance for the given parameters.
342
- """
343
- make_install_req = partial(
344
- install_req_from_req_string,
345
- isolated=options.isolated_mode,
346
- use_pep517=use_pep517,
347
- )
348
- resolver_variant = cls.determine_resolver_variant(options)
349
- # The long import name and duplicated invocation is needed to convince
350
- # Mypy into correctly typechecking. Otherwise it would complain the
351
- # "Resolver" class being redefined.
352
- if resolver_variant == "2020-resolver":
353
- import pip._internal.resolution.resolvelib.resolver
354
-
355
- return pip._internal.resolution.resolvelib.resolver.Resolver(
356
- preparer=preparer,
357
- finder=finder,
358
- wheel_cache=wheel_cache,
359
- make_install_req=make_install_req,
360
- use_user_site=use_user_site,
361
- ignore_dependencies=options.ignore_dependencies,
362
- ignore_installed=ignore_installed,
363
- ignore_requires_python=ignore_requires_python,
364
- force_reinstall=force_reinstall,
365
- upgrade_strategy=upgrade_strategy,
366
- py_version_info=py_version_info,
367
- )
368
- import pip._internal.resolution.legacy.resolver
369
-
370
- return pip._internal.resolution.legacy.resolver.Resolver(
371
- preparer=preparer,
372
- finder=finder,
373
- wheel_cache=wheel_cache,
374
- make_install_req=make_install_req,
375
- use_user_site=use_user_site,
376
- ignore_dependencies=options.ignore_dependencies,
377
- ignore_installed=ignore_installed,
378
- ignore_requires_python=ignore_requires_python,
379
- force_reinstall=force_reinstall,
380
- upgrade_strategy=upgrade_strategy,
381
- py_version_info=py_version_info,
382
- )
383
-
384
- def get_requirements(
385
- self,
386
- args: List[str],
387
- options: Values,
388
- finder: PackageFinder,
389
- session: PipSession,
390
- ) -> List[InstallRequirement]:
391
- """
392
- Parse command-line arguments into the corresponding requirements.
393
- """
394
- requirements: List[InstallRequirement] = []
395
- for filename in options.constraints:
396
- for parsed_req in parse_requirements(
397
- filename,
398
- constraint=True,
399
- finder=finder,
400
- options=options,
401
- session=session,
402
- ):
403
- req_to_add = install_req_from_parsed_requirement(
404
- parsed_req,
405
- isolated=options.isolated_mode,
406
- user_supplied=False,
407
- )
408
- requirements.append(req_to_add)
409
-
410
- for req in args:
411
- req_to_add = install_req_from_line(
412
- req,
413
- comes_from=None,
414
- isolated=options.isolated_mode,
415
- use_pep517=options.use_pep517,
416
- user_supplied=True,
417
- config_settings=getattr(options, "config_settings", None),
418
- )
419
- requirements.append(req_to_add)
420
-
421
- for req in options.editables:
422
- req_to_add = install_req_from_editable(
423
- req,
424
- user_supplied=True,
425
- isolated=options.isolated_mode,
426
- use_pep517=options.use_pep517,
427
- config_settings=getattr(options, "config_settings", None),
428
- )
429
- requirements.append(req_to_add)
430
-
431
- # NOTE: options.require_hashes may be set if --require-hashes is True
432
- for filename in options.requirements:
433
- for parsed_req in parse_requirements(
434
- filename, finder=finder, options=options, session=session
435
- ):
436
- req_to_add = install_req_from_parsed_requirement(
437
- parsed_req,
438
- isolated=options.isolated_mode,
439
- use_pep517=options.use_pep517,
440
- user_supplied=True,
441
- config_settings=parsed_req.options.get("config_settings")
442
- if parsed_req.options
443
- else None,
444
- )
445
- requirements.append(req_to_add)
446
-
447
- # If any requirement has hash options, enable hash checking.
448
- if any(req.has_hash_options for req in requirements):
449
- options.require_hashes = True
450
-
451
- if not (args or options.editables or options.requirements):
452
- opts = {"name": self.name}
453
- if options.find_links:
454
- raise CommandError(
455
- "You must give at least one requirement to {name} "
456
- '(maybe you meant "pip {name} {links}"?)'.format(
457
- **dict(opts, links=" ".join(options.find_links))
458
- )
459
- )
460
- else:
461
- raise CommandError(
462
- "You must give at least one requirement to {name} "
463
- '(see "pip help {name}")'.format(**opts)
464
- )
465
-
466
- return requirements
467
-
468
- @staticmethod
469
- def trace_basic_info(finder: PackageFinder) -> None:
470
- """
471
- Trace basic information about the provided objects.
472
- """
473
- # Display where finder is looking for packages
474
- search_scope = finder.search_scope
475
- locations = search_scope.get_formatted_locations()
476
- if locations:
477
- logger.info(locations)
478
-
479
- def _build_package_finder(
480
- self,
481
- options: Values,
482
- session: PipSession,
483
- target_python: Optional[TargetPython] = None,
484
- ignore_requires_python: Optional[bool] = None,
485
- ) -> PackageFinder:
486
- """
487
- Create a package finder appropriate to this requirement command.
488
-
489
- :param ignore_requires_python: Whether to ignore incompatible
490
- "Requires-Python" values in links. Defaults to False.
491
- """
492
- link_collector = LinkCollector.create(session, options=options)
493
- selection_prefs = SelectionPreferences(
494
- allow_yanked=True,
495
- format_control=options.format_control,
496
- allow_all_prereleases=options.pre,
497
- prefer_binary=options.prefer_binary,
498
- ignore_requires_python=ignore_requires_python,
499
- )
500
-
501
- return PackageFinder.create(
502
- link_collector=link_collector,
503
- selection_prefs=selection_prefs,
504
- target_python=target_python,
505
- )
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/constrain.py DELETED
@@ -1,37 +0,0 @@
1
- from typing import Optional, TYPE_CHECKING
2
-
3
- from .jupyter import JupyterMixin
4
- from .measure import Measurement
5
-
6
- if TYPE_CHECKING:
7
- from .console import Console, ConsoleOptions, RenderableType, RenderResult
8
-
9
-
10
- class Constrain(JupyterMixin):
11
- """Constrain the width of a renderable to a given number of characters.
12
-
13
- Args:
14
- renderable (RenderableType): A renderable object.
15
- width (int, optional): The maximum width (in characters) to render. Defaults to 80.
16
- """
17
-
18
- def __init__(self, renderable: "RenderableType", width: Optional[int] = 80) -> None:
19
- self.renderable = renderable
20
- self.width = width
21
-
22
- def __rich_console__(
23
- self, console: "Console", options: "ConsoleOptions"
24
- ) -> "RenderResult":
25
- if self.width is None:
26
- yield self.renderable
27
- else:
28
- child_options = options.update_width(min(self.width, options.max_width))
29
- yield from console.render(self.renderable, child_options)
30
-
31
- def __rich_measure__(
32
- self, console: "Console", options: "ConsoleOptions"
33
- ) -> "Measurement":
34
- if self.width is not None:
35
- options = options.update_width(self.width)
36
- measurement = Measurement.get(console, options, self.renderable)
37
- return measurement
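
A small usage sketch (assuming the public `rich` package, where this class is exposed as `rich.constrain.Constrain`): wrap any renderable to cap its rendered width.

```python
from rich.console import Console
from rich.constrain import Constrain
from rich.panel import Panel

console = Console()
# the panel is rendered at most 40 characters wide, regardless of terminal width
console.print(Constrain(Panel("hello, world"), width=40))
```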
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/tree.py DELETED
@@ -1,251 +0,0 @@
1
- from typing import Iterator, List, Optional, Tuple
2
-
3
- from ._loop import loop_first, loop_last
4
- from .console import Console, ConsoleOptions, RenderableType, RenderResult
5
- from .jupyter import JupyterMixin
6
- from .measure import Measurement
7
- from .segment import Segment
8
- from .style import Style, StyleStack, StyleType
9
- from .styled import Styled
10
-
11
-
12
- class Tree(JupyterMixin):
13
- """A renderable for a tree structure.
14
-
15
- Args:
16
- label (RenderableType): The renderable or str for the tree label.
17
- style (StyleType, optional): Style of this tree. Defaults to "tree".
18
- guide_style (StyleType, optional): Style of the guide lines. Defaults to "tree.line".
19
- expanded (bool, optional): Also display children. Defaults to True.
20
- highlight (bool, optional): Highlight renderable (if str). Defaults to False.
21
- """
22
-
23
- def __init__(
24
- self,
25
- label: RenderableType,
26
- *,
27
- style: StyleType = "tree",
28
- guide_style: StyleType = "tree.line",
29
- expanded: bool = True,
30
- highlight: bool = False,
31
- hide_root: bool = False,
32
- ) -> None:
33
- self.label = label
34
- self.style = style
35
- self.guide_style = guide_style
36
- self.children: List[Tree] = []
37
- self.expanded = expanded
38
- self.highlight = highlight
39
- self.hide_root = hide_root
40
-
41
- def add(
42
- self,
43
- label: RenderableType,
44
- *,
45
- style: Optional[StyleType] = None,
46
- guide_style: Optional[StyleType] = None,
47
- expanded: bool = True,
48
- highlight: Optional[bool] = False,
49
- ) -> "Tree":
50
- """Add a child tree.
51
-
52
- Args:
53
- label (RenderableType): The renderable or str for the tree label.
54
- style (StyleType, optional): Style of this tree. Defaults to "tree".
55
- guide_style (StyleType, optional): Style of the guide lines. Defaults to "tree.line".
56
- expanded (bool, optional): Also display children. Defaults to True.
57
- highlight (Optional[bool], optional): Highlight renderable (if str). Defaults to False.
58
-
59
- Returns:
60
- Tree: A new child Tree, which may be further modified.
61
- """
62
- node = Tree(
63
- label,
64
- style=self.style if style is None else style,
65
- guide_style=self.guide_style if guide_style is None else guide_style,
66
- expanded=expanded,
67
- highlight=self.highlight if highlight is None else highlight,
68
- )
69
- self.children.append(node)
70
- return node
71
-
72
- def __rich_console__(
73
- self, console: "Console", options: "ConsoleOptions"
74
- ) -> "RenderResult":
75
-
76
- stack: List[Iterator[Tuple[bool, Tree]]] = []
77
- pop = stack.pop
78
- push = stack.append
79
- new_line = Segment.line()
80
-
81
- get_style = console.get_style
82
- null_style = Style.null()
83
- guide_style = get_style(self.guide_style, default="") or null_style
84
- SPACE, CONTINUE, FORK, END = range(4)
85
-
86
- ASCII_GUIDES = (" ", "| ", "+-- ", "`-- ")
87
- TREE_GUIDES = [
88
- (" ", "│ ", "├── ", "└── "),
89
- (" ", "┃ ", "┣━━ ", "┗━━ "),
90
- (" ", "║ ", "╠══ ", "╚══ "),
91
- ]
92
- _Segment = Segment
93
-
94
- def make_guide(index: int, style: Style) -> Segment:
95
- """Make a Segment for a level of the guide lines."""
96
- if options.ascii_only:
97
- line = ASCII_GUIDES[index]
98
- else:
99
- guide = 1 if style.bold else (2 if style.underline2 else 0)
100
- line = TREE_GUIDES[0 if options.legacy_windows else guide][index]
101
- return _Segment(line, style)
102
-
103
- levels: List[Segment] = [make_guide(CONTINUE, guide_style)]
104
- push(iter(loop_last([self])))
105
-
106
- guide_style_stack = StyleStack(get_style(self.guide_style))
107
- style_stack = StyleStack(get_style(self.style))
108
- remove_guide_styles = Style(bold=False, underline2=False)
109
-
110
- depth = 0
111
-
112
- while stack:
113
- stack_node = pop()
114
- try:
115
- last, node = next(stack_node)
116
- except StopIteration:
117
- levels.pop()
118
- if levels:
119
- guide_style = levels[-1].style or null_style
120
- levels[-1] = make_guide(FORK, guide_style)
121
- guide_style_stack.pop()
122
- style_stack.pop()
123
- continue
124
- push(stack_node)
125
- if last:
126
- levels[-1] = make_guide(END, levels[-1].style or null_style)
127
-
128
- guide_style = guide_style_stack.current + get_style(node.guide_style)
129
- style = style_stack.current + get_style(node.style)
130
- prefix = levels[(2 if self.hide_root else 1) :]
131
- renderable_lines = console.render_lines(
132
- Styled(node.label, style),
133
- options.update(
134
- width=options.max_width
135
- - sum(level.cell_length for level in prefix),
136
- highlight=self.highlight,
137
- height=None,
138
- ),
139
- pad=options.justify is not None,
140
- )
141
-
142
- if not (depth == 0 and self.hide_root):
143
- for first, line in loop_first(renderable_lines):
144
- if prefix:
145
- yield from _Segment.apply_style(
146
- prefix,
147
- style.background_style,
148
- post_style=remove_guide_styles,
149
- )
150
- yield from line
151
- yield new_line
152
- if first and prefix:
153
- prefix[-1] = make_guide(
154
- SPACE if last else CONTINUE, prefix[-1].style or null_style
155
- )
156
-
157
- if node.expanded and node.children:
158
- levels[-1] = make_guide(
159
- SPACE if last else CONTINUE, levels[-1].style or null_style
160
- )
161
- levels.append(
162
- make_guide(END if len(node.children) == 1 else FORK, guide_style)
163
- )
164
- style_stack.push(get_style(node.style))
165
- guide_style_stack.push(get_style(node.guide_style))
166
- push(iter(loop_last(node.children)))
167
- depth += 1
168
-
169
- def __rich_measure__(
170
- self, console: "Console", options: "ConsoleOptions"
171
- ) -> "Measurement":
172
- stack: List[Iterator[Tree]] = [iter([self])]
173
- pop = stack.pop
174
- push = stack.append
175
- minimum = 0
176
- maximum = 0
177
- measure = Measurement.get
178
- level = 0
179
- while stack:
180
- iter_tree = pop()
181
- try:
182
- tree = next(iter_tree)
183
- except StopIteration:
184
- level -= 1
185
- continue
186
- push(iter_tree)
187
- min_measure, max_measure = measure(console, options, tree.label)
188
- indent = level * 4
189
- minimum = max(min_measure + indent, minimum)
190
- maximum = max(max_measure + indent, maximum)
191
- if tree.expanded and tree.children:
192
- push(iter(tree.children))
193
- level += 1
194
- return Measurement(minimum, maximum)
195
-
196
-
197
- if __name__ == "__main__": # pragma: no cover
198
-
199
- from pip._vendor.rich.console import Group
200
- from pip._vendor.rich.markdown import Markdown
201
- from pip._vendor.rich.panel import Panel
202
- from pip._vendor.rich.syntax import Syntax
203
- from pip._vendor.rich.table import Table
204
-
205
- table = Table(row_styles=["", "dim"])
206
-
207
- table.add_column("Released", style="cyan", no_wrap=True)
208
- table.add_column("Title", style="magenta")
209
- table.add_column("Box Office", justify="right", style="green")
210
-
211
- table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690")
212
- table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
213
- table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889")
214
- table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story", "$1,332,439,889")
215
-
216
- code = """\
217
- class Segment(NamedTuple):
218
- text: str = ""
219
- style: Optional[Style] = None
220
- is_control: bool = False
221
- """
222
- syntax = Syntax(code, "python", theme="monokai", line_numbers=True)
223
-
224
- markdown = Markdown(
225
- """\
226
- ### example.md
227
- > Hello, World!
228
- >
229
- > Markdown _all_ the things
230
- """
231
- )
232
-
233
- root = Tree("🌲 [b green]Rich Tree", highlight=True, hide_root=True)
234
-
235
- node = root.add(":file_folder: Renderables", guide_style="red")
236
- simple_node = node.add(":file_folder: [bold yellow]Atomic", guide_style="uu green")
237
- simple_node.add(Group("📄 Syntax", syntax))
238
- simple_node.add(Group("📄 Markdown", Panel(markdown, border_style="green")))
239
-
240
- containers_node = node.add(
241
- ":file_folder: [bold magenta]Containers", guide_style="bold magenta"
242
- )
243
- containers_node.expanded = True
244
- panel = Panel.fit("Just a panel", border_style="red")
245
- containers_node.add(Group("📄 Panels", panel))
246
-
247
- containers_node.add(Group("📄 [b magenta]Table", table))
248
-
249
- console = Console()
250
-
251
- console.print(root)
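The demo above exercises the Tree renderable end to end. For reference, a minimal standalone sketch of the same API, assuming the vendored module is importable as `pip._vendor.rich.tree`; the labels and guide styles below are illustrative, not taken from the file:

```python
from pip._vendor.rich.console import Console
from pip._vendor.rich.tree import Tree

console = Console()

root = Tree("project", guide_style="bold bright_blue")  # root label
src = root.add("src")                                    # branch node
src.add("[green]main.py")                                # leaves with console markup
src.add("[green]utils.py")
root.add("README.md")

console.print(root)  # guide lines are produced by the __rich_console__ method above
```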
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/_musllinux.py DELETED
@@ -1,136 +0,0 @@
1
- """PEP 656 support.
2
-
3
- This module implements logic to detect if the currently running Python is
4
- linked against musl, and what musl version is used.
5
- """
6
-
7
- import contextlib
8
- import functools
9
- import operator
10
- import os
11
- import re
12
- import struct
13
- import subprocess
14
- import sys
15
- from typing import IO, Iterator, NamedTuple, Optional, Tuple
16
-
17
-
18
- def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
19
- return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
20
-
21
-
22
- def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
23
- """Detect musl libc location by parsing the Python executable.
24
-
25
- Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
26
- ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
27
- """
28
- f.seek(0)
29
- try:
30
- ident = _read_unpacked(f, "16B")
31
- except struct.error:
32
- return None
33
- if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
34
- return None
35
- f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
36
-
37
- try:
38
- # e_fmt: Format for the remaining ELF header fields.
39
- # p_fmt: Format for a program header entry.
40
- # p_idx: Indexes to find p_type, p_offset, and p_filesz.
41
- e_fmt, p_fmt, p_idx = {
42
- 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
43
- 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
44
- }[ident[4]]
45
- except KeyError:
46
- return None
47
- else:
48
- p_get = operator.itemgetter(*p_idx)
49
-
50
- # Find the interpreter section and return its content.
51
- try:
52
- _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
53
- except struct.error:
54
- return None
55
- for i in range(e_phnum + 1):
56
- f.seek(e_phoff + e_phentsize * i)
57
- try:
58
- p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
59
- except struct.error:
60
- return None
61
- if p_type != 3: # Not PT_INTERP.
62
- continue
63
- f.seek(p_offset)
64
- interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
65
- if "musl" not in interpreter:
66
- return None
67
- return interpreter
68
- return None
69
-
70
-
71
- class _MuslVersion(NamedTuple):
72
- major: int
73
- minor: int
74
-
75
-
76
- def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
77
- lines = [n for n in (n.strip() for n in output.splitlines()) if n]
78
- if len(lines) < 2 or lines[0][:4] != "musl":
79
- return None
80
- m = re.match(r"Version (\d+)\.(\d+)", lines[1])
81
- if not m:
82
- return None
83
- return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
84
-
85
-
86
- @functools.lru_cache()
87
- def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
88
- """Detect currently-running musl runtime version.
89
-
90
- This is done by checking the specified executable's dynamic linking
91
- information, and invoking the loader to parse its output for a version
92
- string. If the loader is musl, the output would be something like::
93
-
94
- musl libc (x86_64)
95
- Version 1.2.2
96
- Dynamic Program Loader
97
- """
98
- with contextlib.ExitStack() as stack:
99
- try:
100
- f = stack.enter_context(open(executable, "rb"))
101
- except OSError:
102
- return None
103
- ld = _parse_ld_musl_from_elf(f)
104
- if not ld:
105
- return None
106
- proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
107
- return _parse_musl_version(proc.stderr)
108
-
109
-
110
- def platform_tags(arch: str) -> Iterator[str]:
111
- """Generate musllinux tags compatible to the current platform.
112
-
113
- :param arch: Should be the part of platform tag after the ``linux_``
114
- prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
115
- prerequisite for the current platform to be musllinux-compatible.
116
-
117
- :returns: An iterator of compatible musllinux tags.
118
- """
119
- sys_musl = _get_musl_version(sys.executable)
120
- if sys_musl is None: # Python not dynamically linked against musl.
121
- return
122
- for minor in range(sys_musl.minor, -1, -1):
123
- yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
124
-
125
-
126
- if __name__ == "__main__": # pragma: no cover
127
- import sysconfig
128
-
129
- plat = sysconfig.get_platform()
130
- assert plat.startswith("linux-"), "not linux"
131
-
132
- print("plat:", plat)
133
- print("musl:", _get_musl_version(sys.executable))
134
- print("tags:", end=" ")
135
- for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
136
- print(t, end="\n ")
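The module above detects a musl-linked interpreter and derives compatible wheel tags from its version. A hedged sketch of driving `platform_tags` from this version of the module, assuming the private module is importable as `packaging._musllinux`; the tag values in the comments are what a musl 1.2 x86_64 system would typically produce, not output captured here:

```python
import re
import sysconfig

# Private module; the import path is an assumption for the version shown above.
from packaging._musllinux import platform_tags

# Derive the arch suffix the same way the module's __main__ block does.
arch = re.sub(r"[.-]", "_", sysconfig.get_platform().split("-", 1)[-1])

for tag in platform_tags(arch):   # yields nothing on glibc-linked Pythons
    print(tag)                    # e.g. musllinux_1_2_x86_64, musllinux_1_1_x86_64, ...
```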
spaces/AtomdffAI/wechatgpt4atom/channel/wechat/wechaty_channel.py DELETED
@@ -1,201 +0,0 @@
1
- # encoding:utf-8
2
-
3
- """
4
- wechaty channel
5
- Python Wechaty - https://github.com/wechaty/python-wechaty
6
- """
7
- import io
8
- import os
9
- import json
10
- import time
11
- import asyncio
12
- import requests
13
- from typing import Optional, Union
14
- from wechaty_puppet import MessageType, FileBox, ScanStatus # type: ignore
15
- from wechaty import Wechaty, Contact
16
- from wechaty.user import Message, Room, MiniProgram, UrlLink
17
- from channel.channel import Channel
18
- from common.log import logger
19
- from config import conf
20
-
21
-
22
- class WechatyChannel(Channel):
23
-
24
- def __init__(self):
25
- pass
26
-
27
- def startup(self):
28
- asyncio.run(self.main())
29
-
30
- async def main(self):
31
- config = conf()
32
- # Use the PadLocal protocol, which is more stable (for the free web protocol set os.environ['WECHATY_PUPPET_SERVICE_ENDPOINT'] = '127.0.0.1:8080')
33
- token = config.get('wechaty_puppet_service_token')
34
- os.environ['WECHATY_PUPPET_SERVICE_TOKEN'] = token
35
- global bot
36
- bot = Wechaty()
37
-
38
- bot.on('scan', self.on_scan)
39
- bot.on('login', self.on_login)
40
- bot.on('message', self.on_message)
41
- await bot.start()
42
-
43
- async def on_login(self, contact: Contact):
44
- logger.info('[WX] login user={}'.format(contact))
45
-
46
- async def on_scan(self, status: ScanStatus, qr_code: Optional[str] = None,
47
- data: Optional[str] = None):
48
- contact = self.Contact.load(self.contact_id)
49
- logger.info('[WX] scan user={}, scan status={}, scan qr_code={}'.format(contact, status.name, qr_code))
50
- # print(f'user <{contact}> scan status: {status.name} , 'f'qr_code: {qr_code}')
51
-
52
- async def on_message(self, msg: Message):
53
- """
54
- listen for message event
55
- """
56
- from_contact = msg.talker() # sender of the message
57
- to_contact = msg.to() # recipient
58
- room = msg.room() # group chat the message came from; None if it is not a group message
59
- from_user_id = from_contact.contact_id
60
- to_user_id = to_contact.contact_id # recipient id
61
- # other_user_id = msg['User']['UserName'] # counterpart id
62
- content = msg.text()
63
- mention_content = await msg.mention_text() # message text with the @name mention stripped
64
- match_prefix = self.check_prefix(content, conf().get('single_chat_prefix'))
65
- conversation: Union[Room, Contact] = from_contact if room is None else room
66
-
67
- if room is None and msg.type() == MessageType.MESSAGE_TYPE_TEXT:
68
- if not msg.is_self() and match_prefix is not None:
69
- # a friend sent a message to this account
70
- if match_prefix != '':
71
- str_list = content.split(match_prefix, 1)
72
- if len(str_list) == 2:
73
- content = str_list[1].strip()
74
-
75
- img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix'))
76
- if img_match_prefix:
77
- content = content.split(img_match_prefix, 1)[1].strip()
78
- await self._do_send_img(content, from_user_id)
79
- else:
80
- await self._do_send(content, from_user_id)
81
- elif msg.is_self() and match_prefix:
82
- # this account sent a message to a friend
83
- str_list = content.split(match_prefix, 1)
84
- if len(str_list) == 2:
85
- content = str_list[1].strip()
86
- img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix'))
87
- if img_match_prefix:
88
- content = content.split(img_match_prefix, 1)[1].strip()
89
- await self._do_send_img(content, to_user_id)
90
- else:
91
- await self._do_send(content, to_user_id)
92
- elif room and msg.type() == MessageType.MESSAGE_TYPE_TEXT:
93
- # group chat & text message
94
- room_id = room.room_id
95
- room_name = await room.topic()
96
- from_user_id = from_contact.contact_id
97
- from_user_name = from_contact.name
98
- is_at = await msg.mention_self()
99
- content = mention_content
100
- config = conf()
101
- match_prefix = (is_at and not config.get("group_at_off", False)) \
102
- or self.check_prefix(content, config.get('group_chat_prefix')) \
103
- or self.check_contain(content, config.get('group_chat_keyword'))
104
- if ('ALL_GROUP' in config.get('group_name_white_list') or room_name in config.get(
105
- 'group_name_white_list') or self.check_contain(room_name, config.get(
106
- 'group_name_keyword_white_list'))) and match_prefix:
107
- img_match_prefix = self.check_prefix(content, conf().get('image_create_prefix'))
108
- if img_match_prefix:
109
- content = content.split(img_match_prefix, 1)[1].strip()
110
- await self._do_send_group_img(content, room_id)
111
- else:
112
- await self._do_send_group(content, room_id, from_user_id, from_user_name)
113
-
114
- async def send(self, message: Union[str, Message, FileBox, Contact, UrlLink, MiniProgram], receiver):
115
- logger.info('[WX] sendMsg={}, receiver={}'.format(message, receiver))
116
- if receiver:
117
- contact = await bot.Contact.find(receiver)
118
- await contact.say(message)
119
-
120
- async def send_group(self, message: Union[str, Message, FileBox, Contact, UrlLink, MiniProgram], receiver):
121
- logger.info('[WX] sendMsg={}, receiver={}'.format(message, receiver))
122
- if receiver:
123
- room = await bot.Room.find(receiver)
124
- await room.say(message)
125
-
126
- async def _do_send(self, query, reply_user_id):
127
- try:
128
- if not query:
129
- return
130
- context = dict()
131
- context['from_user_id'] = reply_user_id
132
- reply_text = super().build_reply_content(query, context)
133
- if reply_text:
134
- await self.send(conf().get("single_chat_reply_prefix") + reply_text, reply_user_id)
135
- except Exception as e:
136
- logger.exception(e)
137
-
138
- async def _do_send_img(self, query, reply_user_id):
139
- try:
140
- if not query:
141
- return
142
- context = dict()
143
- context['type'] = 'IMAGE_CREATE'
144
- img_url = super().build_reply_content(query, context)
145
- if not img_url:
146
- return
147
- # image download
148
- # pic_res = requests.get(img_url, stream=True)
149
- # image_storage = io.BytesIO()
150
- # for block in pic_res.iter_content(1024):
151
- # image_storage.write(block)
152
- # image_storage.seek(0)
153
-
154
- # send the image
155
- logger.info('[WX] sendImage, receiver={}'.format(reply_user_id))
156
- t = int(time.time())
157
- file_box = FileBox.from_url(url=img_url, name=str(t) + '.png')
158
- await self.send(file_box, reply_user_id)
159
- except Exception as e:
160
- logger.exception(e)
161
-
162
- async def _do_send_group(self, query, group_id, group_user_id, group_user_name):
163
- if not query:
164
- return
165
- context = dict()
166
- context['from_user_id'] = str(group_id) + '-' + str(group_user_id)
167
- reply_text = super().build_reply_content(query, context)
168
- if reply_text:
169
- reply_text = '@' + group_user_name + ' ' + reply_text.strip()
170
- await self.send_group(conf().get("group_chat_reply_prefix", "") + reply_text, group_id)
171
-
172
- async def _do_send_group_img(self, query, reply_room_id):
173
- try:
174
- if not query:
175
- return
176
- context = dict()
177
- context['type'] = 'IMAGE_CREATE'
178
- img_url = super().build_reply_content(query, context)
179
- if not img_url:
180
- return
181
- # send the image
182
- logger.info('[WX] sendImage, receiver={}'.format(reply_room_id))
183
- t = int(time.time())
184
- file_box = FileBox.from_url(url=img_url, name=str(t) + '.png')
185
- await self.send_group(file_box, reply_room_id)
186
- except Exception as e:
187
- logger.exception(e)
188
-
189
- def check_prefix(self, content, prefix_list):
190
- for prefix in prefix_list:
191
- if content.startswith(prefix):
192
- return prefix
193
- return None
194
-
195
- def check_contain(self, content, keyword_list):
196
- if not keyword_list:
197
- return None
198
- for ky in keyword_list:
199
- if content.find(ky) != -1:
200
- return True
201
- return None
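The handler above routes incoming messages by prefix before calling the reply builder. A small self-contained restatement of that prefix logic, lifted out of the class for illustration; the prefix values are placeholders, the real ones come from the project's config.json:

```python
single_chat_prefix = ["bot", "@bot"]   # placeholder values

def check_prefix(content, prefix_list):
    # Same logic as WechatyChannel.check_prefix above.
    for prefix in prefix_list:
        if content.startswith(prefix):
            return prefix
    return None

content = "bot hello there"
match_prefix = check_prefix(content, single_chat_prefix)
if match_prefix is not None and match_prefix != '':
    # Strip the matched prefix before building the reply, as on_message does.
    query = content.split(match_prefix, 1)[1].strip()
    print(query)  # -> "hello there"
```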
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/lightning_train_net.py DELETED
@@ -1,239 +0,0 @@
1
- #!/usr/bin/env python3
2
- # Copyright (c) Facebook, Inc. and its affiliates.
3
- # Lightning Trainer should be considered beta at this point
4
- # We have confirmed that training and validation run correctly and produce correct results
5
- # Depending on how you launch the trainer, there are issues with processes terminating correctly
6
- # This module is still dependent on D2 logging, but could be transferred to use Lightning logging
7
-
8
- import logging
9
- import os
10
- import time
11
- import weakref
12
- from collections import OrderedDict
13
- from typing import Any, Dict, List
14
-
15
- import detectron2.utils.comm as comm
16
- from detectron2.checkpoint import DetectionCheckpointer
17
- from detectron2.config import get_cfg
18
- from detectron2.data import build_detection_test_loader, build_detection_train_loader
19
- from detectron2.engine import (
20
- DefaultTrainer,
21
- SimpleTrainer,
22
- default_argument_parser,
23
- default_setup,
24
- default_writers,
25
- hooks,
26
- )
27
- from detectron2.evaluation import print_csv_format
28
- from detectron2.evaluation.testing import flatten_results_dict
29
- from detectron2.modeling import build_model
30
- from detectron2.solver import build_lr_scheduler, build_optimizer
31
- from detectron2.utils.events import EventStorage
32
- from detectron2.utils.logger import setup_logger
33
-
34
- import pytorch_lightning as pl # type: ignore
35
- from pytorch_lightning import LightningDataModule, LightningModule
36
- from train_net import build_evaluator
37
-
38
- logging.basicConfig(level=logging.INFO)
39
- logger = logging.getLogger("detectron2")
40
-
41
-
42
- class TrainingModule(LightningModule):
43
- def __init__(self, cfg):
44
- super().__init__()
45
- if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2
46
- setup_logger()
47
- self.cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
48
- self.storage: EventStorage = None
49
- self.model = build_model(self.cfg)
50
-
51
- self.start_iter = 0
52
- self.max_iter = cfg.SOLVER.MAX_ITER
53
-
54
- def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
55
- checkpoint["iteration"] = self.storage.iter
56
-
57
- def on_load_checkpoint(self, checkpointed_state: Dict[str, Any]) -> None:
58
- self.start_iter = checkpointed_state["iteration"]
59
- self.storage.iter = self.start_iter
60
-
61
- def setup(self, stage: str):
62
- if self.cfg.MODEL.WEIGHTS:
63
- self.checkpointer = DetectionCheckpointer(
64
- # Assume you want to save checkpoints together with logs/statistics
65
- self.model,
66
- self.cfg.OUTPUT_DIR,
67
- )
68
- logger.info(f"Load model weights from checkpoint: {self.cfg.MODEL.WEIGHTS}.")
69
- # Only load weights, use lightning checkpointing if you want to resume
70
- self.checkpointer.load(self.cfg.MODEL.WEIGHTS)
71
-
72
- self.iteration_timer = hooks.IterationTimer()
73
- self.iteration_timer.before_train()
74
- self.data_start = time.perf_counter()
75
- self.writers = None
76
-
77
- def training_step(self, batch, batch_idx):
78
- data_time = time.perf_counter() - self.data_start
79
- # Need to manually enter/exit since trainer may launch processes
80
- # This ideally belongs in setup, but setup seems to run before processes are spawned
81
- if self.storage is None:
82
- self.storage = EventStorage(0)
83
- self.storage.__enter__()
84
- self.iteration_timer.trainer = weakref.proxy(self)
85
- self.iteration_timer.before_step()
86
- self.writers = (
87
- default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
88
- if comm.is_main_process()
89
- else {}
90
- )
91
-
92
- loss_dict = self.model(batch)
93
- SimpleTrainer.write_metrics(loss_dict, data_time)
94
-
95
- opt = self.optimizers()
96
- self.storage.put_scalar(
97
- "lr", opt.param_groups[self._best_param_group_id]["lr"], smoothing_hint=False
98
- )
99
- self.iteration_timer.after_step()
100
- self.storage.step()
101
- # A little odd to put before step here, but it's the best way to get a proper timing
102
- self.iteration_timer.before_step()
103
-
104
- if self.storage.iter % 20 == 0:
105
- for writer in self.writers:
106
- writer.write()
107
- return sum(loss_dict.values())
108
-
109
- def training_step_end(self, training_step_outputs):
110
- self.data_start = time.perf_counter()
111
- return training_step_outputs
112
-
113
- def training_epoch_end(self, training_step_outputs):
114
- self.iteration_timer.after_train()
115
- if comm.is_main_process():
116
- self.checkpointer.save("model_final")
117
- for writer in self.writers:
118
- writer.write()
119
- writer.close()
120
- self.storage.__exit__(None, None, None)
121
-
122
- def _process_dataset_evaluation_results(self) -> OrderedDict:
123
- results = OrderedDict()
124
- for idx, dataset_name in enumerate(self.cfg.DATASETS.TEST):
125
- results[dataset_name] = self._evaluators[idx].evaluate()
126
- if comm.is_main_process():
127
- print_csv_format(results[dataset_name])
128
-
129
- if len(results) == 1:
130
- results = list(results.values())[0]
131
- return results
132
-
133
- def _reset_dataset_evaluators(self):
134
- self._evaluators = []
135
- for dataset_name in self.cfg.DATASETS.TEST:
136
- evaluator = build_evaluator(self.cfg, dataset_name)
137
- evaluator.reset()
138
- self._evaluators.append(evaluator)
139
-
140
- def on_validation_epoch_start(self):
141
- self._reset_dataset_evaluators()
142
-
143
- def validation_epoch_end(self, _outputs):
144
- results = self._process_dataset_evaluation_results()
145
-
146
- flattened_results = flatten_results_dict(results)
147
- for k, v in flattened_results.items():
148
- try:
149
- v = float(v)
150
- except Exception as e:
151
- raise ValueError(
152
- "[EvalHook] eval_function should return a nested dict of float. "
153
- "Got '{}: {}' instead.".format(k, v)
154
- ) from e
155
- self.storage.put_scalars(**flattened_results, smoothing_hint=False)
156
-
157
- def validation_step(self, batch, batch_idx: int, dataloader_idx: int = 0) -> None:
158
- if not isinstance(batch, List):
159
- batch = [batch]
160
- outputs = self.model(batch)
161
- self._evaluators[dataloader_idx].process(batch, outputs)
162
-
163
- def configure_optimizers(self):
164
- optimizer = build_optimizer(self.cfg, self.model)
165
- self._best_param_group_id = hooks.LRScheduler.get_best_param_group_id(optimizer)
166
- scheduler = build_lr_scheduler(self.cfg, optimizer)
167
- return [optimizer], [{"scheduler": scheduler, "interval": "step"}]
168
-
169
-
170
- class DataModule(LightningDataModule):
171
- def __init__(self, cfg):
172
- super().__init__()
173
- self.cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
174
-
175
- def train_dataloader(self):
176
- return build_detection_train_loader(self.cfg)
177
-
178
- def val_dataloader(self):
179
- dataloaders = []
180
- for dataset_name in self.cfg.DATASETS.TEST:
181
- dataloaders.append(build_detection_test_loader(self.cfg, dataset_name))
182
- return dataloaders
183
-
184
-
185
- def main(args):
186
- cfg = setup(args)
187
- train(cfg, args)
188
-
189
-
190
- def train(cfg, args):
191
- trainer_params = {
192
- # training loop is bounded by max steps, use a large max_epochs to make
193
- # sure max_steps is met first
194
- "max_epochs": 10 ** 8,
195
- "max_steps": cfg.SOLVER.MAX_ITER,
196
- "val_check_interval": cfg.TEST.EVAL_PERIOD if cfg.TEST.EVAL_PERIOD > 0 else 10 ** 8,
197
- "num_nodes": args.num_machines,
198
- "gpus": args.num_gpus,
199
- "num_sanity_val_steps": 0,
200
- }
201
- if cfg.SOLVER.AMP.ENABLED:
202
- trainer_params["precision"] = 16
203
-
204
- last_checkpoint = os.path.join(cfg.OUTPUT_DIR, "last.ckpt")
205
- if args.resume:
206
- # resume training from checkpoint
207
- trainer_params["resume_from_checkpoint"] = last_checkpoint
208
- logger.info(f"Resuming training from checkpoint: {last_checkpoint}.")
209
-
210
- trainer = pl.Trainer(**trainer_params)
211
- logger.info(f"start to train with {args.num_machines} nodes and {args.num_gpus} GPUs")
212
-
213
- module = TrainingModule(cfg)
214
- data_module = DataModule(cfg)
215
- if args.eval_only:
216
- logger.info("Running inference")
217
- trainer.validate(module, data_module)
218
- else:
219
- logger.info("Running training")
220
- trainer.fit(module, data_module)
221
-
222
-
223
- def setup(args):
224
- """
225
- Create configs and perform basic setups.
226
- """
227
- cfg = get_cfg()
228
- cfg.merge_from_file(args.config_file)
229
- cfg.merge_from_list(args.opts)
230
- cfg.freeze()
231
- default_setup(cfg, args)
232
- return cfg
233
-
234
-
235
- if __name__ == "__main__":
236
- parser = default_argument_parser()
237
- args = parser.parse_args()
238
- logger.info("Command Line Args:", args)
239
- main(args)
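A hedged sketch of driving the `setup`/`train` entry points above programmatically rather than via the CLI, assuming the script's functions are in scope (for example, when run from within this module); the config file path is a placeholder for any detectron2 config:

```python
from detectron2.engine import default_argument_parser

# Parse the same flags the __main__ block above expects.
args = default_argument_parser().parse_args(
    ["--config-file", "configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml",
     "--num-gpus", "1"]
)

cfg = setup(args)   # merge config file + CLI opts, freeze, run default_setup
train(cfg, args)    # build TrainingModule/DataModule and call trainer.fit
```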
spaces/BartPoint/VoiceChange_Beta/app_multi.py DELETED
@@ -1,496 +0,0 @@
1
- from typing import Union
2
-
3
- from argparse import ArgumentParser
4
-
5
- import asyncio
6
- import json
7
- import hashlib
8
- from os import path, getenv
9
-
10
- import gradio as gr
11
-
12
- import torch
13
-
14
- import numpy as np
15
- import librosa
16
-
17
- import edge_tts
18
-
19
- import config
20
- import util
21
- from fairseq import checkpoint_utils
22
- from infer_pack.models import (
23
- SynthesizerTrnMs256NSFsid,
24
- SynthesizerTrnMs256NSFsid_nono,
25
- SynthesizerTrnMs768NSFsid,
26
- SynthesizerTrnMs768NSFsid_nono,
27
- )
28
- from vc_infer_pipeline import VC
29
- from config import Config
30
- config = Config()
31
- force_support = None
32
- if config.unsupported is False:
33
- if config.device == "mps" or config.device == "cpu":
34
- force_support = False
35
- else:
36
- force_support = True
37
-
38
- # Reference: https://huggingface.co/spaces/zomehwh/rvc-models/blob/main/app.py#L21 # noqa
39
- in_hf_space = getenv('SYSTEM') == 'spaces'
40
-
41
- # Argument parsing
42
- arg_parser = ArgumentParser()
43
- arg_parser.add_argument(
44
- '--hubert',
45
- default=getenv('RVC_HUBERT', 'hubert_base.pt'),
46
- help='path to hubert base model (default: hubert_base.pt)'
47
- )
48
- arg_parser.add_argument(
49
- '--config',
50
- default=getenv('RVC_MULTI_CFG', 'multi_config.json'),
51
- help='path to config file (default: multi_config.json)'
52
- )
53
- arg_parser.add_argument(
54
- '--api',
55
- action='store_true',
56
- help='enable api endpoint'
57
- )
58
- arg_parser.add_argument(
59
- '--cache-examples',
60
- action='store_true',
61
- help='enable example caching, please remember delete gradio_cached_examples folder when example config has been modified' # noqa
62
- )
63
- args = arg_parser.parse_args()
64
-
65
- app_css = '''
66
- #model_info img {
67
- max-width: 100px;
68
- max-height: 100px;
69
- float: right;
70
- }
71
-
72
- #model_info p {
73
- margin: unset;
74
- }
75
- '''
76
-
77
- app = gr.Blocks(
78
- theme=gr.themes.Soft(primary_hue="orange", secondary_hue="slate"),
79
- css=app_css,
80
- analytics_enabled=False
81
- )
82
-
83
- # Load hubert model
84
- models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
85
- ["hubert_base.pt"],
86
- suffix="",
87
- )
88
- hubert_model = models[0]
89
- hubert_model = hubert_model.to(config.device)
90
- if config.is_half:
91
- hubert_model = hubert_model.half()
92
- else:
93
- hubert_model = hubert_model.float()
94
- hubert_model.eval()
95
-
96
- # Load models
97
- multi_cfg = json.load(open(args.config, 'r'))
98
- loaded_models = []
99
-
100
- for model_name in multi_cfg.get('models'):
101
- print(f'Loading model: {model_name}')
102
-
103
- # Load model info
104
- model_info = json.load(
105
- open(path.join('model', model_name, 'config.json'), 'r')
106
- )
107
-
108
- # Load RVC checkpoint
109
- cpt = torch.load(
110
- path.join('model', model_name, model_info['model']),
111
- map_location='cpu'
112
- )
113
- tgt_sr = cpt['config'][-1]
114
- cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
115
- if_f0 = cpt.get("f0", 1)
116
- version = cpt.get("version", "v1")
117
- if version == "v1":
118
- if if_f0 == 1:
119
- net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half)
120
- else:
121
- net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
122
- model_version = "V1"
123
- elif version == "v2":
124
- if if_f0 == 1:
125
- net_g = SynthesizerTrnMs768NSFsid(*cpt["config"], is_half=config.is_half)
126
- else:
127
- net_g = SynthesizerTrnMs768NSFsid_nono(*cpt["config"])
128
- model_version = "V2"
129
- del net_g.enc_q
130
-
131
- print(net_g.load_state_dict(cpt["weight"], strict=False))
132
- net_g.eval().to(config.device)
133
- if config.is_half:
134
- net_g = net_g.half()
135
- else:
136
- net_g = net_g.float()
137
- vc = VC(tgt_sr, config)
138
-
139
- loaded_models.append(dict(
140
- name=model_name,
141
- metadata=model_info,
142
- vc=vc,
143
- net_g=net_g,
144
- if_f0=if_f0,
145
- target_sr=tgt_sr,
146
- test=model_version
147
- ))
148
-
149
- print(f'Models loaded: {len(loaded_models)}')
150
-
151
- # Edge TTS speakers
152
- tts_speakers_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices()) # noqa
153
-
154
-
155
- # https://github.com/fumiama/Retrieval-based-Voice-Conversion-WebUI/blob/main/infer-web.py#L118 # noqa
156
- def vc_func(
157
- input_audio, model_index, pitch_adjust, f0_method, feat_ratio,
158
- filter_radius, rms_mix_rate, resample_option
159
- ):
160
- if input_audio is None:
161
- return (None, 'Please provide input audio.')
162
-
163
- if model_index is None:
164
- return (None, 'Please select a model.')
165
-
166
- model = loaded_models[model_index]
167
-
168
- # Reference: so-vits
169
- (audio_samp, audio_npy) = input_audio
170
-
171
- # https://huggingface.co/spaces/zomehwh/rvc-models/blob/main/app.py#L49
172
- # Can be changed as needed, we will see
173
- if (audio_npy.shape[0] / audio_samp) > 320 and in_hf_space:
174
- return (None, 'Input audio is longer than 320 secs.')
175
-
176
- # Bloody hell: https://stackoverflow.com/questions/26921836/
177
- if audio_npy.dtype != np.float32: # :thonk:
178
- audio_npy = (
179
- audio_npy / np.iinfo(audio_npy.dtype).max
180
- ).astype(np.float32)
181
-
182
- if len(audio_npy.shape) > 1:
183
- audio_npy = librosa.to_mono(audio_npy.transpose(1, 0))
184
-
185
- if audio_samp != 16000:
186
- audio_npy = librosa.resample(
187
- audio_npy,
188
- orig_sr=audio_samp,
189
- target_sr=16000
190
- )
191
-
192
- pitch_int = int(pitch_adjust)
193
-
194
- resample = (
195
- 0 if resample_option == 'Disable resampling'
196
- else int(resample_option)
197
- )
198
-
199
- times = [0, 0, 0]
200
-
201
- checksum = hashlib.sha512()
202
- checksum.update(audio_npy.tobytes())
203
-
204
- print(model['test'])
205
-
206
- output_audio = model['vc'].pipeline(
207
- hubert_model,
208
- model['net_g'],
209
- model['metadata'].get('speaker_id', 0),
210
- audio_npy,
211
- checksum.hexdigest(),
212
- times,
213
- pitch_int,
214
- f0_method,
215
- path.join('model', model['name'], model['metadata']['feat_index']),
216
- feat_ratio,
217
- model['if_f0'],
218
- filter_radius,
219
- model['target_sr'],
220
- resample,
221
- rms_mix_rate,
222
- model['test'],
223
- 0.5
224
- )
225
-
226
- out_sr = (
227
- resample if resample >= 16000 and model['target_sr'] != resample
228
- else model['target_sr']
229
- )
230
-
231
- print(f'npy: {times[0]}s, f0: {times[1]}s, infer: {times[2]}s')
232
- return ((out_sr, output_audio), 'Success')
233
-
234
-
235
- async def edge_tts_vc_func(
236
- input_text, model_index, tts_speaker, pitch_adjust, f0_method, feat_ratio,
237
- filter_radius, rms_mix_rate, resample_option
238
- ):
239
- if input_text is None:
240
- return (None, 'Please provide TTS text.')
241
-
242
- if tts_speaker is None:
243
- return (None, 'Please select TTS speaker.')
244
-
245
- if model_index is None:
246
- return (None, 'Please select a model.')
247
-
248
- speaker = tts_speakers_list[tts_speaker]['ShortName']
249
- (tts_np, tts_sr) = await util.call_edge_tts(speaker, input_text)
250
- return vc_func(
251
- (tts_sr, tts_np),
252
- model_index,
253
- pitch_adjust,
254
- f0_method,
255
- feat_ratio,
256
- filter_radius,
257
- rms_mix_rate,
258
- resample_option
259
- )
260
-
261
-
262
- def update_model_info(model_index):
263
- if model_index is None:
264
- return str(
265
- '### Model info\n'
266
- 'Please select a model from dropdown above.'
267
- )
268
-
269
- model = loaded_models[model_index]
270
- model_icon = model['metadata'].get('icon', '')
271
-
272
- return str(
273
- '### Model info\n'
274
- '![model icon]({icon})'
275
- '**{name}**\n\n'
276
- 'Author: {author}\n\n'
277
- 'Source: {source}\n\n'
278
- '{note}'
279
- ).format(
280
- name=model['metadata'].get('name'),
281
- author=model['metadata'].get('author', 'Anonymous'),
282
- source=model['metadata'].get('source', 'Unknown'),
283
- note=model['metadata'].get('note', ''),
284
- icon=(
285
- model_icon
286
- if model_icon.startswith(('http://', 'https://'))
287
- else '/file/model/%s/%s' % (model['name'], model_icon)
288
- )
289
- )
290
-
291
-
292
- def _example_vc(
293
- input_audio, model_index, pitch_adjust, f0_method, feat_ratio,
294
- filter_radius, rms_mix_rate, resample_option
295
- ):
296
- (audio, message) = vc_func(
297
- input_audio, model_index, pitch_adjust, f0_method, feat_ratio,
298
- filter_radius, rms_mix_rate, resample_option
299
- )
300
- return (
301
- audio,
302
- message,
303
- update_model_info(model_index)
304
- )
305
-
306
-
307
- async def _example_edge_tts(
308
- input_text, model_index, tts_speaker, pitch_adjust, f0_method, feat_ratio,
309
- filter_radius, rms_mix_rate, resample_option
310
- ):
311
- (audio, message) = await edge_tts_vc_func(
312
- input_text, model_index, tts_speaker, pitch_adjust, f0_method,
313
- feat_ratio, filter_radius, rms_mix_rate, resample_option
314
- )
315
- return (
316
- audio,
317
- message,
318
- update_model_info(model_index)
319
- )
320
-
321
-
322
- with app:
323
- gr.Markdown(
324
- '## A simplistic Web interface\n'
325
- 'RVC interface, project based on [RVC-WebUI](https://github.com/fumiama/Retrieval-based-Voice-Conversion-WebUI)' # thx noqa
326
- 'A lot of inspiration from what\'s already out there, including [zomehwh/rvc-models](https://huggingface.co/spaces/zomehwh/rvc-models) & [DJQmUKV/rvc-inference](https://huggingface.co/spaces/DJQmUKV/rvc-inference).\n ' # thx noqa
327
- )
328
-
329
- with gr.Row():
330
- with gr.Column():
331
- with gr.Tab('Audio conversion'):
332
- input_audio = gr.Audio(label='Input audio')
333
-
334
- vc_convert_btn = gr.Button('Convert', variant='primary')
335
-
336
- with gr.Tab('TTS conversion'):
337
- tts_input = gr.TextArea(
338
- label='TTS input text'
339
- )
340
- tts_speaker = gr.Dropdown(
341
- [
342
- '%s (%s)' % (
343
- s['FriendlyName'],
344
- s['Gender']
345
- )
346
- for s in tts_speakers_list
347
- ],
348
- label='TTS speaker',
349
- type='index'
350
- )
351
-
352
- tts_convert_btn = gr.Button('Convert', variant='primary')
353
-
354
- pitch_adjust = gr.Slider(
355
- label='Pitch',
356
- minimum=-24,
357
- maximum=24,
358
- step=1,
359
- value=0
360
- )
361
- f0_method = gr.Radio(
362
- label='f0 methods',
363
- choices=['pm', 'harvest', 'crepe'],
364
- value='pm',
365
- interactive=True
366
- )
367
-
368
- with gr.Accordion('Advanced options', open=False):
369
- feat_ratio = gr.Slider(
370
- label='Feature ratio',
371
- minimum=0,
372
- maximum=1,
373
- step=0.1,
374
- value=0.6
375
- )
376
- filter_radius = gr.Slider(
377
- label='Filter radius',
378
- minimum=0,
379
- maximum=7,
380
- step=1,
381
- value=3
382
- )
383
- rms_mix_rate = gr.Slider(
384
- label='Volume envelope mix rate',
385
- minimum=0,
386
- maximum=1,
387
- step=0.1,
388
- value=1
389
- )
390
- resample_rate = gr.Dropdown(
391
- [
392
- 'Disable resampling',
393
- '16000',
394
- '22050',
395
- '44100',
396
- '48000'
397
- ],
398
- label='Resample rate',
399
- value='Disable resampling'
400
- )
401
-
402
- with gr.Column():
403
- # Model select
404
- model_index = gr.Dropdown(
405
- [
406
- '%s - %s' % (
407
- m['metadata'].get('source', 'Unknown'),
408
- m['metadata'].get('name')
409
- )
410
- for m in loaded_models
411
- ],
412
- label='Model',
413
- type='index'
414
- )
415
-
416
- # Model info
417
- with gr.Box():
418
- model_info = gr.Markdown(
419
- '### Model info\n'
420
- 'Please select a model from dropdown above.',
421
- elem_id='model_info'
422
- )
423
-
424
- output_audio = gr.Audio(label='Output audio')
425
- output_msg = gr.Textbox(label='Output message')
426
-
427
- multi_examples = multi_cfg.get('examples')
428
- if (
429
- multi_examples and
430
- multi_examples.get('vc') and multi_examples.get('tts_vc')
431
- ):
432
- with gr.Accordion('Sweet sweet examples', open=False):
433
- with gr.Row():
434
- # VC Example
435
- if multi_examples.get('vc'):
436
- gr.Examples(
437
- label='Audio conversion examples',
438
- examples=multi_examples.get('vc'),
439
- inputs=[
440
- input_audio, model_index, pitch_adjust, f0_method,
441
- feat_ratio
442
- ],
443
- outputs=[output_audio, output_msg, model_info],
444
- fn=_example_vc,
445
- cache_examples=args.cache_examples,
446
- run_on_click=args.cache_examples
447
- )
448
-
449
- # Edge TTS Example
450
- if multi_examples.get('tts_vc'):
451
- gr.Examples(
452
- label='TTS conversion examples',
453
- examples=multi_examples.get('tts_vc'),
454
- inputs=[
455
- tts_input, model_index, tts_speaker, pitch_adjust,
456
- f0_method, feat_ratio
457
- ],
458
- outputs=[output_audio, output_msg, model_info],
459
- fn=_example_edge_tts,
460
- cache_examples=args.cache_examples,
461
- run_on_click=args.cache_examples
462
- )
463
-
464
- vc_convert_btn.click(
465
- vc_func,
466
- [
467
- input_audio, model_index, pitch_adjust, f0_method, feat_ratio,
468
- filter_radius, rms_mix_rate, resample_rate
469
- ],
470
- [output_audio, output_msg],
471
- api_name='audio_conversion'
472
- )
473
-
474
- tts_convert_btn.click(
475
- edge_tts_vc_func,
476
- [
477
- tts_input, model_index, tts_speaker, pitch_adjust, f0_method,
478
- feat_ratio, filter_radius, rms_mix_rate, resample_rate
479
- ],
480
- [output_audio, output_msg],
481
- api_name='tts_conversion'
482
- )
483
-
484
- model_index.change(
485
- update_model_info,
486
- inputs=[model_index],
487
- outputs=[model_info],
488
- show_progress=False,
489
- queue=False
490
- )
491
-
492
- app.queue(
493
- concurrency_count=1,
494
- max_size=20,
495
- api_open=args.api
496
- ).launch()
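The app above loads models listed in `multi_config.json` plus a per-model `config.json`. An illustrative shape for those files, written as Python dicts; the key names are the ones the loader reads, and every value is a placeholder rather than a real model:

```python
# Illustrative shape of multi_config.json and model/<name>/config.json.
multi_config = {
    "models": ["my_model"],                    # folder names under model/
    "examples": {                              # optional gr.Examples rows
        "vc": [],
        "tts_vc": [],
    },
}

model_config = {                               # model/my_model/config.json
    "model": "my_model.pth",                   # RVC checkpoint filename
    "feat_index": "added_IVF256_Flat.index",   # feature retrieval index
    "speaker_id": 0,
    "name": "My Model",
    "author": "Anonymous",
    "source": "Unknown",
    "icon": "icon.png",
    "note": "",
}
```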
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pygments/formatters/img.py DELETED
@@ -1,645 +0,0 @@
1
- """
2
- pygments.formatters.img
3
- ~~~~~~~~~~~~~~~~~~~~~~~
4
-
5
- Formatter for Pixmap output.
6
-
7
- :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
8
- :license: BSD, see LICENSE for details.
9
- """
10
-
11
- import os
12
- import sys
13
-
14
- from pip._vendor.pygments.formatter import Formatter
15
- from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
16
- get_choice_opt
17
-
18
- import subprocess
19
-
20
- # Import this carefully
21
- try:
22
- from PIL import Image, ImageDraw, ImageFont
23
- pil_available = True
24
- except ImportError:
25
- pil_available = False
26
-
27
- try:
28
- import _winreg
29
- except ImportError:
30
- try:
31
- import winreg as _winreg
32
- except ImportError:
33
- _winreg = None
34
-
35
- __all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
36
- 'BmpImageFormatter']
37
-
38
-
39
- # For some unknown reason every font calls it something different
40
- STYLES = {
41
- 'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
42
- 'ITALIC': ['Oblique', 'Italic'],
43
- 'BOLD': ['Bold'],
44
- 'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
45
- }
46
-
47
- # A sane default for modern systems
48
- DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono'
49
- DEFAULT_FONT_NAME_WIN = 'Courier New'
50
- DEFAULT_FONT_NAME_MAC = 'Menlo'
51
-
52
-
53
- class PilNotAvailable(ImportError):
54
- """When Python imaging library is not available"""
55
-
56
-
57
- class FontNotFound(Exception):
58
- """When there are no usable fonts specified"""
59
-
60
-
61
- class FontManager:
62
- """
63
- Manages a set of fonts: normal, italic, bold, etc...
64
- """
65
-
66
- def __init__(self, font_name, font_size=14):
67
- self.font_name = font_name
68
- self.font_size = font_size
69
- self.fonts = {}
70
- self.encoding = None
71
- if sys.platform.startswith('win'):
72
- if not font_name:
73
- self.font_name = DEFAULT_FONT_NAME_WIN
74
- self._create_win()
75
- elif sys.platform.startswith('darwin'):
76
- if not font_name:
77
- self.font_name = DEFAULT_FONT_NAME_MAC
78
- self._create_mac()
79
- else:
80
- if not font_name:
81
- self.font_name = DEFAULT_FONT_NAME_NIX
82
- self._create_nix()
83
-
84
- def _get_nix_font_path(self, name, style):
85
- proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'],
86
- stdout=subprocess.PIPE, stderr=None)
87
- stdout, _ = proc.communicate()
88
- if proc.returncode == 0:
89
- lines = stdout.splitlines()
90
- for line in lines:
91
- if line.startswith(b'Fontconfig warning:'):
92
- continue
93
- path = line.decode().strip().strip(':')
94
- if path:
95
- return path
96
- return None
97
-
98
- def _create_nix(self):
99
- for name in STYLES['NORMAL']:
100
- path = self._get_nix_font_path(self.font_name, name)
101
- if path is not None:
102
- self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
103
- break
104
- else:
105
- raise FontNotFound('No usable fonts named: "%s"' %
106
- self.font_name)
107
- for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
108
- for stylename in STYLES[style]:
109
- path = self._get_nix_font_path(self.font_name, stylename)
110
- if path is not None:
111
- self.fonts[style] = ImageFont.truetype(path, self.font_size)
112
- break
113
- else:
114
- if style == 'BOLDITALIC':
115
- self.fonts[style] = self.fonts['BOLD']
116
- else:
117
- self.fonts[style] = self.fonts['NORMAL']
118
-
119
- def _get_mac_font_path(self, font_map, name, style):
120
- return font_map.get((name + ' ' + style).strip().lower())
121
-
122
- def _create_mac(self):
123
- font_map = {}
124
- for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'),
125
- '/Library/Fonts/', '/System/Library/Fonts/'):
126
- font_map.update(
127
- (os.path.splitext(f)[0].lower(), os.path.join(font_dir, f))
128
- for f in os.listdir(font_dir)
129
- if f.lower().endswith(('ttf', 'ttc')))
130
-
131
- for name in STYLES['NORMAL']:
132
- path = self._get_mac_font_path(font_map, self.font_name, name)
133
- if path is not None:
134
- self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
135
- break
136
- else:
137
- raise FontNotFound('No usable fonts named: "%s"' %
138
- self.font_name)
139
- for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
140
- for stylename in STYLES[style]:
141
- path = self._get_mac_font_path(font_map, self.font_name, stylename)
142
- if path is not None:
143
- self.fonts[style] = ImageFont.truetype(path, self.font_size)
144
- break
145
- else:
146
- if style == 'BOLDITALIC':
147
- self.fonts[style] = self.fonts['BOLD']
148
- else:
149
- self.fonts[style] = self.fonts['NORMAL']
150
-
151
- def _lookup_win(self, key, basename, styles, fail=False):
152
- for suffix in ('', ' (TrueType)'):
153
- for style in styles:
154
- try:
155
- valname = '%s%s%s' % (basename, style and ' '+style, suffix)
156
- val, _ = _winreg.QueryValueEx(key, valname)
157
- return val
158
- except OSError:
159
- continue
160
- else:
161
- if fail:
162
- raise FontNotFound('Font %s (%s) not found in registry' %
163
- (basename, styles[0]))
164
- return None
165
-
166
- def _create_win(self):
167
- lookuperror = None
168
- keynames = [ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
169
- (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Fonts'),
170
- (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
171
- (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows\CurrentVersion\Fonts') ]
172
- for keyname in keynames:
173
- try:
174
- key = _winreg.OpenKey(*keyname)
175
- try:
176
- path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
177
- self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
178
- for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
179
- path = self._lookup_win(key, self.font_name, STYLES[style])
180
- if path:
181
- self.fonts[style] = ImageFont.truetype(path, self.font_size)
182
- else:
183
- if style == 'BOLDITALIC':
184
- self.fonts[style] = self.fonts['BOLD']
185
- else:
186
- self.fonts[style] = self.fonts['NORMAL']
187
- return
188
- except FontNotFound as err:
189
- lookuperror = err
190
- finally:
191
- _winreg.CloseKey(key)
192
- except OSError:
193
- pass
194
- else:
195
- # If we get here, we checked all registry keys and had no luck
196
- # We can be in one of two situations now:
197
- # * All key lookups failed. In this case lookuperror is None and we
198
- # will raise a generic error
199
- # * At least one lookup failed with a FontNotFound error. In this
200
- # case, we will raise that as a more specific error
201
- if lookuperror:
202
- raise lookuperror
203
- raise FontNotFound('Can\'t open Windows font registry key')
204
-
205
- def get_char_size(self):
206
- """
207
- Get the character size.
208
- """
209
- return self.get_text_size('M')
210
-
211
- def get_text_size(self, text):
212
- """
213
- Get the text size (width, height).
214
- """
215
- font = self.fonts['NORMAL']
216
- if hasattr(font, 'getbbox'): # Pillow >= 9.2.0
217
- return font.getbbox(text)[2:4]
218
- else:
219
- return font.getsize(text)
220
-
221
- def get_font(self, bold, oblique):
222
- """
223
- Get the font based on bold and italic flags.
224
- """
225
- if bold and oblique:
226
- return self.fonts['BOLDITALIC']
227
- elif bold:
228
- return self.fonts['BOLD']
229
- elif oblique:
230
- return self.fonts['ITALIC']
231
- else:
232
- return self.fonts['NORMAL']
233
-
234
-
235
- class ImageFormatter(Formatter):
236
- """
237
- Create a PNG image from source code. This uses the Python Imaging Library to
238
- generate a pixmap from the source code.
239
-
240
- .. versionadded:: 0.10
241
-
242
- Additional options accepted:
243
-
244
- `image_format`
245
- An image format to output to that is recognised by PIL, these include:
246
-
247
- * "PNG" (default)
248
- * "JPEG"
249
- * "BMP"
250
- * "GIF"
251
-
252
- `line_pad`
253
- The extra spacing (in pixels) between each line of text.
254
-
255
- Default: 2
256
-
257
- `font_name`
258
- The font name to be used as the base font from which others, such as
259
- bold and italic fonts will be generated. This really should be a
260
- monospace font to look sane.
261
-
262
- Default: "Courier New" on Windows, "Menlo" on Mac OS, and
263
- "DejaVu Sans Mono" on \\*nix
264
-
265
- `font_size`
266
- The font size in points to be used.
267
-
268
- Default: 14
269
-
270
- `image_pad`
271
- The padding, in pixels to be used at each edge of the resulting image.
272
-
273
- Default: 10
274
-
275
- `line_numbers`
276
- Whether line numbers should be shown: True/False
277
-
278
- Default: True
279
-
280
- `line_number_start`
281
- The line number of the first line.
282
-
283
- Default: 1
284
-
285
- `line_number_step`
286
- The step used when printing line numbers.
287
-
288
- Default: 1
289
-
290
- `line_number_bg`
291
- The background colour (in "#123456" format) of the line number bar, or
292
- None to use the style background color.
293
-
294
- Default: "#eed"
295
-
296
- `line_number_fg`
297
- The text color of the line numbers (in "#123456"-like format).
298
-
299
- Default: "#886"
300
-
301
- `line_number_chars`
302
- The number of columns of line numbers allowable in the line number
303
- margin.
304
-
305
- Default: 2
306
-
307
- `line_number_bold`
308
- Whether line numbers will be bold: True/False
309
-
310
- Default: False
311
-
312
- `line_number_italic`
313
- Whether line numbers will be italicized: True/False
314
-
315
- Default: False
316
-
317
- `line_number_separator`
318
- Whether a line will be drawn between the line number area and the
319
- source code area: True/False
320
-
321
- Default: True
322
-
323
- `line_number_pad`
324
- The horizontal padding (in pixels) between the line number margin, and
325
- the source code area.
326
-
327
- Default: 6
328
-
329
- `hl_lines`
330
- Specify a list of lines to be highlighted.
331
-
332
- .. versionadded:: 1.2
333
-
334
- Default: empty list
335
-
336
- `hl_color`
337
- Specify the color for highlighting lines.
338
-
339
- .. versionadded:: 1.2
340
-
341
- Default: highlight color of the selected style
342
- """
343
-
344
- # Required by the pygments mapper
345
- name = 'img'
346
- aliases = ['img', 'IMG', 'png']
347
- filenames = ['*.png']
348
-
349
- unicodeoutput = False
350
-
351
- default_image_format = 'png'
352
-
353
- def __init__(self, **options):
354
- """
355
- See the class docstring for explanation of options.
356
- """
357
- if not pil_available:
358
- raise PilNotAvailable(
359
- 'Python Imaging Library is required for this formatter')
360
- Formatter.__init__(self, **options)
361
- self.encoding = 'latin1' # let pygments.format() do the right thing
362
- # Read the style
363
- self.styles = dict(self.style)
364
- if self.style.background_color is None:
365
- self.background_color = '#fff'
366
- else:
367
- self.background_color = self.style.background_color
368
- # Image options
369
- self.image_format = get_choice_opt(
370
- options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
371
- self.default_image_format, normcase=True)
372
- self.image_pad = get_int_opt(options, 'image_pad', 10)
373
- self.line_pad = get_int_opt(options, 'line_pad', 2)
374
- # The fonts
375
- fontsize = get_int_opt(options, 'font_size', 14)
376
- self.fonts = FontManager(options.get('font_name', ''), fontsize)
377
- self.fontw, self.fonth = self.fonts.get_char_size()
378
- # Line number options
379
- self.line_number_fg = options.get('line_number_fg', '#886')
380
- self.line_number_bg = options.get('line_number_bg', '#eed')
381
- self.line_number_chars = get_int_opt(options,
382
- 'line_number_chars', 2)
383
- self.line_number_bold = get_bool_opt(options,
384
- 'line_number_bold', False)
385
- self.line_number_italic = get_bool_opt(options,
386
- 'line_number_italic', False)
387
- self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
388
- self.line_numbers = get_bool_opt(options, 'line_numbers', True)
389
- self.line_number_separator = get_bool_opt(options,
390
- 'line_number_separator', True)
391
- self.line_number_step = get_int_opt(options, 'line_number_step', 1)
392
- self.line_number_start = get_int_opt(options, 'line_number_start', 1)
393
- if self.line_numbers:
394
- self.line_number_width = (self.fontw * self.line_number_chars +
395
- self.line_number_pad * 2)
396
- else:
397
- self.line_number_width = 0
398
- self.hl_lines = []
399
- hl_lines_str = get_list_opt(options, 'hl_lines', [])
400
- for line in hl_lines_str:
401
- try:
402
- self.hl_lines.append(int(line))
403
- except ValueError:
404
- pass
405
- self.hl_color = options.get('hl_color',
406
- self.style.highlight_color) or '#f90'
407
- self.drawables = []
408
-
409
- def get_style_defs(self, arg=''):
410
- raise NotImplementedError('The -S option is meaningless for the image '
411
- 'formatter. Use -O style=<stylename> instead.')
412
-
413
- def _get_line_height(self):
414
- """
415
- Get the height of a line.
416
- """
417
- return self.fonth + self.line_pad
418
-
419
- def _get_line_y(self, lineno):
420
- """
421
- Get the Y coordinate of a line number.
422
- """
423
- return lineno * self._get_line_height() + self.image_pad
424
-
425
- def _get_char_width(self):
426
- """
427
- Get the width of a character.
428
- """
429
- return self.fontw
430
-
431
- def _get_char_x(self, linelength):
432
- """
433
- Get the X coordinate of a character position.
434
- """
435
- return linelength + self.image_pad + self.line_number_width
436
-
437
- def _get_text_pos(self, linelength, lineno):
438
- """
439
- Get the actual position for a character and line position.
440
- """
441
- return self._get_char_x(linelength), self._get_line_y(lineno)
442
-
443
- def _get_linenumber_pos(self, lineno):
444
- """
445
- Get the actual position for the start of a line number.
446
- """
447
- return (self.image_pad, self._get_line_y(lineno))
448
-
449
- def _get_text_color(self, style):
450
- """
451
- Get the correct color for the token from the style.
452
- """
453
- if style['color'] is not None:
454
- fill = '#' + style['color']
455
- else:
456
- fill = '#000'
457
- return fill
458
-
459
- def _get_text_bg_color(self, style):
460
- """
461
- Get the correct background color for the token from the style.
462
- """
463
- if style['bgcolor'] is not None:
464
- bg_color = '#' + style['bgcolor']
465
- else:
466
- bg_color = None
467
- return bg_color
468
-
469
- def _get_style_font(self, style):
470
- """
471
- Get the correct font for the style.
472
- """
473
- return self.fonts.get_font(style['bold'], style['italic'])
474
-
475
- def _get_image_size(self, maxlinelength, maxlineno):
476
- """
477
- Get the required image size.
478
- """
479
- return (self._get_char_x(maxlinelength) + self.image_pad,
480
- self._get_line_y(maxlineno + 0) + self.image_pad)
481
-
482
- def _draw_linenumber(self, posno, lineno):
483
- """
484
- Remember a line number drawable to paint later.
485
- """
486
- self._draw_text(
487
- self._get_linenumber_pos(posno),
488
- str(lineno).rjust(self.line_number_chars),
489
- font=self.fonts.get_font(self.line_number_bold,
490
- self.line_number_italic),
491
- text_fg=self.line_number_fg,
492
- text_bg=None,
493
- )
494
-
495
- def _draw_text(self, pos, text, font, text_fg, text_bg):
496
- """
497
- Remember a single drawable tuple to paint later.
498
- """
499
- self.drawables.append((pos, text, font, text_fg, text_bg))
500
-
501
- def _create_drawables(self, tokensource):
502
- """
503
- Create drawables for the token content.
504
- """
505
- lineno = charno = maxcharno = 0
506
- maxlinelength = linelength = 0
507
- for ttype, value in tokensource:
508
- while ttype not in self.styles:
509
- ttype = ttype.parent
510
- style = self.styles[ttype]
511
- # TODO: make sure tab expansion happens earlier in the chain. It
512
- # really ought to be done on the input, as to do it right here is
513
- # quite complex.
514
- value = value.expandtabs(4)
515
- lines = value.splitlines(True)
516
- # print lines
517
- for i, line in enumerate(lines):
518
- temp = line.rstrip('\n')
519
- if temp:
520
- self._draw_text(
521
- self._get_text_pos(linelength, lineno),
522
- temp,
523
- font = self._get_style_font(style),
524
- text_fg = self._get_text_color(style),
525
- text_bg = self._get_text_bg_color(style),
526
- )
527
- temp_width, _ = self.fonts.get_text_size(temp)
528
- linelength += temp_width
529
- maxlinelength = max(maxlinelength, linelength)
530
- charno += len(temp)
531
- maxcharno = max(maxcharno, charno)
532
- if line.endswith('\n'):
533
- # add a line for each extra line in the value
534
- linelength = 0
535
- charno = 0
536
- lineno += 1
537
- self.maxlinelength = maxlinelength
538
- self.maxcharno = maxcharno
539
- self.maxlineno = lineno
540
-
541
- def _draw_line_numbers(self):
542
- """
543
- Create drawables for the line numbers.
544
- """
545
- if not self.line_numbers:
546
- return
547
- for p in range(self.maxlineno):
548
- n = p + self.line_number_start
549
- if (n % self.line_number_step) == 0:
550
- self._draw_linenumber(p, n)
551
-
552
- def _paint_line_number_bg(self, im):
553
- """
554
- Paint the line number background on the image.
555
- """
556
- if not self.line_numbers:
557
- return
558
- if self.line_number_fg is None:
559
- return
560
- draw = ImageDraw.Draw(im)
561
- recth = im.size[-1]
562
- rectw = self.image_pad + self.line_number_width - self.line_number_pad
563
- draw.rectangle([(0, 0), (rectw, recth)],
564
- fill=self.line_number_bg)
565
- if self.line_number_separator:
566
- draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
567
- del draw
568
-
569
- def format(self, tokensource, outfile):
570
- """
571
- Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
572
- tuples and write it into ``outfile``.
573
-
574
- This implementation calculates where it should draw each token on the
575
- pixmap, then calculates the required pixmap size and draws the items.
576
- """
577
- self._create_drawables(tokensource)
578
- self._draw_line_numbers()
579
- im = Image.new(
580
- 'RGB',
581
- self._get_image_size(self.maxlinelength, self.maxlineno),
582
- self.background_color
583
- )
584
- self._paint_line_number_bg(im)
585
- draw = ImageDraw.Draw(im)
586
- # Highlight
587
- if self.hl_lines:
588
- x = self.image_pad + self.line_number_width - self.line_number_pad + 1
589
- recth = self._get_line_height()
590
- rectw = im.size[0] - x
591
- for linenumber in self.hl_lines:
592
- y = self._get_line_y(linenumber - 1)
593
- draw.rectangle([(x, y), (x + rectw, y + recth)],
594
- fill=self.hl_color)
595
- for pos, value, font, text_fg, text_bg in self.drawables:
596
- if text_bg:
597
- text_size = draw.textsize(text=value, font=font)
598
- draw.rectangle([pos[0], pos[1], pos[0] + text_size[0], pos[1] + text_size[1]], fill=text_bg)
599
- draw.text(pos, value, font=font, fill=text_fg)
600
- im.save(outfile, self.image_format.upper())
601
-
602
-
603
- # Add one formatter per format, so that the "-f gif" option gives the correct result
604
- # when used in pygmentize.
605
-
606
- class GifImageFormatter(ImageFormatter):
607
- """
608
- Create a GIF image from source code. This uses the Python Imaging Library to
609
- generate a pixmap from the source code.
610
-
611
- .. versionadded:: 1.0
612
- """
613
-
614
- name = 'img_gif'
615
- aliases = ['gif']
616
- filenames = ['*.gif']
617
- default_image_format = 'gif'
618
-
619
-
620
- class JpgImageFormatter(ImageFormatter):
621
- """
622
- Create a JPEG image from source code. This uses the Python Imaging Library to
623
- generate a pixmap from the source code.
624
-
625
- .. versionadded:: 1.0
626
- """
627
-
628
- name = 'img_jpg'
629
- aliases = ['jpg', 'jpeg']
630
- filenames = ['*.jpg']
631
- default_image_format = 'jpeg'
632
-
633
-
634
- class BmpImageFormatter(ImageFormatter):
635
- """
636
- Create a bitmap image from source code. This uses the Python Imaging Library to
637
- generate a pixmap from the source code.
638
-
639
- .. versionadded:: 1.0
640
- """
641
-
642
- name = 'img_bmp'
643
- aliases = ['bmp', 'bitmap']
644
- filenames = ['*.bmp']
645
- default_image_format = 'bmp'
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/install_lib.py DELETED
@@ -1,238 +0,0 @@
1
- """distutils.command.install_lib
2
-
3
- Implements the Distutils 'install_lib' command
4
- (install all Python modules)."""
5
-
6
- import os
7
- import importlib.util
8
- import sys
9
-
10
- from distutils.core import Command
11
- from distutils.errors import DistutilsOptionError
12
-
13
-
14
- # Extension for Python source files.
15
- PYTHON_SOURCE_EXTENSION = ".py"
16
-
17
-
18
- class install_lib(Command):
19
-
20
- description = "install all Python modules (extensions and pure Python)"
21
-
22
- # The byte-compilation options are a tad confusing. Here are the
23
- # possible scenarios:
24
- # 1) no compilation at all (--no-compile --no-optimize)
25
- # 2) compile .pyc only (--compile --no-optimize; default)
26
- # 3) compile .pyc and "opt-1" .pyc (--compile --optimize)
27
- # 4) compile "opt-1" .pyc only (--no-compile --optimize)
28
- # 5) compile .pyc and "opt-2" .pyc (--compile --optimize-more)
29
- # 6) compile "opt-2" .pyc only (--no-compile --optimize-more)
30
- #
31
- # The UI for this is two options, 'compile' and 'optimize'.
32
- # 'compile' is strictly boolean, and only decides whether to
33
- # generate .pyc files. 'optimize' is three-way (0, 1, or 2), and
34
- # decides both whether to generate .pyc files and what level of
35
- # optimization to use.
36
-
37
- user_options = [
38
- ('install-dir=', 'd', "directory to install to"),
39
- ('build-dir=', 'b', "build directory (where to install from)"),
40
- ('force', 'f', "force installation (overwrite existing files)"),
41
- ('compile', 'c', "compile .py to .pyc [default]"),
42
- ('no-compile', None, "don't compile .py files"),
43
- (
44
- 'optimize=',
45
- 'O',
46
- "also compile with optimization: -O1 for \"python -O\", "
47
- "-O2 for \"python -OO\", and -O0 to disable [default: -O0]",
48
- ),
49
- ('skip-build', None, "skip the build steps"),
50
- ]
51
-
52
- boolean_options = ['force', 'compile', 'skip-build']
53
- negative_opt = {'no-compile': 'compile'}
54
-
55
- def initialize_options(self):
56
- # let the 'install' command dictate our installation directory
57
- self.install_dir = None
58
- self.build_dir = None
59
- self.force = 0
60
- self.compile = None
61
- self.optimize = None
62
- self.skip_build = None
63
-
64
- def finalize_options(self):
65
- # Get all the information we need to install pure Python modules
66
- # from the umbrella 'install' command -- build (source) directory,
67
- # install (target) directory, and whether to compile .py files.
68
- self.set_undefined_options(
69
- 'install',
70
- ('build_lib', 'build_dir'),
71
- ('install_lib', 'install_dir'),
72
- ('force', 'force'),
73
- ('compile', 'compile'),
74
- ('optimize', 'optimize'),
75
- ('skip_build', 'skip_build'),
76
- )
77
-
78
- if self.compile is None:
79
- self.compile = True
80
- if self.optimize is None:
81
- self.optimize = False
82
-
83
- if not isinstance(self.optimize, int):
84
- try:
85
- self.optimize = int(self.optimize)
86
- if self.optimize not in (0, 1, 2):
87
- raise AssertionError
88
- except (ValueError, AssertionError):
89
- raise DistutilsOptionError("optimize must be 0, 1, or 2")
90
-
91
- def run(self):
92
- # Make sure we have built everything we need first
93
- self.build()
94
-
95
- # Install everything: simply dump the entire contents of the build
96
- # directory to the installation directory (that's the beauty of
97
- # having a build directory!)
98
- outfiles = self.install()
99
-
100
- # (Optionally) compile .py to .pyc
101
- if outfiles is not None and self.distribution.has_pure_modules():
102
- self.byte_compile(outfiles)
103
-
104
- # -- Top-level worker functions ------------------------------------
105
- # (called from 'run()')
106
-
107
- def build(self):
108
- if not self.skip_build:
109
- if self.distribution.has_pure_modules():
110
- self.run_command('build_py')
111
- if self.distribution.has_ext_modules():
112
- self.run_command('build_ext')
113
-
114
- def install(self):
115
- if os.path.isdir(self.build_dir):
116
- outfiles = self.copy_tree(self.build_dir, self.install_dir)
117
- else:
118
- self.warn(
119
- "'%s' does not exist -- no Python modules to install" % self.build_dir
120
- )
121
- return
122
- return outfiles
123
-
124
- def byte_compile(self, files):
125
- if sys.dont_write_bytecode:
126
- self.warn('byte-compiling is disabled, skipping.')
127
- return
128
-
129
- from distutils.util import byte_compile
130
-
131
- # Get the "--root" directory supplied to the "install" command,
132
- # and use it as a prefix to strip off the purported filename
133
- # encoded in bytecode files. This is far from complete, but it
134
- # should at least generate usable bytecode in RPM distributions.
135
- install_root = self.get_finalized_command('install').root
136
-
137
- if self.compile:
138
- byte_compile(
139
- files,
140
- optimize=0,
141
- force=self.force,
142
- prefix=install_root,
143
- dry_run=self.dry_run,
144
- )
145
- if self.optimize > 0:
146
- byte_compile(
147
- files,
148
- optimize=self.optimize,
149
- force=self.force,
150
- prefix=install_root,
151
- verbose=self.verbose,
152
- dry_run=self.dry_run,
153
- )
154
-
155
- # -- Utility methods -----------------------------------------------
156
-
157
- def _mutate_outputs(self, has_any, build_cmd, cmd_option, output_dir):
158
- if not has_any:
159
- return []
160
-
161
- build_cmd = self.get_finalized_command(build_cmd)
162
- build_files = build_cmd.get_outputs()
163
- build_dir = getattr(build_cmd, cmd_option)
164
-
165
- prefix_len = len(build_dir) + len(os.sep)
166
- outputs = []
167
- for file in build_files:
168
- outputs.append(os.path.join(output_dir, file[prefix_len:]))
169
-
170
- return outputs
171
-
172
- def _bytecode_filenames(self, py_filenames):
173
- bytecode_files = []
174
- for py_file in py_filenames:
175
- # Since build_py handles package data installation, the
176
- # list of outputs can contain more than just .py files.
177
- # Make sure we only report bytecode for the .py files.
178
- ext = os.path.splitext(os.path.normcase(py_file))[1]
179
- if ext != PYTHON_SOURCE_EXTENSION:
180
- continue
181
- if self.compile:
182
- bytecode_files.append(
183
- importlib.util.cache_from_source(py_file, optimization='')
184
- )
185
- if self.optimize > 0:
186
- bytecode_files.append(
187
- importlib.util.cache_from_source(
188
- py_file, optimization=self.optimize
189
- )
190
- )
191
-
192
- return bytecode_files
193
-
194
- # -- External interface --------------------------------------------
195
- # (called by outsiders)
196
-
197
- def get_outputs(self):
198
- """Return the list of files that would be installed if this command
199
- were actually run. Not affected by the "dry-run" flag or whether
200
- modules have actually been built yet.
201
- """
202
- pure_outputs = self._mutate_outputs(
203
- self.distribution.has_pure_modules(),
204
- 'build_py',
205
- 'build_lib',
206
- self.install_dir,
207
- )
208
- if self.compile:
209
- bytecode_outputs = self._bytecode_filenames(pure_outputs)
210
- else:
211
- bytecode_outputs = []
212
-
213
- ext_outputs = self._mutate_outputs(
214
- self.distribution.has_ext_modules(),
215
- 'build_ext',
216
- 'build_lib',
217
- self.install_dir,
218
- )
219
-
220
- return pure_outputs + bytecode_outputs + ext_outputs
221
-
222
- def get_inputs(self):
223
- """Get the list of files that are input to this command, ie. the
224
- files that get installed as they are named in the build tree.
225
- The files in this list correspond one-to-one to the output
226
- filenames returned by 'get_outputs()'.
227
- """
228
- inputs = []
229
-
230
- if self.distribution.has_pure_modules():
231
- build_py = self.get_finalized_command('build_py')
232
- inputs.extend(build_py.get_outputs())
233
-
234
- if self.distribution.has_ext_modules():
235
- build_ext = self.get_finalized_command('build_ext')
236
- inputs.extend(build_ext.get_outputs())
237
-
238
- return inputs
 
spaces/Billet/WizardLM-WizardMath-70B-V1.033/app.py DELETED
@@ -1,3 +0,0 @@
- import gradio as gr
-
- gr.Interface.load("models/WizardLM/WizardMath-70B-V1.0").launch()
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/export/caffe2_inference.py DELETED
@@ -1,136 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
-
3
- import collections
4
- import logging
5
- import numpy as np
6
- import torch
7
- from caffe2.proto import caffe2_pb2
8
- from caffe2.python import core
9
-
10
- from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format
11
- from .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type
12
-
13
- logger = logging.getLogger(__name__)
14
-
15
-
16
- class ProtobufModel(torch.nn.Module):
17
- """
18
- A class works just like nn.Module in terms of inference, but running
19
- caffe2 model under the hood. Input/Output are Dict[str, tensor] whose keys
20
- are in external_input/output.
21
- """
22
-
23
- def __init__(self, predict_net, init_net):
24
- logger.info("Initializing ProtobufModel ...")
25
- super().__init__()
26
- assert isinstance(predict_net, caffe2_pb2.NetDef)
27
- assert isinstance(init_net, caffe2_pb2.NetDef)
28
- self.ws_name = "__ws_tmp__"
29
- self.net = core.Net(predict_net)
30
-
31
- with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws:
32
- ws.RunNetOnce(init_net)
33
- for blob in self.net.Proto().external_input:
34
- if blob not in ws.Blobs():
35
- ws.CreateBlob(blob)
36
- ws.CreateNet(self.net)
37
-
38
- self._error_msgs = set()
39
-
40
- def forward(self, inputs_dict):
41
- assert all(inp in self.net.Proto().external_input for inp in inputs_dict)
42
- with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws:
43
- for b, tensor in inputs_dict.items():
44
- ws.FeedBlob(b, tensor)
45
- try:
46
- ws.RunNet(self.net.Proto().name)
47
- except RuntimeError as e:
48
- if not str(e) in self._error_msgs:
49
- self._error_msgs.add(str(e))
50
- logger.warning("Encountered new RuntimeError: \n{}".format(str(e)))
51
- logger.warning("Catch the error and use partial results.")
52
-
53
- outputs_dict = collections.OrderedDict(
54
- [(b, ws.FetchBlob(b)) for b in self.net.Proto().external_output]
55
- )
56
- # Remove outputs of current run, this is necessary in order to
57
- # prevent fetching the result from previous run if the model fails
58
- # in the middle.
59
- for b in self.net.Proto().external_output:
60
- # Needs to create uninitialized blob to make the net runable.
61
- # This is "equivalent" to: ws.RemoveBlob(b) then ws.CreateBlob(b),
62
- # but there'no such API.
63
- ws.FeedBlob(b, "{}, a C++ native class of type nullptr (uninitialized).".format(b))
64
-
65
- return outputs_dict
66
-
67
-
68
- class ProtobufDetectionModel(torch.nn.Module):
69
- """
70
- A class works just like a pytorch meta arch in terms of inference, but running
71
- caffe2 model under the hood.
72
- """
73
-
74
- def __init__(self, predict_net, init_net, *, convert_outputs=None):
75
- """
76
- Args:
77
- predict_net, init_net (core.Net): caffe2 nets
78
- convert_outptus (callable): a function that converts caffe2
79
- outputs to the same format of the original pytorch model.
80
- By default, use the one defined in the caffe2 meta_arch.
81
- """
82
- super().__init__()
83
- self.protobuf_model = ProtobufModel(predict_net, init_net)
84
- self.size_divisibility = get_pb_arg_vali(predict_net, "size_divisibility", 0)
85
- self.device = get_pb_arg_vals(predict_net, "device", b"cpu").decode("ascii")
86
-
87
- if convert_outputs is None:
88
- meta_arch = get_pb_arg_vals(predict_net, "meta_architecture", b"GeneralizedRCNN")
89
- meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode("ascii")]
90
- self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net)
91
- else:
92
- self._convert_outputs = convert_outputs
93
-
94
- def _infer_output_devices(self, inputs_dict):
95
- def _get_device_type(torch_tensor):
96
- assert torch_tensor.device.type in ["cpu", "cuda"]
97
- assert torch_tensor.device.index == 0
98
- return torch_tensor.device.type
99
-
100
- predict_net = self.protobuf_model.net.Proto()
101
- input_device_types = {
102
- (name, 0): _get_device_type(tensor) for name, tensor in inputs_dict.items()
103
- }
104
- device_type_map = infer_device_type(
105
- predict_net, known_status=input_device_types, device_name_style="pytorch"
106
- )
107
- ssa, versions = core.get_ssa(predict_net)
108
- versioned_outputs = [(name, versions[name]) for name in predict_net.external_output]
109
- output_devices = [device_type_map[outp] for outp in versioned_outputs]
110
- return output_devices
111
-
112
- def _convert_inputs(self, batched_inputs):
113
- # currently all models convert inputs in the same way
114
- data, im_info = convert_batched_inputs_to_c2_format(
115
- batched_inputs, self.size_divisibility, self.device
116
- )
117
- return {"data": data, "im_info": im_info}
118
-
119
- def forward(self, batched_inputs):
120
- c2_inputs = self._convert_inputs(batched_inputs)
121
- c2_results = self.protobuf_model(c2_inputs)
122
-
123
- if any(t.device.type != "cpu" for _, t in c2_inputs.items()):
124
- output_devices = self._infer_output_devices(c2_inputs)
125
- else:
126
- output_devices = ["cpu" for _ in self.protobuf_model.net.Proto().external_output]
127
-
128
- def _cast_caffe2_blob_to_torch_tensor(blob, device):
129
- return torch.Tensor(blob).to(device) if isinstance(blob, np.ndarray) else None
130
-
131
- c2_results = {
132
- name: _cast_caffe2_blob_to_torch_tensor(c2_results[name], device)
133
- for name, device in zip(self.protobuf_model.net.Proto().external_output, output_devices)
134
- }
135
-
136
- return self._convert_outputs(batched_inputs, c2_inputs, c2_results)
 
spaces/CVPR/LIVE/thrust/thrust/mr/allocator.h DELETED
@@ -1,250 +0,0 @@
1
- /*
2
- * Copyright 2018 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- /*! \file allocator.h
18
- * \brief Allocator types usable with NPA-based memory resources.
19
- */
20
-
21
- #pragma once
22
-
23
- #include <limits>
24
-
25
- #include <thrust/detail/config/exec_check_disable.h>
26
- #include <thrust/detail/type_traits/pointer_traits.h>
27
-
28
- #include <thrust/mr/detail/config.h>
29
- #include <thrust/mr/validator.h>
30
- #include <thrust/mr/polymorphic_adaptor.h>
31
-
32
- namespace thrust
33
- {
34
- namespace mr
35
- {
36
-
37
- /*! \addtogroup memory_management Memory Management
38
- * \addtogroup memory_management_classes Memory Management Classes
39
- * \ingroup memory_management
40
- * \{
41
- */
42
-
43
- /*! An \p mr::allocator is a template that fulfills the C++ requirements for Allocators,
44
- * allowing to use the NPA-based memory resources where an Allocator is required. Unlike
45
- * memory resources, but like other allocators, \p mr::allocator is typed and bound to
46
- * allocate object of a specific type, however it can be freely rebound to other types.
47
- *
48
- * \tparam T the type that will be allocated by this allocator.
49
- * \tparam MR the upstream memory resource to use for memory allocation. Must derive from
50
- * \p thrust::mr::memory_resource and must be \p final (in C++11 and beyond).
51
- */
52
- template<typename T, class MR>
53
- class allocator : private validator<MR>
54
- {
55
- public:
56
- /*! The pointer to void type of this allocator. */
57
- typedef typename MR::pointer void_pointer;
58
-
59
- /*! The value type allocated by this allocator. Equivalent to \p T. */
60
- typedef T value_type;
61
- /*! The pointer type allocated by this allocator. Equivaled to the pointer type of \p MR rebound to \p T. */
62
- typedef typename thrust::detail::pointer_traits<void_pointer>::template rebind<T>::other pointer;
63
- /*! The pointer to const type. Equivalent to a pointer type of \p MR reboud to <tt>const T</tt>. */
64
- typedef typename thrust::detail::pointer_traits<void_pointer>::template rebind<const T>::other const_pointer;
65
- /*! The reference to the type allocated by this allocator. Supports smart references. */
66
- typedef typename thrust::detail::pointer_traits<pointer>::reference reference;
67
- /*! The const reference to the type allocated by this allocator. Supports smart references. */
68
- typedef typename thrust::detail::pointer_traits<const_pointer>::reference const_reference;
69
- /*! The size type of this allocator. Always \p std::size_t. */
70
- typedef std::size_t size_type;
71
- /*! The difference type between pointers allocated by this allocator. */
72
- typedef typename thrust::detail::pointer_traits<pointer>::difference_type difference_type;
73
-
74
- /*! Specifies that the allocator shall be propagated on container copy assignment. */
75
- typedef detail::true_type propagate_on_container_copy_assignment;
76
- /*! Specifies that the allocator shall be propagated on container move assignment. */
77
- typedef detail::true_type propagate_on_container_move_assignment;
78
- /*! Specifies that the allocator shall be propagated on container swap. */
79
- typedef detail::true_type propagate_on_container_swap;
80
-
81
- /*! The \p rebind metafunction provides the type of an \p allocator instantiated with another type.
82
- *
83
- * \tparam U the other type to use for instantiation.
84
- */
85
- template<typename U>
86
- struct rebind
87
- {
88
- /*! The typedef \p other gives the type of the rebound \p allocator.
89
- */
90
- typedef allocator<U, MR> other;
91
- };
92
-
93
- /*! Calculates the maximum number of elements allocated by this allocator.
94
- *
95
- * \returns the maximum value of \p std::size_t, divided by the size of \p T.
96
- */
97
- __thrust_exec_check_disable__
98
- __host__ __device__
99
- size_type max_size() const
100
- {
101
- return std::numeric_limits<size_type>::max() / sizeof(T);
102
- }
103
-
104
- /*! Constructor.
105
- *
106
- * \param resource the resource to be used to allocate raw memory.
107
- */
108
- __host__ __device__
109
- allocator(MR * resource) : mem_res(resource)
110
- {
111
- }
112
-
113
- /*! Copy constructor. Copies the resource pointer. */
114
- template<typename U>
115
- __host__ __device__
116
- allocator(const allocator<U, MR> & other) : mem_res(other.resource())
117
- {
118
- }
119
-
120
- /*! Allocates objects of type \p T.
121
- *
122
- * \param n number of elements to allocate
123
- * \returns a pointer to the newly allocated storage.
124
- */
125
- THRUST_NODISCARD
126
- __host__
127
- pointer allocate(size_type n)
128
- {
129
- return static_cast<pointer>(mem_res->do_allocate(n * sizeof(T), THRUST_ALIGNOF(T)));
130
- }
131
-
132
- /*! Deallocates objects of type \p T.
133
- *
134
- * \param p pointer returned by a previous call to \p allocate
135
- * \param n number of elements, passed as an argument to the \p allocate call that produced \p p
136
- */
137
- __host__
138
- void deallocate(pointer p, size_type n)
139
- {
140
- return mem_res->do_deallocate(p, n * sizeof(T), THRUST_ALIGNOF(T));
141
- }
142
-
143
- /*! Extracts the memory resource used by this allocator.
144
- *
145
- * \returns the memory resource used by this allocator.
146
- */
147
- __host__ __device__
148
- MR * resource() const
149
- {
150
- return mem_res;
151
- }
152
-
153
- private:
154
- MR * mem_res;
155
- };
156
-
157
- /*! Compares the allocators for equality by comparing the underlying memory resources. */
158
- template<typename T, typename MR>
159
- __host__ __device__
160
- bool operator==(const allocator<T, MR> & lhs, const allocator<T, MR> & rhs) THRUST_NOEXCEPT
161
- {
162
- return *lhs.resource() == *rhs.resource();
163
- }
164
-
165
- /*! Compares the allocators for inequality by comparing the underlying memory resources. */
166
- template<typename T, typename MR>
167
- __host__ __device__
168
- bool operator!=(const allocator<T, MR> & lhs, const allocator<T, MR> & rhs) THRUST_NOEXCEPT
169
- {
170
- return !(lhs == rhs);
171
- }
172
-
173
- #if THRUST_CPP_DIALECT >= 2011
174
-
175
- template<typename T, typename Pointer>
176
- using polymorphic_allocator = allocator<T, polymorphic_adaptor_resource<Pointer> >;
177
-
178
- #else // C++11
179
-
180
- template<typename T, typename Pointer>
181
- class polymorphic_allocator : public allocator<T, polymorphic_adaptor_resource<Pointer> >
182
- {
183
- typedef allocator<T, polymorphic_adaptor_resource<Pointer> > base;
184
-
185
- public:
186
- /*! Initializes the base class with the parameter \p resource.
187
- */
188
- polymorphic_allocator(polymorphic_adaptor_resource<Pointer> * resource) : base(resource)
189
- {
190
- }
191
- };
192
-
193
- #endif // C++11
194
-
195
- /*! A helper allocator class that uses global instances of a given upstream memory resource. Requires the memory resource
196
- * to be default constructible.
197
- *
198
- * \tparam T the type that will be allocated by this allocator.
199
- * \tparam Upstream the upstream memory resource to use for memory allocation. Must derive from
200
- * \p thrust::mr::memory_resource and must be \p final (in C++11 and beyond).
201
- */
202
- template<typename T, typename Upstream>
203
- class stateless_resource_allocator : public thrust::mr::allocator<T, Upstream>
204
- {
205
- typedef thrust::mr::allocator<T, Upstream> base;
206
-
207
- public:
208
- /*! The \p rebind metafunction provides the type of an \p stateless_resource_allocator instantiated with another type.
209
- *
210
- * \tparam U the other type to use for instantiation.
211
- */
212
- template<typename U>
213
- struct rebind
214
- {
215
- /*! The typedef \p other gives the type of the rebound \p stateless_resource_allocator.
216
- */
217
- typedef stateless_resource_allocator<U, Upstream> other;
218
- };
219
-
220
- /*! Default constructor. Uses \p get_global_resource to get the global instance of \p Upstream and initializes the
221
- * \p allocator base subobject with that resource.
222
- */
223
- __host__
224
- stateless_resource_allocator() : base(get_global_resource<Upstream>())
225
- {
226
- }
227
-
228
- /*! Copy constructor. Copies the memory resource pointer. */
229
- __host__ __device__
230
- stateless_resource_allocator(const stateless_resource_allocator & other)
231
- : base(other) {}
232
-
233
- /*! Conversion constructor from an allocator of a different type. Copies the memory resource pointer. */
234
- template<typename U>
235
- __host__ __device__
236
- stateless_resource_allocator(const stateless_resource_allocator<U, Upstream> & other)
237
- : base(other) {}
238
-
239
- #if THRUST_CPP_DIALECT >= 2011
240
- stateless_resource_allocator & operator=(const stateless_resource_allocator &) = default;
241
- #endif
242
-
243
- /*! Destructor. */
244
- __host__ __device__
245
- ~stateless_resource_allocator() {}
246
- };
247
-
248
- } // end mr
249
- } // end thrust
250
-
 
spaces/CVPR/LIVE/thrust/thrust/system/cuda/detail/scatter.h DELETED
@@ -1,106 +0,0 @@
- /******************************************************************************
-  * Copyright (c) 2016, NVIDIA CORPORATION.  All rights reserved.
-  *
-  * Redistribution and use in source and binary forms, with or without
-  * modification, are permitted provided that the following conditions are met:
-  *     * Redistributions of source code must retain the above copyright
-  *       notice, this list of conditions and the following disclaimer.
-  *     * Redistributions in binary form must reproduce the above copyright
-  *       notice, this list of conditions and the following disclaimer in the
-  *       documentation and/or other materials provided with the distribution.
-  *     * Neither the name of the NVIDIA CORPORATION nor the
-  *       names of its contributors may be used to endorse or promote products
-  *       derived from this software without specific prior written permission.
-  *
-  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-  * ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
-  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-  *
-  ******************************************************************************/
- #pragma once
-
-
- #if THRUST_DEVICE_COMPILER == THRUST_DEVICE_COMPILER_NVCC
- #include <thrust/system/cuda/detail/transform.h>
- #include <thrust/iterator/permutation_iterator.h>
-
- namespace thrust
- {
- namespace cuda_cub {
-
- template <class Derived,
-           class ItemsIt,
-           class MapIt,
-           class ResultIt>
- void __host__ __device__
- scatter(execution_policy<Derived>& policy,
-         ItemsIt                    first,
-         ItemsIt                    last,
-         MapIt                      map,
-         ResultIt                   result)
- {
-   cuda_cub::transform(policy,
-                       first,
-                       last,
-                       thrust::make_permutation_iterator(result, map),
-                       identity());
- }
-
- template <class Derived,
-           class ItemsIt,
-           class MapIt,
-           class StencilIt,
-           class ResultIt,
-           class Predicate>
- void __host__ __device__
- scatter_if(execution_policy<Derived>& policy,
-            ItemsIt                    first,
-            ItemsIt                    last,
-            MapIt                      map,
-            StencilIt                  stencil,
-            ResultIt                   result,
-            Predicate                  predicate)
- {
-   cuda_cub::transform_if(policy,
-                          first,
-                          last,
-                          stencil,
-                          thrust::make_permutation_iterator(result, map),
-                          identity(),
-                          predicate);
- }
-
- template <class Derived,
-           class ItemsIt,
-           class MapIt,
-           class StencilIt,
-           class ResultIt,
-           class Predicate>
- void __host__ __device__
- scatter_if(execution_policy<Derived>& policy,
-            ItemsIt                    first,
-            ItemsIt                    last,
-            MapIt                      map,
-            StencilIt                  stencil,
-            ResultIt                   result)
- {
-   cuda_cub::scatter_if(policy,
-                        first,
-                        last,
-                        map,
-                        stencil,
-                        result,
-                        identity());
- }
-
-
- }    // namespace cuda_cub
- } // end namespace thrust
- #endif
 
spaces/CVPR/regionclip-demo/detectron2/__init__.py DELETED
@@ -1,10 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
-
- from .utils.env import setup_environment
-
- setup_environment()
-
-
- # This line will be programatically read/write by setup.py.
- # Leave them at the bottom of this file and don't touch them.
- __version__ = "0.4"
 
spaces/Cicooo/vits-uma-genshin-honkai/transforms.py DELETED
@@ -1,193 +0,0 @@
1
- import torch
2
- from torch.nn import functional as F
3
-
4
- import numpy as np
5
-
6
-
7
- DEFAULT_MIN_BIN_WIDTH = 1e-3
8
- DEFAULT_MIN_BIN_HEIGHT = 1e-3
9
- DEFAULT_MIN_DERIVATIVE = 1e-3
10
-
11
-
12
- def piecewise_rational_quadratic_transform(inputs,
13
- unnormalized_widths,
14
- unnormalized_heights,
15
- unnormalized_derivatives,
16
- inverse=False,
17
- tails=None,
18
- tail_bound=1.,
19
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
20
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
21
- min_derivative=DEFAULT_MIN_DERIVATIVE):
22
-
23
- if tails is None:
24
- spline_fn = rational_quadratic_spline
25
- spline_kwargs = {}
26
- else:
27
- spline_fn = unconstrained_rational_quadratic_spline
28
- spline_kwargs = {
29
- 'tails': tails,
30
- 'tail_bound': tail_bound
31
- }
32
-
33
- outputs, logabsdet = spline_fn(
34
- inputs=inputs,
35
- unnormalized_widths=unnormalized_widths,
36
- unnormalized_heights=unnormalized_heights,
37
- unnormalized_derivatives=unnormalized_derivatives,
38
- inverse=inverse,
39
- min_bin_width=min_bin_width,
40
- min_bin_height=min_bin_height,
41
- min_derivative=min_derivative,
42
- **spline_kwargs
43
- )
44
- return outputs, logabsdet
45
-
46
-
47
- def searchsorted(bin_locations, inputs, eps=1e-6):
48
- bin_locations[..., -1] += eps
49
- return torch.sum(
50
- inputs[..., None] >= bin_locations,
51
- dim=-1
52
- ) - 1
53
-
54
-
55
- def unconstrained_rational_quadratic_spline(inputs,
56
- unnormalized_widths,
57
- unnormalized_heights,
58
- unnormalized_derivatives,
59
- inverse=False,
60
- tails='linear',
61
- tail_bound=1.,
62
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
63
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
64
- min_derivative=DEFAULT_MIN_DERIVATIVE):
65
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
66
- outside_interval_mask = ~inside_interval_mask
67
-
68
- outputs = torch.zeros_like(inputs)
69
- logabsdet = torch.zeros_like(inputs)
70
-
71
- if tails == 'linear':
72
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
73
- constant = np.log(np.exp(1 - min_derivative) - 1)
74
- unnormalized_derivatives[..., 0] = constant
75
- unnormalized_derivatives[..., -1] = constant
76
-
77
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
78
- logabsdet[outside_interval_mask] = 0
79
- else:
80
- raise RuntimeError('{} tails are not implemented.'.format(tails))
81
-
82
- outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
83
- inputs=inputs[inside_interval_mask],
84
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
85
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
86
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
87
- inverse=inverse,
88
- left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
89
- min_bin_width=min_bin_width,
90
- min_bin_height=min_bin_height,
91
- min_derivative=min_derivative
92
- )
93
-
94
- return outputs, logabsdet
95
-
96
- def rational_quadratic_spline(inputs,
97
- unnormalized_widths,
98
- unnormalized_heights,
99
- unnormalized_derivatives,
100
- inverse=False,
101
- left=0., right=1., bottom=0., top=1.,
102
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
103
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
104
- min_derivative=DEFAULT_MIN_DERIVATIVE):
105
- if torch.min(inputs) < left or torch.max(inputs) > right:
106
- raise ValueError('Input to a transform is not within its domain')
107
-
108
- num_bins = unnormalized_widths.shape[-1]
109
-
110
- if min_bin_width * num_bins > 1.0:
111
- raise ValueError('Minimal bin width too large for the number of bins')
112
- if min_bin_height * num_bins > 1.0:
113
- raise ValueError('Minimal bin height too large for the number of bins')
114
-
115
- widths = F.softmax(unnormalized_widths, dim=-1)
116
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
117
- cumwidths = torch.cumsum(widths, dim=-1)
118
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
119
- cumwidths = (right - left) * cumwidths + left
120
- cumwidths[..., 0] = left
121
- cumwidths[..., -1] = right
122
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
123
-
124
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
125
-
126
- heights = F.softmax(unnormalized_heights, dim=-1)
127
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
128
- cumheights = torch.cumsum(heights, dim=-1)
129
- cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
130
- cumheights = (top - bottom) * cumheights + bottom
131
- cumheights[..., 0] = bottom
132
- cumheights[..., -1] = top
133
- heights = cumheights[..., 1:] - cumheights[..., :-1]
134
-
135
- if inverse:
136
- bin_idx = searchsorted(cumheights, inputs)[..., None]
137
- else:
138
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
139
-
140
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
141
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
142
-
143
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
144
- delta = heights / widths
145
- input_delta = delta.gather(-1, bin_idx)[..., 0]
146
-
147
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
148
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
149
-
150
- input_heights = heights.gather(-1, bin_idx)[..., 0]
151
-
152
- if inverse:
153
- a = (((inputs - input_cumheights) * (input_derivatives
154
- + input_derivatives_plus_one
155
- - 2 * input_delta)
156
- + input_heights * (input_delta - input_derivatives)))
157
- b = (input_heights * input_derivatives
158
- - (inputs - input_cumheights) * (input_derivatives
159
- + input_derivatives_plus_one
160
- - 2 * input_delta))
161
- c = - input_delta * (inputs - input_cumheights)
162
-
163
- discriminant = b.pow(2) - 4 * a * c
164
- assert (discriminant >= 0).all()
165
-
166
- root = (2 * c) / (-b - torch.sqrt(discriminant))
167
- outputs = root * input_bin_widths + input_cumwidths
168
-
169
- theta_one_minus_theta = root * (1 - root)
170
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
171
- * theta_one_minus_theta)
172
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
173
- + 2 * input_delta * theta_one_minus_theta
174
- + input_derivatives * (1 - root).pow(2))
175
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
176
-
177
- return outputs, -logabsdet
178
- else:
179
- theta = (inputs - input_cumwidths) / input_bin_widths
180
- theta_one_minus_theta = theta * (1 - theta)
181
-
182
- numerator = input_heights * (input_delta * theta.pow(2)
183
- + input_derivatives * theta_one_minus_theta)
184
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
185
- * theta_one_minus_theta)
186
- outputs = input_cumheights + numerator / denominator
187
-
188
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
189
- + 2 * input_delta * theta_one_minus_theta
190
- + input_derivatives * (1 - theta).pow(2))
191
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
192
-
193
- return outputs, logabsdet
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/ttGlyphSet.py DELETED
@@ -1,322 +0,0 @@
1
- """GlyphSets returned by a TTFont."""
2
-
3
- from abc import ABC, abstractmethod
4
- from collections.abc import Mapping
5
- from contextlib import contextmanager
6
- from copy import copy
7
- from types import SimpleNamespace
8
- from fontTools.misc.fixedTools import otRound
9
- from fontTools.misc.loggingTools import deprecateFunction
10
- from fontTools.misc.transform import Transform
11
- from fontTools.pens.transformPen import TransformPen, TransformPointPen
12
-
13
-
14
- class _TTGlyphSet(Mapping):
15
-
16
- """Generic dict-like GlyphSet class that pulls metrics from hmtx and
17
- glyph shape from TrueType or CFF.
18
- """
19
-
20
- def __init__(self, font, location, glyphsMapping):
21
- self.font = font
22
- self.defaultLocationNormalized = (
23
- {axis.axisTag: 0 for axis in self.font["fvar"].axes}
24
- if "fvar" in self.font
25
- else {}
26
- )
27
- self.location = location if location is not None else {}
28
- self.rawLocation = {} # VarComponent-only location
29
- self.originalLocation = location if location is not None else {}
30
- self.depth = 0
31
- self.locationStack = []
32
- self.rawLocationStack = []
33
- self.glyphsMapping = glyphsMapping
34
- self.hMetrics = font["hmtx"].metrics
35
- self.vMetrics = getattr(font.get("vmtx"), "metrics", None)
36
- self.hvarTable = None
37
- if location:
38
- from fontTools.varLib.varStore import VarStoreInstancer
39
-
40
- self.hvarTable = getattr(font.get("HVAR"), "table", None)
41
- if self.hvarTable is not None:
42
- self.hvarInstancer = VarStoreInstancer(
43
- self.hvarTable.VarStore, font["fvar"].axes, location
44
- )
45
- # TODO VVAR, VORG
46
-
47
- @contextmanager
48
- def pushLocation(self, location, reset: bool):
49
- self.locationStack.append(self.location)
50
- self.rawLocationStack.append(self.rawLocation)
51
- if reset:
52
- self.location = self.originalLocation.copy()
53
- self.rawLocation = self.defaultLocationNormalized.copy()
54
- else:
55
- self.location = self.location.copy()
56
- self.rawLocation = {}
57
- self.location.update(location)
58
- self.rawLocation.update(location)
59
-
60
- try:
61
- yield None
62
- finally:
63
- self.location = self.locationStack.pop()
64
- self.rawLocation = self.rawLocationStack.pop()
65
-
66
- @contextmanager
67
- def pushDepth(self):
68
- try:
69
- depth = self.depth
70
- self.depth += 1
71
- yield depth
72
- finally:
73
- self.depth -= 1
74
-
75
- def __contains__(self, glyphName):
76
- return glyphName in self.glyphsMapping
77
-
78
- def __iter__(self):
79
- return iter(self.glyphsMapping.keys())
80
-
81
- def __len__(self):
82
- return len(self.glyphsMapping)
83
-
84
- @deprecateFunction(
85
- "use 'glyphName in glyphSet' instead", category=DeprecationWarning
86
- )
87
- def has_key(self, glyphName):
88
- return glyphName in self.glyphsMapping
89
-
90
-
91
- class _TTGlyphSetGlyf(_TTGlyphSet):
92
- def __init__(self, font, location):
93
- self.glyfTable = font["glyf"]
94
- super().__init__(font, location, self.glyfTable)
95
- self.gvarTable = font.get("gvar")
96
-
97
- def __getitem__(self, glyphName):
98
- return _TTGlyphGlyf(self, glyphName)
99
-
100
-
101
- class _TTGlyphSetCFF(_TTGlyphSet):
102
- def __init__(self, font, location):
103
- tableTag = "CFF2" if "CFF2" in font else "CFF "
104
- self.charStrings = list(font[tableTag].cff.values())[0].CharStrings
105
- super().__init__(font, location, self.charStrings)
106
- self.blender = None
107
- if location:
108
- from fontTools.varLib.varStore import VarStoreInstancer
109
-
110
- varStore = getattr(self.charStrings, "varStore", None)
111
- if varStore is not None:
112
- instancer = VarStoreInstancer(
113
- varStore.otVarStore, font["fvar"].axes, location
114
- )
115
- self.blender = instancer.interpolateFromDeltas
116
-
117
- def __getitem__(self, glyphName):
118
- return _TTGlyphCFF(self, glyphName)
119
-
120
-
121
- class _TTGlyph(ABC):
122
-
123
- """Glyph object that supports the Pen protocol, meaning that it has
124
- .draw() and .drawPoints() methods that take a pen object as their only
125
- argument. Additionally there are 'width' and 'lsb' attributes, read from
126
- the 'hmtx' table.
127
-
128
- If the font contains a 'vmtx' table, there will also be 'height' and 'tsb'
129
- attributes.
130
- """
131
-
132
- def __init__(self, glyphSet, glyphName):
133
- self.glyphSet = glyphSet
134
- self.name = glyphName
135
- self.width, self.lsb = glyphSet.hMetrics[glyphName]
136
- if glyphSet.vMetrics is not None:
137
- self.height, self.tsb = glyphSet.vMetrics[glyphName]
138
- else:
139
- self.height, self.tsb = None, None
140
- if glyphSet.location and glyphSet.hvarTable is not None:
141
- varidx = (
142
- glyphSet.font.getGlyphID(glyphName)
143
- if glyphSet.hvarTable.AdvWidthMap is None
144
- else glyphSet.hvarTable.AdvWidthMap.mapping[glyphName]
145
- )
146
- self.width += glyphSet.hvarInstancer[varidx]
147
- # TODO: VVAR/VORG
148
-
149
- @abstractmethod
150
- def draw(self, pen):
151
- """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
152
- how that works.
153
- """
154
- raise NotImplementedError
155
-
156
- def drawPoints(self, pen):
157
- """Draw the glyph onto ``pen``. See fontTools.pens.pointPen for details
158
- how that works.
159
- """
160
- from fontTools.pens.pointPen import SegmentToPointPen
161
-
162
- self.draw(SegmentToPointPen(pen))
163
-
164
-
165
- class _TTGlyphGlyf(_TTGlyph):
166
- def draw(self, pen):
167
- """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
168
- how that works.
169
- """
170
- glyph, offset = self._getGlyphAndOffset()
171
-
172
- with self.glyphSet.pushDepth() as depth:
173
-
174
- if depth:
175
- offset = 0 # Offset should only apply at top-level
176
-
177
- if glyph.isVarComposite():
178
- self._drawVarComposite(glyph, pen, False)
179
- return
180
-
181
- glyph.draw(pen, self.glyphSet.glyfTable, offset)
182
-
183
- def drawPoints(self, pen):
184
- """Draw the glyph onto ``pen``. See fontTools.pens.pointPen for details
185
- how that works.
186
- """
187
- glyph, offset = self._getGlyphAndOffset()
188
-
189
- with self.glyphSet.pushDepth() as depth:
190
-
191
- if depth:
192
- offset = 0 # Offset should only apply at top-level
193
-
194
- if glyph.isVarComposite():
195
- self._drawVarComposite(glyph, pen, True)
196
- return
197
-
198
- glyph.drawPoints(pen, self.glyphSet.glyfTable, offset)
199
-
200
- def _drawVarComposite(self, glyph, pen, isPointPen):
201
-
202
- from fontTools.ttLib.tables._g_l_y_f import (
203
- VarComponentFlags,
204
- VAR_COMPONENT_TRANSFORM_MAPPING,
205
- )
206
-
207
- for comp in glyph.components:
208
-
209
- with self.glyphSet.pushLocation(
210
- comp.location, comp.flags & VarComponentFlags.RESET_UNSPECIFIED_AXES
211
- ):
212
- try:
213
- pen.addVarComponent(
214
- comp.glyphName, comp.transform, self.glyphSet.rawLocation
215
- )
216
- except AttributeError:
217
- t = comp.transform.toTransform()
218
- if isPointPen:
219
- tPen = TransformPointPen(pen, t)
220
- self.glyphSet[comp.glyphName].drawPoints(tPen)
221
- else:
222
- tPen = TransformPen(pen, t)
223
- self.glyphSet[comp.glyphName].draw(tPen)
224
-
225
- def _getGlyphAndOffset(self):
226
- if self.glyphSet.location and self.glyphSet.gvarTable is not None:
227
- glyph = self._getGlyphInstance()
228
- else:
229
- glyph = self.glyphSet.glyfTable[self.name]
230
-
231
- offset = self.lsb - glyph.xMin if hasattr(glyph, "xMin") else 0
232
- return glyph, offset
233
-
234
- def _getGlyphInstance(self):
235
- from fontTools.varLib.iup import iup_delta
236
- from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
237
- from fontTools.varLib.models import supportScalar
238
-
239
- glyphSet = self.glyphSet
240
- glyfTable = glyphSet.glyfTable
241
- variations = glyphSet.gvarTable.variations[self.name]
242
- hMetrics = glyphSet.hMetrics
243
- vMetrics = glyphSet.vMetrics
244
- coordinates, _ = glyfTable._getCoordinatesAndControls(
245
- self.name, hMetrics, vMetrics
246
- )
247
- origCoords, endPts = None, None
248
- for var in variations:
249
- scalar = supportScalar(glyphSet.location, var.axes)
250
- if not scalar:
251
- continue
252
- delta = var.coordinates
253
- if None in delta:
254
- if origCoords is None:
255
- origCoords, control = glyfTable._getCoordinatesAndControls(
256
- self.name, hMetrics, vMetrics
257
- )
258
- endPts = (
259
- control[1] if control[0] >= 1 else list(range(len(control[1])))
260
- )
261
- delta = iup_delta(delta, origCoords, endPts)
262
- coordinates += GlyphCoordinates(delta) * scalar
263
-
264
- glyph = copy(glyfTable[self.name]) # Shallow copy
265
- width, lsb, height, tsb = _setCoordinates(glyph, coordinates, glyfTable)
266
- self.lsb = lsb
267
- self.tsb = tsb
268
- if glyphSet.hvarTable is None:
269
- # no HVAR: let's set metrics from the phantom points
270
- self.width = width
271
- self.height = height
272
- return glyph
273
-
274
-
275
- class _TTGlyphCFF(_TTGlyph):
276
- def draw(self, pen):
277
- """Draw the glyph onto ``pen``. See fontTools.pens.basePen for details
278
- how that works.
279
- """
280
- self.glyphSet.charStrings[self.name].draw(pen, self.glyphSet.blender)
281
-
282
-
283
- def _setCoordinates(glyph, coord, glyfTable):
284
- # Handle phantom points for (left, right, top, bottom) positions.
285
- assert len(coord) >= 4
286
- leftSideX = coord[-4][0]
287
- rightSideX = coord[-3][0]
288
- topSideY = coord[-2][1]
289
- bottomSideY = coord[-1][1]
290
-
291
- for _ in range(4):
292
- del coord[-1]
293
-
294
- if glyph.isComposite():
295
- assert len(coord) == len(glyph.components)
296
- glyph.components = [copy(comp) for comp in glyph.components] # Shallow copy
297
- for p, comp in zip(coord, glyph.components):
298
- if hasattr(comp, "x"):
299
- comp.x, comp.y = p
300
- elif glyph.isVarComposite():
301
- glyph.components = [copy(comp) for comp in glyph.components] # Shallow copy
302
- for comp in glyph.components:
303
- coord = comp.setCoordinates(coord)
304
- assert not coord
305
- elif glyph.numberOfContours == 0:
306
- assert len(coord) == 0
307
- else:
308
- assert len(coord) == len(glyph.coordinates)
309
- glyph.coordinates = coord
310
-
311
- glyph.recalcBounds(glyfTable)
312
-
313
- horizontalAdvanceWidth = otRound(rightSideX - leftSideX)
314
- verticalAdvanceWidth = otRound(topSideY - bottomSideY)
315
- leftSideBearing = otRound(glyph.xMin - leftSideX)
316
- topSideBearing = otRound(topSideY - glyph.yMax)
317
- return (
318
- horizontalAdvanceWidth,
319
- leftSideBearing,
320
- verticalAdvanceWidth,
321
- topSideBearing,
322
- )
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_headers.py DELETED
@@ -1,278 +0,0 @@
1
- import re
2
- from typing import AnyStr, cast, List, overload, Sequence, Tuple, TYPE_CHECKING, Union
3
-
4
- from ._abnf import field_name, field_value
5
- from ._util import bytesify, LocalProtocolError, validate
6
-
7
- if TYPE_CHECKING:
8
- from ._events import Request
9
-
10
- try:
11
- from typing import Literal
12
- except ImportError:
13
- from typing_extensions import Literal # type: ignore
14
-
15
-
16
- # Facts
17
- # -----
18
- #
19
- # Headers are:
20
- # keys: case-insensitive ascii
21
- # values: mixture of ascii and raw bytes
22
- #
23
- # "Historically, HTTP has allowed field content with text in the ISO-8859-1
24
- # charset [ISO-8859-1], supporting other charsets only through use of
25
- # [RFC2047] encoding. In practice, most HTTP header field values use only a
26
- # subset of the US-ASCII charset [USASCII]. Newly defined header fields SHOULD
27
- # limit their field values to US-ASCII octets. A recipient SHOULD treat other
28
- # octets in field content (obs-text) as opaque data."
29
- # And it deprecates all non-ascii values
30
- #
31
- # Leading/trailing whitespace in header names is forbidden
32
- #
33
- # Values get leading/trailing whitespace stripped
34
- #
35
- # Content-Disposition actually needs to contain unicode semantically; to
36
- # accomplish this it has a terrifically weird way of encoding the filename
37
- # itself as ascii (and even this still has lots of cross-browser
38
- # incompatibilities)
39
- #
40
- # Order is important:
41
- # "a proxy MUST NOT change the order of these field values when forwarding a
42
- # message"
43
- # (and there are several headers where the order indicates a preference)
44
- #
45
- # Multiple occurences of the same header:
46
- # "A sender MUST NOT generate multiple header fields with the same field name
47
- # in a message unless either the entire field value for that header field is
48
- # defined as a comma-separated list [or the header is Set-Cookie which gets a
49
- # special exception]" - RFC 7230. (cookies are in RFC 6265)
50
- #
51
- # So every header aside from Set-Cookie can be merged by b", ".join if it
52
- # occurs repeatedly. But, of course, they can't necessarily be split by
53
- # .split(b","), because quoting.
54
- #
55
- # Given all this mess (case insensitive, duplicates allowed, order is
56
- # important, ...), there doesn't appear to be any standard way to handle
57
- # headers in Python -- they're almost like dicts, but... actually just
58
- # aren't. For now we punt and just use a super simple representation: headers
59
- # are a list of pairs
60
- #
61
- # [(name1, value1), (name2, value2), ...]
62
- #
63
- # where all entries are bytestrings, names are lowercase and have no
64
- # leading/trailing whitespace, and values are bytestrings with no
65
- # leading/trailing whitespace. Searching and updating are done via naive O(n)
66
- # methods.
67
- #
68
- # Maybe a dict-of-lists would be better?
69
-
70
- _content_length_re = re.compile(rb"[0-9]+")
71
- _field_name_re = re.compile(field_name.encode("ascii"))
72
- _field_value_re = re.compile(field_value.encode("ascii"))
73
-
74
-
75
- class Headers(Sequence[Tuple[bytes, bytes]]):
76
- """
77
- A list-like interface that allows iterating over headers as byte-pairs
78
- of (lowercased-name, value).
79
-
80
- Internally we actually store the representation as three-tuples,
81
- including both the raw original casing, in order to preserve casing
82
- over-the-wire, and the lowercased name, for case-insensitive comparisions.
83
-
84
- r = Request(
85
- method="GET",
86
- target="/",
87
- headers=[("Host", "example.org"), ("Connection", "keep-alive")],
88
- http_version="1.1",
89
- )
90
- assert r.headers == [
91
- (b"host", b"example.org"),
92
- (b"connection", b"keep-alive")
93
- ]
94
- assert r.headers.raw_items() == [
95
- (b"Host", b"example.org"),
96
- (b"Connection", b"keep-alive")
97
- ]
98
- """
99
-
100
- __slots__ = "_full_items"
101
-
102
- def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None:
103
- self._full_items = full_items
104
-
105
- def __bool__(self) -> bool:
106
- return bool(self._full_items)
107
-
108
- def __eq__(self, other: object) -> bool:
109
- return list(self) == list(other) # type: ignore
110
-
111
- def __len__(self) -> int:
112
- return len(self._full_items)
113
-
114
- def __repr__(self) -> str:
115
- return "<Headers(%s)>" % repr(list(self))
116
-
117
- def __getitem__(self, idx: int) -> Tuple[bytes, bytes]: # type: ignore[override]
118
- _, name, value = self._full_items[idx]
119
- return (name, value)
120
-
121
- def raw_items(self) -> List[Tuple[bytes, bytes]]:
122
- return [(raw_name, value) for raw_name, _, value in self._full_items]
123
-
124
-
125
- HeaderTypes = Union[
126
- List[Tuple[bytes, bytes]],
127
- List[Tuple[bytes, str]],
128
- List[Tuple[str, bytes]],
129
- List[Tuple[str, str]],
130
- ]
131
-
132
-
133
- @overload
134
- def normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers:
135
- ...
136
-
137
-
138
- @overload
139
- def normalize_and_validate(headers: HeaderTypes, _parsed: Literal[False]) -> Headers:
140
- ...
141
-
142
-
143
- @overload
144
- def normalize_and_validate(
145
- headers: Union[Headers, HeaderTypes], _parsed: bool = False
146
- ) -> Headers:
147
- ...
148
-
149
-
150
- def normalize_and_validate(
151
- headers: Union[Headers, HeaderTypes], _parsed: bool = False
152
- ) -> Headers:
153
- new_headers = []
154
- seen_content_length = None
155
- saw_transfer_encoding = False
156
- for name, value in headers:
157
- # For headers coming out of the parser, we can safely skip some steps,
158
- # because it always returns bytes and has already run these regexes
159
- # over the data:
160
- if not _parsed:
161
- name = bytesify(name)
162
- value = bytesify(value)
163
- validate(_field_name_re, name, "Illegal header name {!r}", name)
164
- validate(_field_value_re, value, "Illegal header value {!r}", value)
165
- assert isinstance(name, bytes)
166
- assert isinstance(value, bytes)
167
-
168
- raw_name = name
169
- name = name.lower()
170
- if name == b"content-length":
171
- lengths = {length.strip() for length in value.split(b",")}
172
- if len(lengths) != 1:
173
- raise LocalProtocolError("conflicting Content-Length headers")
174
- value = lengths.pop()
175
- validate(_content_length_re, value, "bad Content-Length")
176
- if seen_content_length is None:
177
- seen_content_length = value
178
- new_headers.append((raw_name, name, value))
179
- elif seen_content_length != value:
180
- raise LocalProtocolError("conflicting Content-Length headers")
181
- elif name == b"transfer-encoding":
182
- # "A server that receives a request message with a transfer coding
183
- # it does not understand SHOULD respond with 501 (Not
184
- # Implemented)."
185
- # https://tools.ietf.org/html/rfc7230#section-3.3.1
186
- if saw_transfer_encoding:
187
- raise LocalProtocolError(
188
- "multiple Transfer-Encoding headers", error_status_hint=501
189
- )
190
- # "All transfer-coding names are case-insensitive"
191
- # -- https://tools.ietf.org/html/rfc7230#section-4
192
- value = value.lower()
193
- if value != b"chunked":
194
- raise LocalProtocolError(
195
- "Only Transfer-Encoding: chunked is supported",
196
- error_status_hint=501,
197
- )
198
- saw_transfer_encoding = True
199
- new_headers.append((raw_name, name, value))
200
- else:
201
- new_headers.append((raw_name, name, value))
202
- return Headers(new_headers)
203
-
204
-
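As a hedged usage sketch of the Content-Length handling above (the content strongly suggests this is h11's private `_headers` module, so the import path is an assumption):

```python
from h11 import LocalProtocolError
from h11._headers import normalize_and_validate  # private module: path is an assumption

# Duplicate but consistent Content-Length values collapse to a single entry.
headers = normalize_and_validate([("Content-Length", "10"), ("content-length", "10")])
assert list(headers) == [(b"content-length", b"10")]

# Conflicting values are rejected.
try:
    normalize_and_validate([("Content-Length", "10"), ("Content-Length", "11")])
except LocalProtocolError:
    pass
```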
205
- def get_comma_header(headers: Headers, name: bytes) -> List[bytes]:
206
- # Should only be used for headers whose value is a list of
207
- # comma-separated, case-insensitive values.
208
- #
209
- # The header name `name` is expected to be lower-case bytes.
210
- #
211
- # Connection: meets these criteria (including case insensitivity).
212
- #
213
- # Content-Length: technically is just a single value (1*DIGIT), but the
214
- # standard makes reference to implementations that do multiple values, and
215
- # using this doesn't hurt. Ditto, case insensitivity doesn't hurt things either
216
- # way.
217
- #
218
- # Transfer-Encoding: is more complex (allows for quoted strings), so
219
- # splitting on , is actually wrong. For example, this is legal:
220
- #
221
- # Transfer-Encoding: foo; options="1,2", chunked
222
- #
223
- # and should be parsed as
224
- #
225
- # foo; options="1,2"
226
- # chunked
227
- #
228
- # but this naive function will parse it as
229
- #
230
- # foo; options="1
231
- # 2"
232
- # chunked
233
- #
234
- # However, this is okay because the only thing we are going to do with
235
- # any Transfer-Encoding is reject ones that aren't just "chunked", so
236
- # both of these will be treated the same anyway.
237
- #
238
- # Expect: the only legal value is the literal string
239
- # "100-continue". Splitting on commas is harmless. Case insensitive.
240
- #
241
- out: List[bytes] = []
242
- for _, found_name, found_raw_value in headers._full_items:
243
- if found_name == name:
244
- found_raw_value = found_raw_value.lower()
245
- for found_split_value in found_raw_value.split(b","):
246
- found_split_value = found_split_value.strip()
247
- if found_split_value:
248
- out.append(found_split_value)
249
- return out
250
-
251
-
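A hedged sketch of the naive splitting behaviour described above (same caveat about the private import path):

```python
from h11._headers import get_comma_header, normalize_and_validate  # paths are assumptions

headers = normalize_and_validate(
    [("Connection", "keep-alive, Upgrade"), ("connection", "close")]
)
# Values are lowercased, split on commas, and stripped, across all matching headers.
assert get_comma_header(headers, b"connection") == [b"keep-alive", b"upgrade", b"close"]
```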
252
- def set_comma_header(headers: Headers, name: bytes, new_values: List[bytes]) -> Headers:
253
- # The header name `name` is expected to be lower-case bytes.
254
- #
255
- # Note that when we store the header we use title casing for the header
256
- # names, in order to match the conventional HTTP header style.
257
- #
258
- # Simply calling `.title()` is a blunt approach, but it's correct
259
- # here given the cases where we're using `set_comma_header`...
260
- #
261
- # Connection, Content-Length, Transfer-Encoding.
262
- new_headers: List[Tuple[bytes, bytes]] = []
263
- for found_raw_name, found_name, found_raw_value in headers._full_items:
264
- if found_name != name:
265
- new_headers.append((found_raw_name, found_raw_value))
266
- for new_value in new_values:
267
- new_headers.append((name.title(), new_value))
268
- return normalize_and_validate(new_headers)
269
-
270
-
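And the corresponding replace-then-revalidate behaviour, again as a hedged sketch:

```python
from h11._headers import normalize_and_validate, set_comma_header  # paths are assumptions

headers = normalize_and_validate([("Host", "example.org"), ("Connection", "close")])
headers = set_comma_header(headers, b"connection", [b"keep-alive", b"upgrade"])
# Existing entries for the name are dropped; new values are re-added with title casing.
assert headers.raw_items() == [
    (b"Host", b"example.org"),
    (b"Connection", b"keep-alive"),
    (b"Connection", b"upgrade"),
]
```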
271
- def has_expect_100_continue(request: "Request") -> bool:
272
- # https://tools.ietf.org/html/rfc7231#section-5.1.1
273
- # "A server that receives a 100-continue expectation in an HTTP/1.0 request
274
- # MUST ignore that expectation."
275
- if request.http_version < b"1.1":
276
- return False
277
- expect = get_comma_header(request.headers, b"expect")
278
- return b"100-continue" in expect
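Finally, a hedged sketch of the HTTP/1.0 carve-out implemented by `has_expect_100_continue`, using h11's public `Request` class alongside the private helper (the private import path is an assumption):

```python
import h11
from h11._headers import has_expect_100_continue  # private module: path is an assumption

common = [("Host", "example.org"), ("Expect", "100-continue"), ("Content-Length", "5")]
r10 = h11.Request(method="POST", target="/", headers=common, http_version="1.0")
r11 = h11.Request(method="POST", target="/", headers=common)  # defaults to HTTP/1.1

assert not has_expect_100_continue(r10)  # the expectation is ignored for HTTP/1.0
assert has_expect_100_continue(r11)
```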
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_chunk_utils.py DELETED
@@ -1,64 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2022-present, the HuggingFace Inc. team.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """Contains a utility to iterate by chunks over an iterator."""
16
- import itertools
17
- from typing import Iterable, TypeVar
18
-
19
-
20
- T = TypeVar("T")
21
-
22
-
23
- def chunk_iterable(iterable: Iterable[T], chunk_size: int) -> Iterable[Iterable[T]]:
24
- """Iterates over an iterator chunk by chunk.
25
-
26
- Taken from https://stackoverflow.com/a/8998040.
27
- See also https://github.com/huggingface/huggingface_hub/pull/920#discussion_r938793088.
28
-
29
- Args:
30
- iterable (`Iterable`):
31
- The iterable on which we want to iterate.
32
- chunk_size (`int`):
33
- Size of the chunks. Must be a strictly positive integer (i.e. > 0).
34
-
35
- Example:
36
-
37
- ```python
38
- >>> from huggingface_hub.utils import chunk_iterable
39
-
40
- >>> for items in chunk_iterable(range(17), chunk_size=8):
41
- ... print(items)
42
- # [0, 1, 2, 3, 4, 5, 6, 7]
43
- # [8, 9, 10, 11, 12, 13, 14, 15]
44
- # [16] # smaller last chunk
45
- ```
46
-
47
- Raises:
48
- [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
49
- If `chunk_size` <= 0.
50
-
51
- <Tip warning={true}>
52
- The last chunk can be smaller than `chunk_size`.
53
- </Tip>
54
- """
55
- if not isinstance(chunk_size, int) or chunk_size <= 0:
56
- raise ValueError("`chunk_size` must be a strictly positive integer (>0).")
57
-
58
- iterator = iter(iterable)
59
- while True:
60
- try:
61
- next_item = next(iterator)
62
- except StopIteration:
63
- return
64
- yield itertools.chain((next_item,), itertools.islice(iterator, chunk_size - 1))
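For reference, a hedged comparison with the standard library: on Python 3.12+, `itertools.batched` gives similar chunking, though it yields tuples eagerly, while the deleted helper yields lazy chains over a shared iterator (so each chunk should be consumed before advancing to the next).

```python
import itertools

from huggingface_hub.utils import chunk_iterable  # per the docstring above

lazy = [list(chunk) for chunk in chunk_iterable(range(10), chunk_size=4)]
eager = [list(batch) for batch in itertools.batched(range(10), 4)]
assert lazy == eager == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
```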