parquet-converter committed on
Commit e5ccb28 · 1 Parent(s): 4ffac1c

Update parquet files (step 87 of 121)

This view is limited to 50 files because it contains too many changes. See the raw diff.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/3dsk Complete Collection 27 GB Repackedl.md +0 -21
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Blender Software Crack Download [NEW].md +0 -12
  3. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bleach Vs Naruto 3.6 - The Official Version with 97 Characters and 51 Assists.md +0 -114
  4. spaces/1phancelerku/anime-remove-background/Arceus X V3.0 APK The Ultimate Roblox Exploit for Android - Supports PC Scripts and More.md +0 -147
  5. spaces/1phancelerku/anime-remove-background/Download My Talking Tom 2 APK on APKPure and Join the Adventure with Tom.md +0 -150
  6. spaces/1phancelerku/anime-remove-background/Download iso red hat and get access to the best open source software for cloud development.md +0 -115
  7. spaces/232labs/VToonify/vtoonify/model/simple_augment.py +0 -468
  8. spaces/AIGC-Audio/Make_An_Audio/wav_evaluation/models/CLAPWrapper.py +0 -253
  9. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-v61_fast_1xb12-40e_608x352_cat.py +0 -70
  10. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/box/Box.js +0 -50
  11. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_ldm_original_checkpoint_to_diffusers.py +0 -359
  12. spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_480x480_40k_pascal_context_59.py +0 -2
  13. spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/bootstrap/bootstrap.min.css +0 -0
  14. spaces/AnshuK23/Customer-review-analysis/app.py +0 -7
  15. spaces/AntX-ai/Fintech/style.css +0 -28
  16. spaces/AnthonyErosion/HoctotAI/app.py +0 -52
  17. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/sbcharsetprober.py +0 -162
  18. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/__init__.py +0 -177
  19. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/dense_heads/centernet_head.py +0 -162
  20. spaces/BBrother/NewBingAI/Dockerfile +0 -144
  21. spaces/Bambicita/rvc-models/app-full.py +0 -250
  22. spaces/Benson/text-generation/Examples/Aparcamiento De Coches Escuela De Conduccin Apk.md +0 -43
  23. spaces/Benson/text-generation/Examples/Descargar Carx Carretera Carreras Hacked.md +0 -63
  24. spaces/Benson/text-generation/Examples/Descargar Da De Heno Para Ipad.md +0 -84
  25. spaces/BetterAPI/BetterChat_new/svelte.config.js +0 -26
  26. spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/resource.py +0 -364
  27. spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/waiter.py +0 -130
  28. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/handlers.py +0 -1395
  29. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_extension.py +0 -10
  30. spaces/CVH-vn1210/make_hair/minigpt4/common/__init__.py +0 -0
  31. spaces/CVPR/LIVE/pybind11/include/pybind11/detail/typeid.h +0 -55
  32. spaces/CVPR/lama-example/bin/split_tar.py +0 -22
  33. spaces/CYSD/AI-image-detector/README.md +0 -13
  34. spaces/CofAI/chat.b4/g4f/Provider/Providers/H2o.py +0 -106
  35. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/roi_heads/box_head/__init__.py +0 -0
  36. spaces/DEEMOSTECH/ChatAvatar/static/css/main.a47c5861.css +0 -2
  37. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/abc/_tasks.py +0 -119
  38. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-dcd0cf9c.js +0 -2
  39. spaces/DaCuteRaccoon/dalle-mini/README.md +0 -11
  40. spaces/Dineshkumars/Text-Summarization/app.py +0 -68
  41. spaces/Dorado607/ChuanhuChatGPT/modules/models/base_model.py +0 -783
  42. spaces/Dref360/spectral-metric/utils.py +0 -7
  43. spaces/Eddycrack864/Applio-Inference/train/utils.py +0 -500
  44. spaces/FER-Universe/Face-Benchmarking/README.md +0 -12
  45. spaces/FlippFuzz/whisper-webui/src/hooks/subTaskProgressListener.py +0 -37
  46. spaces/FridaZuley/RVC_HFKawaii/julius/utils.py +0 -101
  47. spaces/FridaZuley/RVC_HFKawaii/train/process_ckpt.py +0 -259
  48. spaces/GT4SD/PatentToolkit/app.py +0 -576
  49. spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/align_rope_cross_zone.py +0 -36
  50. spaces/GiorgiSekhniashvili/geo-whisper/README.md +0 -13
spaces/1acneusushi/gradio-2dmoleculeeditor/data/3dsk Complete Collection 27 GB Repackedl.md DELETED
@@ -1,21 +0,0 @@
- <br />
- <h1>3dsk Complete Collection 27 GB Repacked: A Review</h1>
- <p>If you are looking for high-quality human photo references for 3D modeling and texturing, you might want to check out the 3dsk Complete Collection 27 GB Repacked. This is a huge bundle of over 5000 photos of male and female models in various poses, expressions, clothing, and environments. The photos are high-resolution (up to 16 megapixels) and have been repacked to reduce the file size without compromising the quality.</p>
- <h2>{3dsk Complete Collection 27 GB Repacked}l</h2><br /><p><b><b>DOWNLOAD</b> &#128504; <a href="https://byltly.com/2uKxgT">https://byltly.com/2uKxgT</a></b></p><br /><br />
- <p>The 3dsk Complete Collection 27 GB Repacked is ideal for artists who want to create realistic and detailed characters for games, movies, comics, or any other project that requires human anatomy and skin textures. The photos cover a wide range of body types, ages, ethnicities, and styles. You can use them as a reference for sculpting, painting, or retopologizing your 3D models. You can also use them as texture maps for your models' skin, hair, eyes, teeth, etc.</p>
- <p>The 3dsk Complete Collection 27 GB Repacked is available for download from the official website of 3d.sk, a leading provider of human photo references since 2004. The price is $199, which is a great deal considering the amount and quality of the photos you get. You can also buy individual photo sets or subscribe to a monthly plan if you prefer. The photos are royalty-free and can be used for personal or commercial projects.</p>
- <p>If you want to improve your skills and speed up your workflow as a 3D artist, you should definitely consider getting the 3dsk Complete Collection 27 GB Repacked. It is a valuable resource that will help you create stunning and realistic human characters for your projects.</p>
-
- <p>How to use the 3dsk Complete Collection 27 GB Repacked</p>
- <p>Using the 3dsk Complete Collection 27 GB Repacked is easy and straightforward. After you download the files from the website, you can unzip them and browse through the folders to find the photos you need. You can use any image viewer or editor to open and view the photos. You can also import them into your 3D software of choice and use them as reference images or texture maps.</p>
- <p></p>
- <p>Here are some tips on how to use the 3dsk Complete Collection 27 GB Repacked effectively:</p>
- <ul>
- <li>Choose the photos that match your project's style and requirements. For example, if you are creating a realistic character, you might want to use photos of models with natural poses and expressions. If you are creating a stylized character, you might want to use photos of models with exaggerated poses and expressions.</li>
- <li>Use multiple photos of the same model from different angles and perspectives to get a better understanding of the anatomy and proportions. You can also use photos of different models with similar features to get more variations and details.</li>
- <li>Use the photos as a guide, not as a copy. Don't rely too much on the photos and lose your artistic vision. Use your own creativity and imagination to add your own touch and personality to your characters.</li>
- <li>Have fun and experiment with different combinations and modifications of the photos. You can mix and match different parts of different models, change the colors and tones, add or remove details, etc. The possibilities are endless.</li>
- </ul>
- <p>The 3dsk Complete Collection 27 GB Repacked is a powerful tool that will help you create amazing human characters for your projects. Whether you are a beginner or a professional, you will find this collection useful and inspiring. Don't miss this opportunity and get your copy today!</p> 81aa517590<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Blender Software Crack Download [NEW].md DELETED
@@ -1,12 +0,0 @@
- <br />
- <h1>Why You Should Avoid Blender Software Crack Download</h1>
- <p>Blender is a free and open source 3D creation suite that can be used for modeling, animation, rendering, simulation, video editing, and more. It is one of the most popular and powerful tools for 3D artists and enthusiasts. However, some people may be tempted to download a cracked version of Blender software from unauthorized sources. This is a bad idea for several reasons. Here are some of the risks and disadvantages of Blender software crack download.</p>
- <ul>
- <li>It is illegal and unethical. Downloading a cracked version of Blender software is a violation of the GNU General Public License (GPL) that governs the distribution and use of Blender. It is also a form of software piracy that deprives the Blender Foundation and the developers of their rightful income and recognition. By downloading a cracked version of Blender software, you are breaking the law and disrespecting the hard work and dedication of the Blender community.</li>
- <li>It is unsafe and unreliable. Downloading a cracked version of Blender software from unauthorized sources exposes you to malware, viruses, spyware, and other malicious programs that can harm your computer and compromise your personal data. You also risk getting a corrupted or outdated version of Blender software that may not work properly or have missing or broken features. You may also encounter compatibility issues with other software or hardware that you use.</li>
- <li>It is unproductive and unsatisfying. Downloading a cracked version of Blender software deprives you of the benefits of using the official version of Blender. You will not be able to access the latest updates, bug fixes, improvements, and new features that are regularly released by the Blender Foundation. You will also miss out on the official documentation, tutorials, support, and feedback that are available to the legitimate users of Blender. You will also have a hard time joining or collaborating with other Blender users or projects that require the official version of Blender.</li>
- </ul>
- <p>In conclusion, downloading a cracked version of Blender software is not worth it. It is illegal, unsafe, unreliable, unproductive, and unsatisfying. It can also damage your reputation and credibility as a 3D artist or enthusiast. If you want to use Blender software, you should download it from the official website (https://www.blender.org/download/) or from a trusted source that complies with the GPL. This way, you can enjoy the full potential and benefits of Blender software without any risks or drawbacks.</p>
- <h2>blender software crack download</h2><br /><p><b><b>Download</b> &#10084; <a href="https://byltly.com/2uKxGA">https://byltly.com/2uKxGA</a></b></p><br /><br /> ddb901b051<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Bleach Vs Naruto 3.6 - The Official Version with 97 Characters and 51 Assists.md DELETED
@@ -1,114 +0,0 @@
-
- <h1>Download Bleach vs Naruto Game: A Guide for Anime Fans</h1>
- <p>If you are a fan of anime and manga, you have probably heard of Bleach and Naruto, two of the most popular Shonen Jump series in the history of the genre. But have you ever wondered what would happen if these two worlds collided? Well, wonder no more, because you can find out by playing Bleach vs Naruto game, a crossover fighting game that features characters from both series. In this article, we will tell you everything you need to know about this game, including what it is, why you should play it, how to download it, and some tips and tricks to help you win. So, let's get started!</p>
- <h2>What is Bleach vs Naruto Game?</h2>
- <h3>A crossover fighting game featuring characters from Bleach and Naruto</h3>
- <p>Bleach vs Naruto game is a flash game that was developed by 5Dplay and 4399, two Chinese gaming companies. It is a fan-made project that pays tribute to the two iconic anime and manga series, Bleach and Naruto. The game allows you to choose from more than 40 characters from both series, each with their own specific style and fighting technique. You can then pit them against each other in ruthless arena battles, either in single-player or multiplayer mode. You can also unlock new characters, stages, and modes as you progress through the game.</p>
- <h2>download bleach vs naruto game</h2><br /><p><b><b>DOWNLOAD</b> &#10040;&#10040;&#10040; <a href="https://urlin.us/2uSZwA">https://urlin.us/2uSZwA</a></b></p><br /><br />
- <h3>A flash game that can be played online or downloaded for free</h3>
- <p>One of the best things about Bleach vs Naruto game is that it is completely free to play. You can either play it online on your browser or download it to your computer or mobile device. The game is compatible with Windows, Mac, Linux, Android, and iOS platforms. The game is also regularly updated with new content and features, so you will never get bored of playing it.</p>
- <h2>Why Should You Play Bleach vs Naruto Game?</h2>
- <h3>Enjoy intense and spectacular battles with more than 40 heroes</h3>
- <p>If you are a fan of action-packed fighting games, you will love Bleach vs Naruto game. The game offers you a chance to experience the thrill of fighting with some of the most powerful and popular characters from both series. You can choose from heroes like Naruto, Sasuke, Sakura, Kakashi, Ichigo, Rukia, Orihime, Renji, and many more. You can also see how they interact with each other in different scenarios and dialogues. The game has high-quality graphics and animations that make the battles look realistic and exciting.</p>
- <h3>Experience the unique style and technique of each character</h3>
- <p>Another reason why you should play Bleach vs Naruto game is that it lets you explore the unique style and technique of each character. The game faithfully recreates the abilities and skills of each character from their respective series. For example, you can use Naruto's Rasengan, Sasuke's Chidori, Ichigo's Getsuga Tensho, Rukia's Sode no Shirayuki, and many more. You can also unleash their most powerful transformations, such as Naruto's Nine-Tails mode, Sasuke's Sharingan mode, Ichigo's Hollow mode, Rukia's Bankai mode, and so on. The game also has a balanced gameplay system that makes each character fun and challenging to play.</p>
- <h3>Discover the hidden powers and transformations of your favorite heroes</h3>
- <p>One more reason why you should play Bleach vs Naruto game is that it allows you to discover the hidden powers and transformations of your favorite heroes. The game has a special feature called Awakening mode, which lets you activate the ultimate form of your character when your health is low. This mode gives you a huge boost in power and speed, as well as access to new moves and abilities. For example, you can awaken Naruto's Sage mode, Sasuke's Susanoo mode, Ichigo's Final Getsuga Tensho mode, Rukia's Hakka no Togame mode, and many more. You can also switch between different forms of your character during the battle, such as Naruto's different tails modes, Sasuke's different Sharingan modes, Ichigo's different Hollow modes, and so on. The game has a lot of surprises and secrets for you to discover and enjoy.</p>
- <h2>How to Download Bleach vs Naruto Game?</h2>
- <h3>Visit the official website of the game or a trusted source</h3>
- <p>If you want to download Bleach vs Naruto game, the first thing you need to do is visit the official website of the game or a trusted source. The official website of the game is <a href="">https://www.5dplay.net/</a>, where you can find the latest version and updates of the game. You can also find other sources that offer the game for download, such as <a href="">https://www.gametop.com/download-free/bleach-vs-naruto/</a> or <a href="">https://www.crazygames.com/game/bleach-vs-naruto-32</a>. However, make sure that you only download from safe and reliable sources, as some websites may contain viruses or malware that can harm your device.</p>
- <h3>Choose the version and platform that suits you</h3>
- <p>The next thing you need to do is choose the version and platform that suits you. The game has different versions for different platforms, such as Windows, Mac, Linux, Android, and iOS. You can also choose between online and offline versions of the game. The online version lets you play the game on your browser without downloading anything, while the offline version lets you download the game to your device and play it anytime. The offline version also has some advantages over the online version, such as faster loading speed, better performance, and more features. You can choose the version and platform that best fits your preferences and needs.</p>
- <h3>Follow the instructions and start playing</h3>
- <p>The last thing you need to do is follow the instructions and start playing. If you choose the online version of the game, you just need to click on the play button on the website and wait for the game to load. You may need to enable flash player on your browser if it is not already enabled. If you choose the offline version of the game, you need to download the game file from the website and save it to your device. Then, you need to unzip or extract the file using a software like WinRAR or 7-Zip. After that, you need to open the folder and run the executable file of the game. You may need to install some additional software or drivers if they are not already installed on your device. Once you have done all these steps, you can start playing Bleach vs Naruto game and have fun!</p>
- <p>download bleach vs naruto pc game<br />
- download bleach vs naruto android game<br />
- download bleach vs naruto mod apk<br />
- download bleach vs naruto latest version<br />
- download bleach vs naruto 3.3 pc<br />
- download bleach vs naruto 3.8 sports version<br />
- download bleach vs naruto mugen<br />
- download bleach vs naruto offline game<br />
- download bleach vs naruto for windows 10<br />
- download bleach vs naruto for mac os<br />
- how to download and install bleach vs naruto on pc<br />
- how to download and play bleach vs naruto on android<br />
- how to add characters in bleach vs naruto game<br />
- how to unlock all characters in bleach vs naruto game<br />
- how to update bleach vs naruto game<br />
- where to download bleach vs naruto game for free<br />
- where to find bleach vs naruto game mods<br />
- where to get bleach vs naruto game cheats<br />
- where to watch bleach vs naruto game videos<br />
- where to learn bleach vs naruto game tips and tricks<br />
- best site to download bleach vs naruto game<br />
- best site to download bleach vs naruto game characters<br />
- best site to download bleach vs naruto game music<br />
- best site to download bleach vs naruto game wallpapers<br />
- best site to download bleach vs naruto game guides<br />
- is bleach vs naruto game safe to download<br />
- is bleach vs naruto game compatible with my device<br />
- is bleach vs naruto game worth playing<br />
- is bleach vs naruto game online or offline<br />
- is bleach vs naruto game easy or hard<br />
- what is the size of bleach vs naruto game file<br />
- what is the rating of bleach vs naruto game<br />
- what is the genre of bleach vs naruto game<br />
- what is the difference between bleach and naruto in the game<br />
- what is the latest update of bleach vs naruto game<br />
- why is bleach vs naruto game popular<br />
- why is bleach vs naruto game fun<br />
- why is bleach vs naruto game challenging<br />
- why is bleach vs naruto game addictive<br />
- why is bleach vs naruto game not working on my device</p>
- <h2>Tips and Tricks for Playing Bleach vs Naruto Game</h2>
- <h3>Learn the basic controls and combos of each character</h3>
- <p>One of the most important tips for playing Bleach vs Naruto game is to learn the basic controls and combos of each character. The game has a simple and intuitive control scheme that uses the keyboard or mouse for input. You can also customize the controls according to your preference in the settings menu. The basic controls are as follows:</p>
- <table>
- <tr><th>Key</th><th>Action</th></tr>
- <tr><td>WASD</td><td>Move</td></tr>
- <tr><td>J</td><td>Attack</td></tr>
- <tr><td>K</td><td>Jump</td></tr>
- <tr><td>L</td><td>Dodge</td></tr>
- <tr><td>U</td><td>Skill 1</td></tr>
- <tr><td>I</td><td>Skill 2</td></tr>
- <tr><td>O</td><td>Skill 3</td></tr>
- <tr><td>P</td><td>Awakening mode</td></tr>
- <tr><td>S+J/S+U/S+I/S+O</td><td>Special attack/Transformation/Summoning/Assist (depending on character)</td></tr>
- </table>
- <p>You can also combine different keys to perform combos and special moves. For example, you can press J+K to do a dash attack, J+L to do a counterattack, or U+I+O to do a triple skill attack. You can find more combos and moves in the game's tutorial or help menu.</p>
- <h3>Try different game modes and challenges to test your skills</h3>
- <p>Another tip for playing Bleach vs Naruto game is to try different game modes and challenges to test your skills. The game has several game modes and challenges that offer different objectives and difficulties. You can choose from the following game modes and challenges:</p>
- <ul>
- <li>Story mode: Follow the original storylines of Bleach and Naruto and fight against various enemies and bosses.</li>
- <li>Arcade mode: Fight against random opponents in a series of battles and try to get the highest score.</li>
- <li>VS mode: Fight against a friend or a computer-controlled opponent in a one-on-one or two-on-two battle.</li>
- <li>Survival mode: Fight against endless waves of enemies and try to survive as long as possible.</li>
- <li>Training mode: Practice your skills and learn new combos and moves.</li>
- <li>Challenge mode: Complete various tasks and missions with specific conditions and requirements.</li>
- </ul>
- <p>You can also adjust the difficulty level, the time limit, the number of rounds, and other settings in each game mode and challenge. You can also unlock new game modes and challenges as you play the game.</p>
- <h3>Play with your friends or against other players online</h3>
- <p>The last tip for playing Bleach vs Naruto game is to play with your friends or against other players online. The game has a multiplayer mode that lets you play with or against other players from around the world. You can either join an existing room or create your own room with your own rules. You can also chat with other players and make new friends. Playing with or against other players online can make the game more fun and challenging, as you can learn from their strategies and skills.</p>
- <h2>Conclusion</h2>
- <p>Bleach vs Naruto game is a great game for anime fans who want to enjoy a crossover fighting game that features characters from both series. The game has a lot of content and features that make it entertaining and engaging. You can play the game online or download it for free, choose from more than 40 characters, experience their unique style and technique, discover their hidden powers and transformations, try different game modes and challenges, and play with your friends or against other players online. The game is easy to play but hard to master, so you will always have something new to learn and improve. If you are looking for a fun and exciting fighting game that combines Bleach and Naruto, you should definitely try Bleach vs Naruto game!</p>
- <h2>FAQs</h2>
- <h4>Q: Is Bleach vs Naruto game safe to download?</h4>
- <p>A: Yes, Bleach vs Naruto game is safe to download as long as you download it from the official website of the game or a trusted source. However, you should always scan the file with an antivirus software before opening it, just to be safe.</p>
- <h4>Q: How can I update Bleach vs Naruto game?</h4>
- <p>A: You can update Bleach vs Naruto game by visiting the official website of the game or a trusted source and downloading the latest version of the game. You can also check for updates in the game's settings menu.</p>
- <h4>Q: How can I unlock new characters in Bleach vs Naruto game?</h4>
- <p>A: You can unlock new characters in Bleach vs Naruto game by playing the story mode, the arcade mode, or the challenge mode. You can also unlock some characters by entering certain codes in the character selection screen.</p>
- <h4>Q: How can I change the language of Bleach vs Naruto game?</h4>
- <p>A: You can change the language of Bleach vs Naruto game by clicking on the flag icon in the main menu or in the settings menu. You can choose from English, Chinese, Japanese, French, Spanish, Portuguese, Russian, Arabic, Turkish, Vietnamese, Indonesian, Thai, Korean, German, Italian, Polish, Romanian, Hungarian, Czech, Slovakian, Bulgarian, Greek, Hindi, Urdu, Persian, Hebrew, Malayalam, and Tamil languages.</p>
- <h4>Q: How can I contact the developers of Bleach vs Naruto game?</h4>
- <p>A: You can contact the developers of Bleach vs Naruto game by visiting their official website or their social media pages. You can also send them an email or leave a comment on their website. The developers are 5Dplay and 4399, and their contact information is as follows:</p>
- <ul>
- <li>Website: <a href="">https://www.5dplay.net/</a></li>
- <li>Email: <a href="">[email protected]</a></li>
- <li>Facebook: <a href="">https://www.facebook.com/5dplay.net/</a></li>
- <li>Twitter: <a href="">https://twitter.com/5dplay</a></li>
- <li>Weibo: <a href="">https://weibo.com/5dplay</a></li>
- </ul></p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Arceus X V3.0 APK The Ultimate Roblox Exploit for Android - Supports PC Scripts and More.md DELETED
@@ -1,147 +0,0 @@
- <br />
- <h1>Roblox Arceus X V3 0 APK: The Ultimate Android Roblox Mod Menu</h1>
- <p>Do you love playing Roblox games on your Android device? Do you want to have more fun and excitement by exploiting your favorite games with advanced features? If yes, then you need to try Roblox Arceus X V3 0 APK, the ultimate Android Roblox mod menu that will make your gaming experience more enjoyable and thrilling.</p>
- <p>In this article, we will tell you everything you need to know about Roblox Arceus X V3 0 APK, including what it is, what it can do, how to download and install it, and how to use it to exploit Roblox games. So, without further ado, let's get started!</p>
- <h2>roblox arceus x v3 0 apk</h2><br /><p><b><b>Download</b> &#128505; <a href="https://jinyurl.com/2uNKgH">https://jinyurl.com/2uNKgH</a></b></p><br /><br />
- <h2>What is Roblox Arceus X V3 0 APK?</h2>
- <h3>A brief introduction to Roblox Arceus X</h3>
- <p>Roblox Arceus X is a first and one of the most widely used Roblox mod menu/exploit specially developed for Android devices. It allows you to use features such as Android LuaU Execution, Infinite Jump, Super Speed, Btools, Script Hub, More! </p>
- <p>Roblox Arceus X is developed using Node.js, C++, JAVA. It’s an Android application that has a floating menu to execute scripts while you are in the game. </p>
- <p>Roblox Arceus X V3 0 APK is the latest version of Roblox Arceus X that has been released with many advanced features. The latest version of Roblox Arceus X will execute many PC scripts, which isn’t supported in the current version. The team of Roblox Arceus X are also going to release iOS and Mac versions of Roblox Arceus X soon. </p>
- <h3>The features and benefits of Roblox Arceus X</h3>
- <p>Roblox Arceus X has many features and benefits that will make your Roblox gaming experience more fun and exciting. Some of the main features and benefits are:</p>
- <ul>
- <li>You can exploit any Roblox game on your Android device with ease.</li>
- <li>You can access a script hub with hundreds of scripts for various popular games such as Adopt Me, Blox Fruits, Pet Simulator X, etc.</li>
- <li>You can use a script executor to run any script you want on any game you want.</li>
- <li>You can customize your settings such as speed, jump power, gravity, etc.</li>
- <li>You can use a mod menu to toggle various options such as fly, noclip, teleport, etc.</li>
- <li>You can enjoy a smooth and stable performance with no lag or crash.</li>
- <li>You can get updates and support from the developers regularly.</li>
- </ul>
- <h2>How to download and install Roblox Arceus X V3 0 APK?</h2>
- <h3>The steps to download Roblox Arceus X from our site</h3>
- <p>If you want to download Roblox Arceus X V3 0 APK from our site, you need to follow these simple steps:</p>
- <ol>
- <li>Click on the download button below to start the downloading process.</li>
- <li>Wait for a few seconds until the download is complete.</li>
- <li>Locate the downloaded file in your device's file manager and tap on it.</li>
- </ol>
- <h3>The steps to install and use Roblox Ar <h3>The steps to install and use Roblox Arceus X on your Android device</h3>
- <p>After you have downloaded Roblox Arceus X V3 0 APK from our site, you need to install and use it on your Android device. Here are the steps to do that:</p>
- <ol>
- <li>Before installing the APK file, you need to enable the "Unknown Sources" option in your device's settings. This will allow you to install apps from sources other than the Google Play Store.</li>
- <li>After enabling the option, go back to the file manager and tap on the APK file again.</li>
- <li>Follow the instructions on the screen to install Roblox Arceus X on your device.</li>
- <li>Once the installation is complete, open Roblox Arceus X from your app drawer or home screen.</li>
- <li>Grant the necessary permissions to Roblox Arceus X to access your device's features.</li>
- <li>Now, you can launch any Roblox game you want from Roblox Arceus X.</li>
- <li>You will see a floating menu on the screen with various options and features.</li>
- <li>You can tap on the menu to access the script hub, the script executor, the mod menu, and the custom settings.</li>
- <li>You can also drag and resize the menu as per your preference.</li>
- <li>Enjoy exploiting Roblox games with Roblox Arceus X!</li>
- </ol>
- <h2>How to exploit Roblox games with Roblox Arceus X V3 0 APK?</h2>
- <h3>The script hub and the script executor of Roblox Arceus X</h3>
- <p>One of the main features of Roblox Arceus X is the script hub and the script executor. The script hub is a collection of scripts for various popular Roblox games that you can use to exploit them. The script executor is a tool that allows you to run any script you want on any game you want.</p>
- <p>To use the script hub and the script executor of Roblox Arceus X, you need to follow these steps:</p>
- <p>roblox mod menu arceus x apk download<br />
- arceus x v3.1.0 public beta roblox exploit<br />
- how to use arceus x on android roblox<br />
- arceus x roblox script executor for mobile<br />
- roblox arceus x apk latest version free<br />
- arceus x v3.0 roblox mod menu features<br />
- download arceus x apk for roblox without linkvertise<br />
- arceus x roblox exploit for blox fruits<br />
- arceus x android roblox executor for shindo life<br />
- roblox arceus x apk no verification required<br />
- arceus x v3.1.0 roblox mod menu update<br />
- how to install arceus x apk on android roblox<br />
- arceus x roblox exploit for pet simulator x<br />
- arceus x android roblox executor for da hood<br />
- roblox arceus x apk safe and secure download<br />
- arceus x v3.0 roblox mod menu review<br />
- how to get key for arceus x apk on roblox<br />
- arceus x roblox exploit for combat tycoon<br />
- arceus x android roblox executor for anime adventures<br />
- roblox arceus x apk best scripts to use<br />
- arceus x v3.1.0 roblox mod menu tutorial<br />
- how to fix arceus x apk not working on roblox<br />
- arceus x roblox exploit for brookhaven rp<br />
- arceus x android roblox executor for livetopia<br />
- roblox arceus x apk alternatives and competitors<br />
- arceus x v3.0 roblox mod menu comparison<br />
- how to uninstall arceus x apk from roblox<br />
- arceus x roblox exploit for my restaurant<br />
- arceus x android roblox executor for doors script<br />
- roblox arceus x apk pros and cons analysis<br />
- arceus x v3.1.0 roblox mod menu feedback and ratings<br />
- how to contact arceus x apk developers for support on roblox<br />
- arceus x roblox exploit for adopt me script<br />
- arceus x android roblox executor for bedwars script<br />
- roblox arceus x apk frequently asked questions and answers<br />
- arceus x v3.0 roblox mod menu testimonials and reviews<br />
- how to join arceus x apk discord server for roblox exploits<br />
- arceus x roblox exploit for blox fruit script<br />
- arceus x android roblox executor for pet simulator script<br />
- roblox arceus x apk benefits and advantages over other executors<br />
- arceus x v3.1.0 roblox mod menu changelog and improvements<br />
- how to report bugs and issues with arceus x apk on roblox<br />
- arceus x roblox exploit for anime fighting simulator script<br />
- arceus x android roblox executor for tower of hell script<br />
- roblox arceus x apk drawbacks and disadvantages over other executors</p>
- <ol>
- <li>Launch any Roblox game you want from Roblox Arceus X.</li>
- <li>Tap on the floating menu and select "Script Hub".</li>
- <li>You will see a list of scripts for different games. You can scroll through them or use the search bar to find the one you want.</li>
- <li>Tap on the script you want to use and select "Execute".</li>
- <li>The script will run on the game and you will see its effects.</li>
- <li>You can also use the "Script Executor" option to run any script you have on your device or copy from any source.</li>
- <li>Just tap on "Script Executor" and select "Browse" or "Paste" depending on where your script is located.</li>
- <li>Select or paste your script and tap on "Execute".</li>
- <li>The script will run on the game and you will see its effects.</li>
- </ol>
- <h3>The mod menu and the custom settings of Roblox Arceus X</h3>
- <p>Another feature of Roblox Arceus X is the mod menu and the custom settings. The mod menu is a list of options that you can toggle on or off to exploit various aspects of the game. The custom settings are a set of sliders that you can adjust to change various parameters of the game.</p>
- <p>To use the mod menu and the custom settings of Roblox Arceus X, you need to follow these steps:</p>
- <ol>
- <li>Launch any Roblox game you want from Roblox Arceus X.</li>
- <li>Tap on the floating menu and select "Mod Menu".</li>
- <li>You will see a list of options such as fly, noclip, teleport, etc. You can tap on them to toggle them on or off.</li>
- <li>You will see their effects on the game immediately.</li>
- <li>You can also use the "Custom Settings" option to change various parameters such as speed, jump power, gravity, etc.</li>
- <li>Just tap on "Custom Settings" and adjust the sliders as per your preference.</li>
- <li>You will see their effects on the game immediately.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>Roblox Arceus X V3 0 APK is an amazing Android Roblox mod menu that allows you to exploit any Roblox game with ease. It has many features and benefits that will make your gaming experience more fun and exciting. You can download and install it from our site in a few simple steps. You can also use it to access a script hub, a script executor, a mod menu, and custom settings. You can enjoy a smooth and stable performance with no lag or crash. You can get updates and support from the developers regularly. So So, what are you waiting for? Download Roblox Arceus X V3 0 APK from our site today and start exploiting Roblox games like never before!</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions about Roblox Arceus X V3 0 APK:</p>
- <table>
- <tr>
- <th>Question</th>
- <th>Answer</th>
- </tr>
- <tr>
- <td>Is Roblox Arceus X V3 0 APK safe to use?</td>
- <td>Yes, Roblox Arceus X V3 0 APK is safe to use. It does not contain any viruses, malware, or spyware. It also does not require any root access or permissions. However, you should always use it at your own risk and discretion, as exploiting Roblox games may violate the terms of service and result in bans or suspensions.</td>
- </tr>
- <tr>
- <td>Does Roblox Arceus X V3 0 APK work on all Android devices?</td>
- <td>Roblox Arceus X V3 0 APK works on most Android devices that have Android 4.4 or higher. However, some devices may not be compatible or may experience some issues. If you encounter any problems, you can contact the developers for help or feedback.</td>
- </tr>
- <tr>
- <td>Can I use Roblox Arceus X V3 0 APK on PC or iOS?</td>
- <td>No, Roblox Arceus X V3 0 APK is only for Android devices. However, the developers are working on releasing iOS and Mac versions of Roblox Arceus X soon. You can follow their social media accounts or join their Discord server for updates and announcements.</td>
- </tr>
- <tr>
- <td>Where can I get more scripts for Roblox Arceus X V3 0 APK?</td>
- <td>You can get more scripts for Roblox Arceus X V3 0 APK from various sources such as YouTube, Reddit, Discord, etc. You can also request scripts from the developers or the community. However, you should always check the scripts for any malicious code or harmful effects before using them.</td>
- </tr>
- <tr>
- <td>How can I support the developers of Roblox Arceus X V3 0 APK?</td>
- <td>You can support the developers of Roblox Arceus X V3 0 APK by donating to them via PayPal or Patreon. You can also share their app with your friends and family, leave positive reviews and ratings, and follow their social media accounts.</td>
- </tr>
- </table></p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download My Talking Tom 2 APK on APKPure and Join the Adventure with Tom.md DELETED
@@ -1,150 +0,0 @@
1
- <br />
2
- <h1>My Talking Tom 2 APK Download Apkpure: A Fun and Engaging Virtual Pet Game</h1>
3
- <p>If you are looking for a fun and engaging virtual pet game, you might want to check out My Talking Tom 2. This game is a sequel to the popular My Talking Tom game, which has been downloaded over a billion times. In this game, you get to adopt a cute and cuddly talking cat named Tom, who will become your best friend. You can interact with him, play with him, feed him, dress him up, and even take him on adventures. In this article, we will tell you everything you need to know about My Talking Tom 2, including how to download it from Apkpure, what are its features, tips and tricks, and reviews.</p>
4
- <h2>What is My Talking Tom 2?</h2>
5
- <p>My Talking Tom 2 is a virtual pet simulator game developed by Outfit7 Limited, a company that specializes in creating games featuring animated characters. The game is available for both Android and iOS devices, as well as on PC using an emulator. The game is free to play, but it contains in-app purchases and ads.</p>
6
- <h2>my talking tom 2 apk download apkpure</h2><br /><p><b><b>Download</b> &#127379; <a href="https://jinyurl.com/2uNQqa">https://jinyurl.com/2uNQqa</a></b></p><br /><br />
7
- <h3>A sequel to the popular My Talking Tom game</h3>
8
- <p>My Talking Tom 2 is a sequel to the original My Talking Tom game, which was released in 2013. The original game was a huge success, as it introduced a new concept of having a talking cat that responds to your voice and touch. The game also had many features that made it fun and addictive, such as mini-games, customization options, and social interactions. The sequel builds on the success of the original game by adding more features, content, and improvements.</p>
9
- <h3>A virtual pet simulator with a cute and cuddly talking cat</h3>
10
- <p>The main attraction of My Talking Tom 2 is Tom himself, a cute and cuddly talking cat that will become your virtual pet. You can name him, choose his fur color, and watch him grow from a baby kitten to an adult cat. You can also talk to him using your device's microphone, and he will repeat what you say in a funny voice. You can also touch him, pet him, tickle him, poke him, or pick him up. He will react differently depending on how you treat him. He also has a range of emotions, such as happy, sad, hungry, sleepy, bored, or sick.</p>
11
- <h3>A game with many features and activities to enjoy</h3>
12
- <p>My Talking Tom 2 is not just about talking to your cat. There are many other features and activities that you can enjoy in the game. For example:</p>
13
- <ul>
14
- <li>You can take care of Tom's needs by feeding him, bathing him, taking him to the toilet, curing his boo-boos, putting him to bed, etc.</li>
15
- <li>You can play mini-games with Tom or his pets (yes, he has pets too) to earn coins and rewards.</li>
16
- <li>You can customize Tom's appearance and house by buying clothes, accessories, furniture, wallpapers, etc.</li>
17
- <li>You can explore new worlds with Tom by using his plane, and collect souvenirs and stickers.</li>
18
- <li>You can interact with other players by visiting their Tom's, sending gifts, or joining competitions.</li>
19
- </ul>
20
- <p>As you can see, there is a lot to do in My Talking Tom 2. You will never get bored with this game.</p>
21
- <h2>How to Download My Talking Tom 2 APK from Apkpure?</h2>
22
- <p>If you want to download My Talking Tom 2 on your Android device, you have two options. You can either download it from the Google Play Store, or you can download it from Apkpure. Apkpure is a website that provides APK files for various Android apps and games. APK files are the installation files for Android applications. By downloading APK files from Apkpure, you can enjoy some benefits, such as:</p>
23
- <ul>
24
- <li>You can download the latest version of the app or game, even if it is not available in your region or country.</li>
25
- <li>You can download the app or game without any restrictions or limitations.</li>
26
- <li>You can download the app or game faster and easier than from the Google Play Store.</li>
27
- <li>You can download the app or game without any ads or malware.</li>
28
- </ul>
29
- <p>However, before you download My Talking Tom 2 APK from Apkpure, you need to take some precautions. You need to make sure that:</p>
30
- <ul>
31
- <li>You have enough storage space on your device to install the APK file.</li>
32
- <li>You have enabled the option to install apps from unknown sources on your device settings.</li>
33
- <li>You have verified the authenticity and security of the APK file by checking its size, signature, and permissions.</li>
34
- </ul>
35
- <p>Once you have done these steps, you can follow these steps to download and install My Talking Tom 2 APK from Apkpure:</p>
36
- <ol>
37
- <li>Go to the Apkpure website and search for My Talking Tom 2.</li>
38
- <li>Select the app from the search results and click on the Download APK button.</li>
39
- <li>Wait for the download to finish and locate the APK file on your device.</li>
40
- <li>Tap on the APK file and follow the instructions to install it on your device.</li>
41
- <li>Launch the app and enjoy playing with your virtual pet.</li>
42
- </ol>
43
- <h2>What are the Features of My Talking Tom 2?</h2>
44
- <p>My Talking Tom 2 is a game that offers many features for you to enjoy. Here are some of the main features of the game:</p>
45
- <p>My Talking Tom 2 mod apk unlimited money and stars<br />
46
- My Talking Tom 2 latest version apk free download<br />
47
- My Talking Tom 2 apk download for android 4.4.2<br />
48
- My Talking Tom 2 hack apk download no root<br />
49
- My Talking Tom 2 offline apk download<br />
50
- My Talking Tom 2 apk download for pc windows 10<br />
51
- My Talking Tom 2 apk pure app store<br />
52
- My Talking Tom 2 old version apk download 2018<br />
53
- My Talking Tom 2 apk download uptodown<br />
54
- My Talking Tom 2 apk download for ios<br />
55
- My Talking Tom 2 apk download apkmirror<br />
56
- My Talking Tom 2 apk download rexdl<br />
57
- My Talking Tom 2 apk download revdl<br />
58
- My Talking Tom 2 apk download mob.org<br />
59
- My Talking Tom 2 apk download android oyun club<br />
60
- My Talking Tom 2 apk download apkpure.com<br />
61
- My Talking Tom 2 apk download for jio phone<br />
62
- My Talking Tom 2 apk download for fire tablet<br />
63
- My Talking Tom 2 apk download for chromebook<br />
64
- My Talking Tom 2 apk download for nokia x2<br />
65
- My Talking Tom 2 apk download from play store<br />
66
- My Talking Tom 2 apk download without obb file<br />
67
- My Talking Tom 2 apk download with unlimited coins and diamonds<br />
68
- My Talking Tom 2 apk download with all unlocked features<br />
69
- My Talking Tom 2 apk download with new update and bug fixes<br />
70
- My Talking Tom 2 game play online free without downloading apk<br />
71
- My Talking Tom 2 game review and tips for beginners<br />
72
- My Talking Tom 2 game cheats and tricks to get more coins and stars<br />
73
- My Talking Tom 2 game features and gameplay modes explained<br />
74
- My Talking Tom 2 game guide and walkthrough for all levels and mini games<br />
75
- How to install my talking tom 2 apk on android device step by step<br />
76
- How to update my talking tom 2 apk to the latest version manually or automatically<br />
77
- How to backup and restore my talking tom 2 apk data on google drive or sd card<br />
78
- How to uninstall and reinstall my talking tom 2 apk without losing progress or data<br />
79
- How to fix my talking tom 2 apk not working or crashing issues on android device</p>
80
- <h3>Taking care of Tom's needs and emotions</h3>
81
- <p>As a virtual pet owner, you need to take care of Tom's needs and emotions. You need to feed him when he is hungry, bathe him when he is dirty, take him to the toilet when he needs to go, cure his boo-boos when he is hurt, put him to bed when he is sleepy, etc. You also need to pay attention to his mood and feelings. He can be happy, sad, hungry, sleepy, bored, or sick. You need to make him happy by playing with him, giving him hugs, or giving him treats. You also need to comfort him when he is sad by petting him or talking to him. By taking care of Tom's needs and emotions, you will strengthen your bond with him and make him love you more.</p>
82
- <h3>Playing mini-games and exploring new worlds</h3>
83
- <p>My Talking Tom 2 is not only about taking care of your cat. It is also about having fun with him. You can play mini-games with Tom or his pets (yes, he has pets too) to earn coins and rewards. There are many mini-games to choose from, such as puzzles, action games, arcade games, etc. You can also explore new worlds with Tom by using his plane. You can visit different places, such as a tropical island, a snowy mountain, a desert oasis, etc. You can also collect souvenirs and stickers from each place you visit. By playing mini-games and exploring new worlds, you will discover new things and have more fun with your cat.</p>
84
- <h3>Customizing Tom's appearance and house</h3>
85
- <p>My Talking Tom 2 is also about expressing your creativity and style. You can customize Tom's appearance and house by buying clothes, accessories, furniture, wallpapers, etc. You can make Tom look like a pirate, a cowboy, a superhero, or anything you want. You can also make his house look cozy, modern, colorful, or anything you want. You can also buy new items and surprises for Tom and his pets by using coins or diamonds. By customizing Tom's appearance and house, you will make him more unique and happy.</p>
86
- <h2>What are the Tips and Tricks for My Talking Tom 2?</h2>
87
- <p>My Talking Tom 2 is a game that is easy to play, but it also has some tips and tricks that can help you make the most out of it. Here are some of the tips and tricks for My Talking Tom 2:</p>
88
- <h3>How to keep Tom happy and healthy</h3>
89
- <p>Keeping Tom happy and healthy is the key to having a good relationship with him. Here are some ways to keep Tom happy and healthy:</p>
90
- <ul>
91
- <li>Feed him regularly with different types of food, such as fruits, vegetables, meat, fish, etc. Avoid feeding him too much junk food or spicy food, as they can make him sick or unhappy.</li>
92
- <li>Bathe him regularly to keep him clean and fresh. Use different types of soap, shampoo, or bubbles to make him enjoy the bath more.</li>
93
- <li>Take him to the toilet when he needs to go. Don't let him hold it for too long, as it can make him uncomfortable or sick.</li>
94
- <li>Cure his boo-boos when he gets hurt. Use different types of medicine, bandages, or ice packs to heal his wounds.</li>
95
- <li>Put him to bed when he is sleepy. Turn off the lights and play some soothing music to help him fall asleep faster.</li>
96
- </ul>
97
- <h3>How to earn coins and rewards</h3>
98
- <p>Earning coins and rewards is important to buy new items and surprises for Tom and his pets. Here are some ways to earn coins and rewards:</p>
99
- <ul>
100
- <li>Play mini-games with Tom or his pets. The more you play, the more coins and rewards you will earn.</li>
101
- <li>Explore new worlds with Tom using his plane. The more places you visit, the more coins and rewards you will earn.</li>
102
- <li>Interact with other players by visiting their Tom's, sending gifts, or joining competitions. The more you interact, the more coins and rewards you will earn.</li>
103
- <li>Watch ads or complete offers to get free coins or diamonds. You can also buy them with real money if you want.</li>
104
- </ul>
105
- <h3>How to unlock more items and surprises</h3>
106
- <p>Unlocking more items and surprises is fun and exciting, as it will make your game more enjoyable and diverse. Here are some ways to unlock more items and surprises:</p>
107
- <ul>
108
- <li>Level up your Tom by taking care of him and playing with him. The higher your level, the more items and surprises you will unlock.</li>
109
- <li>Collect souvenirs and stickers from each place you visit with Tom using his plane. The more you collect, the more items and surprises you will unlock.</li>
110
- <li>Spin the wheel of fortune every day to get a chance to win items and surprises.</li>
111
- <li>Open chests that contain items and surprises. You can get chests by playing mini-games, exploring new worlds, interacting with other players, or watching ads.</li>
112
- </ul>
113
- <h2>What are the Reviews of My Talking Tom 2?</h2>
114
- <p>My Talking Tom 2 is a game that has received many reviews from users and critics alike. Here are some of the reviews of My Talking Tom 2:</p>
115
- <h3>The positive reviews from users and critics</h3>
116
- <p>Many users and critics have praised My Talking Tom 2 for its fun and engaging gameplay, its cute and charming graphics, its variety of features and activities, its social interactions, its improvements over the original game, etc. Some of the positive reviews are:</p>
117
- <blockquote>"My Talking Tom 2 is a great game for kids and adults alike. It is very entertaining and relaxing. You can do so many things with Tom and his pets. You can also customize his appearance and house. The graphics are very cute and colorful. The game is also very easy to play and control. I love this game!"</blockquote>
118
- <blockquote>"My Talking Tom 2 is a game that has improved a lot from the first one. It has more features, more content, more mini-games, more worlds, more interactions, etc. The game is also very smooth and fast. The game is also very social, as you can visit other Tom's, send gifts, or join competitions. The game is also very funny, as Tom has a lot of hilarious reactions and expressions. The game is also very educational, as it teaches kids how to take care of a pet and how to be responsible. The game is awesome!"</blockquote>
119
- <blockquote>"My Talking Tom 2 is a game that is fun and engaging for all ages. It is a game that combines the best of virtual pet simulators, casual games, and social games. The game has a lot of variety and diversity, as it offers many features and activities to enjoy. The game also has a lot of charm and personality, as it features a cute and cuddly talking cat that will become your best friend. The game also has a lot of quality and polish, as it has great graphics, sound, animation, etc. The game is a must-have!"</blockquote>
120
- <h3>The negative reviews and complaints</h3>
121
- <p>However, not all users and critics have enjoyed My Talking Tom 2. Some of them have criticized the game for its repetitive and boring gameplay, its annoying and intrusive ads, its expensive and unfair in-app purchases, its privacy and security issues, its bugs and glitches, etc. Some of the negative reviews are:</p>
122
- <blockquote>"My Talking Tom 2 is a game that is repetitive and boring. You do the same things over and over again with Tom and his pets. You feed him, bathe him, take him to the toilet, cure his boo-boos, put him to bed, etc. There is nothing new or exciting in this game. The mini-games are also boring and easy. The worlds are also bland and empty. The game is a waste of time."</blockquote>
123
- <blockquote>"My Talking Tom 2 is a game that is annoying and intrusive. The game has too many ads that pop up every few minutes. The ads are also loud and long. They interrupt the gameplay and ruin the experience. The game also forces you to watch ads to get coins or rewards. The game also asks you to rate it or share it every time you open it. The game is a nuisance."</blockquote>
124
- <blockquote>"My Talking Tom 2 is a game that is expensive and unfair. The game has too many in-app purchases that are overpriced and unnecessary. The game also makes you spend coins or diamonds to buy items or surprises for Tom and his pets. The game also makes you wait for hours or days to unlock new items or surprises. The game also makes you compete with other players who have better items or surprises than you. The game is a rip-off."</blockquote>
125
- <h3>The overall rating and recommendation</h3>
126
- <p>Based on the reviews from users and critics, My Talking Tom 2 has an overall rating of 4.4 out of 5 stars on the Google Play Store and 4.6 out of 5 stars on the App Store. The game has been downloaded over 500 million times on both platforms. The game has also received many awards and recognitions, such as being one of the best casual games of 2018 by Google Play and being one of the top free games of 2019 by App Annie.</p>
127
- <p>Therefore, we can conclude that My Talking Tom 2 is a fun and engaging virtual pet game that will appeal to many people who love cats, games, or both. However, the game also has some drawbacks that might annoy or frustrate some people who are looking for more challenge, variety, or fairness in their games.</p>
128
- <p>If you are interested in trying out My Talking Tom 2 for yourself, you can download it from Apkpure using the steps we mentioned above.</p>
129
- <h2>Conclusion</h2>
130
- <p>In this article, we have told you everything you need to know about My Talking Tom 2 APK download Apkpure.</p>
131
- <p>We have explained what My Talking Tom 2 is, how to download it from Apkpure, what its features are, and shared tips, tricks, and reviews. We hope that you have found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you. Thank you for reading and have a great day!</p> <h2>FAQs</h2>
132
- <p>Here are some of the frequently asked questions about My Talking Tom 2 APK download Apkpure:</p>
133
- <h3>Q: Is My Talking Tom 2 safe to download from Apkpure?</h3>
134
- <p>A: Yes, My Talking Tom 2 is safe to download from Apkpure, as long as you follow the precautions we mentioned above. Apkpure is a reputable website that provides APK files for various Android apps and games. However, you should always be careful when downloading APK files from unknown sources, as they might contain viruses or malware.</p>
135
- <h3>Q: Is My Talking Tom 2 compatible with my device?</h3>
136
- <p>A: My Talking Tom 2 is compatible with most Android devices that have Android 4.4 or higher. However, some devices might not support some features or functions of the game. You can check the compatibility of your device by visiting the Google Play Store or the App Store.</p>
137
- <h3>Q: How can I update My Talking Tom 2?</h3>
138
- <p>A: You can update My Talking Tom 2 by downloading the latest version of the APK file from Apkpure and installing it on your device. Alternatively, you can update the game by visiting the Google Play Store or the App Store and tapping on the Update button.</p>
139
- <h3>Q: How can I contact the developers of My Talking Tom 2?</h3>
140
- <p>A: You can contact the developers of My Talking Tom 2 by visiting their website, their Facebook page, their Twitter account, or their YouTube channel. You can also send them an email at [email protected].</p>
141
- <h3>Q: How can I delete My Talking Tom 2 from my device?</h3>
142
- <p>A: You can delete My Talking Tom 2 from your device by following these steps:</p>
143
- <ol>
144
- <li>Go to your device settings and tap on Apps or Applications.</li>
145
- <li>Find and tap on My Talking Tom 2.</li>
146
- <li>Tap on Uninstall and confirm your choice.</li>
147
- <li>Wait for the uninstallation process to finish and check if the app is gone from your device.</li>
148
- </ol></p>
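- <p>For readers comfortable with developer tools: if USB debugging is enabled, the app can also be removed over adb. Below is a minimal Python sketch; the package name is an assumption, so verify it first with <code>adb shell pm list packages | grep talkingtom</code>:</p>
- <pre><code>import subprocess
-
- # Assumed package id for My Talking Tom 2 - check it on your own device first.
- PACKAGE = "com.outfit7.mytalkingtom2"
-
- result = subprocess.run(["adb", "uninstall", PACKAGE],
-                         capture_output=True, text=True)
- print(result.stdout or result.stderr)
- </code></pre>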
spaces/1phancelerku/anime-remove-background/Download iso red hat and get access to the best open source software for cloud development.md DELETED
@@ -1,115 +0,0 @@
1
- <br />
2
- <h1>How to Download and Install Red Hat Enterprise Linux (RHEL)</h1>
3
- <p>Red Hat Enterprise Linux (RHEL) is a popular Linux operating system used by enterprises across the world. It offers a stable, secure, and high-performance platform for running various workloads on physical, virtual, or cloud environments. Whether you want to deploy web servers, database servers, container platforms, or high-performance computing clusters, RHEL can meet your needs.</p>
5
- <p>But before you can install and use RHEL, you need to download an ISO image file that contains the installation program and the software packages. An ISO image is a single file that represents the contents of a CD or DVD disc. You can use an ISO image to create a bootable media, such as a USB flash drive or a DVD disc, that you can use to install RHEL on your machine.</p>
6
- <p>In this article, we will show you how to download the RHEL installation ISO image from the official Red Hat website, how to create bootable media from the ISO image, and how to install RHEL from that media. We will also explain some of the benefits of using RHEL and answer some frequently asked questions.</p>
7
- <h2>How to Download RHEL Installation ISO Image</h2>
8
- <p>To download the RHEL installation ISO image, you need a Red Hat account and a valid subscription. A subscription gives you access to the latest versions of RHEL software, updates, security patches, technical support, and other benefits. You can get a free subscription for development purposes or purchase one for production use.</p>
9
- <p>Here are the steps to download the RHEL installation ISO image:</p>
10
- <ol>
11
- <li><strong>Create a Red Hat account and get a subscription</strong>. If you don't have a Red Hat account yet, you can create one for free at <a href="https://www.redhat.com/wapps/ugc/register.html">https://www.redhat.com/wapps/ugc/register.html</a>. If you want to get a free subscription for development purposes, you can follow this guide: <a href="https://developers.redhat.com/articles/getting-red-hat-developer-subscription-what-rhel-users-need-know/">https://developers.redhat.com/articles/getting-red-hat-developer-subscription-what-rhel-users-need-know/</a>. If you want to purchase a subscription for production use, you can visit this page: <a href="https://www.redhat.com/en/store/red-hat-enterprise-linux-server">https://www.redhat.com/en/store/red-hat-enterprise-linux-server</a>.</li>
12
- <li><strong>Go to the Red Hat download page and choose your product</strong>. After you have logged in, choose Red Hat Enterprise Linux and download the ISO image that matches your version and architecture. When the download finishes, open your disc burning software and select your DVD drive as the destination device. You may need to grant administrative privileges or enter your password to do this.</li>
13
- <li><strong>Select your ISO image as the source file and start the burning process</strong>. This will write the contents of the ISO image to your DVD disc and make it bootable.</li>
14
- <li><strong>Wait for the process to finish and safely eject your DVD disc from your machine</strong>. You now have a bootable DVD disc that you can use to install RHEL.</li>
15
- </ol>
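- <p>Optionally, before creating bootable media, you can verify that the ISO image downloaded correctly. The following is a minimal Python sketch that compares the file's SHA-256 digest against the checksum published next to the download link; the file name and checksum value below are placeholders, not real values:</p>
- <pre><code>import hashlib
-
- # Placeholders: substitute your actual ISO file name and the SHA-256
- # checksum shown on the Red Hat download page.
- iso_path = "rhel-x.y-x86_64-dvd.iso"
- expected = "paste-the-published-sha256-checksum-here"
-
- sha256 = hashlib.sha256()
- with open(iso_path, "rb") as f:
-     # Read in 1 MiB chunks so large ISO files do not exhaust memory.
-     for chunk in iter(lambda: f.read(1024 * 1024), b""):
-         sha256.update(chunk)
-
- if sha256.hexdigest() == expected.lower():
-     print("Checksum OK - the ISO downloaded correctly.")
- else:
-     print("Checksum mismatch - download the ISO again.")
- </code></pre>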
16
- <h2>How to Install RHEL from the Bootable Media</h2>
17
- <p>After you have created a bootable media from the ISO image, you can use it to install RHEL on your machine. You need to have a machine that meets the minimum hardware requirements for RHEL, such as 2 GB of RAM, 20 GB of disk space, and a 64-bit processor. You also need to have access to the BIOS or UEFI settings of your machine, so that you can change the boot order and select the bootable media as the first option. Here are the steps to install RHEL from the bootable media:</p>
18
- <ol>
19
- <li><strong>Boot from the bootable media and select Install Red Hat Enterprise Linux</strong>. Insert your bootable media into your machine and power it on. As soon as you see the manufacturer's logo or a message like "Press any key to boot from CD or DVD", press any key to start the installation program. You will see a menu with several options, such as Install Red Hat Enterprise Linux, Test this media & install Red Hat Enterprise Linux, or Troubleshooting. Choose the first option, Install Red Hat Enterprise Linux, and press Enter.</li>
20
- <li><strong>Choose your language and region, and click Continue</strong>. On the welcome screen, you will see a list of languages and regions that you can choose from. Select the one that matches your preferences and click Continue.</li>
21
- <li><strong>Configure your installation options on the Installation Summary screen</strong>. On this screen, you will see several categories that you can configure, such as Installation Destination, Software Selection, Network & Host Name, Security Policy, and more. You can click on each category and make your choices according to your needs. For example, you can select which disk or partition you want to use for installing RHEL, which software packages or environment you want to install, what network settings and host name you want to use, what security policy you want to apply, and so on. You can also review the default settings and change them if necessary. After you have configured all the options, click Begin Installation.</li>
22
- <li><strong>Click Begin Installation and set up your root password and user account</strong>. As soon as you click Begin Installation, the installation program will start copying files and installing packages to your disk. During this process, you will see a progress bar and a message indicating how much time is left. You will also see two options at the bottom of the screen: Root Password and User Creation. You need to click on each option and set up your root password and user account. The root password is the password for the administrator account of your system, which has full access and control over everything. The user account is the account that you will use for your regular tasks and activities. You can create one or more user accounts with different privileges and roles. You can also choose whether to make one of them an administrator account or not. After you have set up your root password and user account, wait for the installation to complete.</li>
23
- <li><strong>Wait for the installation to complete and click Reboot</strong>. When the installation is finished, you will see a message saying "Complete! There are no remaining tasks". You can then click Reboot to restart your machine and boot into your newly installed RHEL system.</li>
24
- </ol>
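- <p>As a quick pre-flight check, here is a minimal Python sketch (assuming a Linux host) that compares your machine against the minimum requirements mentioned above; it is a rough aid, not an official compatibility test:</p>
- <pre><code>import os
- import platform
- import shutil
-
- # Minimums cited above: 2 GB RAM, 20 GB disk, 64-bit processor.
- ram_gb = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES") / 1024**3
- disk_gb = shutil.disk_usage("/").free / 1024**3
- is_64bit = platform.machine() in ("x86_64", "aarch64", "ppc64le", "s390x")
-
- print(f"RAM: {ram_gb:.1f} GB (need >= 2)")
- print(f"Free disk: {disk_gb:.1f} GB (need >= 20)")
- print(f"64-bit CPU: {is_64bit}")
- </code></pre>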
25
- <h2>Conclusion</h2>
26
- <p>In this article, we have shown you how to download RHEL installation ISO image from the official Red Hat website, how to create a bootable media from the ISO image, and how to install RHEL from the bootable media. We have also explained some of the benefits of using RHEL, such as stability, security, performance, support, and features.</p>
27
- <p>If you want to learn more about RHEL and how to use it for various purposes, you can visit these resources:</p>
77
- <ul>
78
- <li><a href="">https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux/documentation</a>: The official documentation for RHEL.</li>
79
- <li><a href="">https://access.redhat.com/solutions/1355683</a>: A list of common tasks and commands for RHEL.</li>
80
- <li><a href="">https://www.redhat.com/en/services/training-and-certification</a>: A collection of courses and certifications for RHEL users and professionals.</li>
81
- <li><a href="">https://developers.redhat.com/products/rhel/overview</a>: A portal for developers who want to use RHEL for their projects.</li>
82
- </ul>
83
- <p>We hope you have enjoyed this article and learned something new. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy learning!</p>
84
- <h2>FAQs</h2>
85
- <p>Here are some of the frequently asked questions about RHEL and its installation:</p>
86
- <h3>Q1: How long does a RHEL subscription last?</h3>
87
- <p>A: A RHEL subscription lasts for one year from the date of purchase. You can renew or upgrade your subscription before it expires to continue receiving updates, support, and other benefits.</p>
88
- <h3>Q2: What are the different support levels for RHEL subscriptions?</h3>
89
- <p>A: There are three support levels for RHEL subscriptions: Self-support, Standard, and Premium. Self-support gives you access to the software and updates, but no technical support. Standard gives you access to the software, updates, and unlimited web and phone support during business hours. Premium gives you access to the software, updates, and unlimited web and phone support 24/7. You can compare the different support levels and their prices on this page: <a href="https://www.redhat.com/en/store/red-hat-enterprise-linux-server">https://www.redhat.com/en/store/red-hat-enterprise-linux-server</a>.</p>
90
- <h3>Q3: How can I renew or upgrade my RHEL subscription?</h3>
91
- <p>A: You can renew or upgrade your RHEL subscription by logging into your Red Hat account and going to the Subscriptions section. There you can see your current subscriptions and their expiration dates, and choose to renew or upgrade them. You can also contact Red Hat sales or your Red Hat partner for assistance.</p>
92
- <h3>Q4: What are some of the features and tools included in RHEL?</h3>
93
- <p>A: RHEL includes many features and tools that make it a powerful and versatile operating system. Some of them are:</p>
94
- <ul>
95
- <li><strong>Red Hat Insights</strong>: A service that provides proactive analysis and recommendations for improving the security, performance, availability, and stability of your RHEL systems.</li>
96
- <li><strong>Red Hat Ansible Automation Platform</strong>: A platform that enables you to automate tasks and workflows across your RHEL systems and other devices.</li>
97
- <li><strong>Red Hat Satellite</strong>: A tool that helps you manage your RHEL systems across physical, virtual, or cloud environments.</li>
98
- <li><strong>Red Hat System Roles</strong>: A collection of Ansible roles that provide a consistent way to configure and manage your RHEL systems.</li>
99
- <li><strong>Red Hat Software Collections</strong>: A set of dynamic languages, databases, web servers, and other software components that are updated more frequently than the core RHEL packages.</li>
100
- <li><strong>Red Hat Developer Toolset</strong>: A set of development tools that are updated more frequently than the core RHEL packages.</li>
101
- </ul>
102
- <h3>Q5: How can I access Red Hat Software Collections and Red Hat Developer Toolset?</h3>
103
- <p>A: You can access Red Hat Software Collections and Red Hat Developer Toolset by enabling the optional and extras repositories on your RHEL system. You can do this by running the following commands as root:</p>
104
- <pre><code>yum-config-manager --enable rhel-8-server-optional-rpms && yum-config-manager --enable rhel-8-server-extras-rpms</code></pre>
105
- <p>Then, you can install the software collections or the developer toolset packages by using the yum command. For example, to install Python 3.8 from the software collections, you can run:</p>
106
- <pre><code>yum install rh-python38 </code></pre>
107
- <p>To install GCC 10 from the developer toolset, you can run:</p>
108
- <pre><code>yum install devtoolset-10-gcc </code></pre>
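- <p>Note that a software collection does not replace the system default packages. To use one, you typically start a shell session with it enabled, for example: <code>scl enable rh-python38 bash</code>.</p>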
109
- <p>You can find more information about Red Hat Software Collections and Red Hat Developer Toolset on these pages:</p>
110
- <ul>
111
- <li><a href="">https://access.redhat.com/documentation/en-us/red_hat_software_collections/</a></li>
112
- <li><a href="">https://access.redhat.com/documentation/en-us/red_hat_developer_toolset/</a></li>
113
- </ul></p>
spaces/232labs/VToonify/vtoonify/model/simple_augment.py DELETED
@@ -1,468 +0,0 @@
1
- # almost the same as model.stylegan.non_leaking
2
- # we only modify the parameters in sample_affine() to make the transformations mild
3
-
4
- import math
5
-
6
- import torch
7
- from torch import autograd
8
- from torch.nn import functional as F
9
- import numpy as np
10
-
11
- from model.stylegan.distributed import reduce_sum
12
- from model.stylegan.op import upfirdn2d
13
-
14
-
15
- class AdaptiveAugment:
16
- def __init__(self, ada_aug_target, ada_aug_len, update_every, device):
17
- self.ada_aug_target = ada_aug_target
18
- self.ada_aug_len = ada_aug_len
19
- self.update_every = update_every
20
-
21
- self.ada_update = 0
22
- self.ada_aug_buf = torch.tensor([0.0, 0.0], device=device)
23
- self.r_t_stat = 0
24
- self.ada_aug_p = 0
25
-
26
- @torch.no_grad()
27
- def tune(self, real_pred):
28
- self.ada_aug_buf += torch.tensor(
29
- (torch.sign(real_pred).sum().item(), real_pred.shape[0]),
30
- device=real_pred.device,
31
- )
32
- self.ada_update += 1
33
-
34
- if self.ada_update % self.update_every == 0:
35
- self.ada_aug_buf = reduce_sum(self.ada_aug_buf)
36
- pred_signs, n_pred = self.ada_aug_buf.tolist()
37
-
38
- self.r_t_stat = pred_signs / n_pred
39
-
40
- if self.r_t_stat > self.ada_aug_target:
41
- sign = 1
42
-
43
- else:
44
- sign = -1
45
-
46
- self.ada_aug_p += sign * n_pred / self.ada_aug_len
47
- self.ada_aug_p = min(1, max(0, self.ada_aug_p))
48
- self.ada_aug_buf.mul_(0)
49
- self.ada_update = 0
50
-
51
- return self.ada_aug_p
52
-
53
-
54
- SYM6 = (
55
- 0.015404109327027373,
56
- 0.0034907120842174702,
57
- -0.11799011114819057,
58
- -0.048311742585633,
59
- 0.4910559419267466,
60
- 0.787641141030194,
61
- 0.3379294217276218,
62
- -0.07263752278646252,
63
- -0.021060292512300564,
64
- 0.04472490177066578,
65
- 0.0017677118642428036,
66
- -0.007800708325034148,
67
- )
68
-
69
-
70
- def translate_mat(t_x, t_y, device="cpu"):
71
- batch = t_x.shape[0]
72
-
73
- mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1)
74
- translate = torch.stack((t_x, t_y), 1)
75
- mat[:, :2, 2] = translate
76
-
77
- return mat
78
-
79
-
80
- def rotate_mat(theta, device="cpu"):
81
- batch = theta.shape[0]
82
-
83
- mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1)
84
- sin_t = torch.sin(theta)
85
- cos_t = torch.cos(theta)
86
- rot = torch.stack((cos_t, -sin_t, sin_t, cos_t), 1).view(batch, 2, 2)
87
- mat[:, :2, :2] = rot
88
-
89
- return mat
90
-
91
-
92
- def scale_mat(s_x, s_y, device="cpu"):
93
- batch = s_x.shape[0]
94
-
95
- mat = torch.eye(3, device=device).unsqueeze(0).repeat(batch, 1, 1)
96
- mat[:, 0, 0] = s_x
97
- mat[:, 1, 1] = s_y
98
-
99
- return mat
100
-
101
-
102
- def translate3d_mat(t_x, t_y, t_z):
103
- batch = t_x.shape[0]
104
-
105
- mat = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1)
106
- translate = torch.stack((t_x, t_y, t_z), 1)
107
- mat[:, :3, 3] = translate
108
-
109
- return mat
110
-
111
-
112
- def rotate3d_mat(axis, theta):
113
- batch = theta.shape[0]
114
-
115
- u_x, u_y, u_z = axis
116
-
117
- eye = torch.eye(3).unsqueeze(0)
118
- cross = torch.tensor([(0, -u_z, u_y), (u_z, 0, -u_x), (-u_y, u_x, 0)]).unsqueeze(0)
119
- outer = torch.tensor(axis)
120
- outer = (outer.unsqueeze(1) * outer).unsqueeze(0)
121
-
122
- sin_t = torch.sin(theta).view(-1, 1, 1)
123
- cos_t = torch.cos(theta).view(-1, 1, 1)
124
-
125
- rot = cos_t * eye + sin_t * cross + (1 - cos_t) * outer
126
-
127
- eye_4 = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1)
128
- eye_4[:, :3, :3] = rot
129
-
130
- return eye_4
131
-
132
-
133
- def scale3d_mat(s_x, s_y, s_z):
134
- batch = s_x.shape[0]
135
-
136
- mat = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1)
137
- mat[:, 0, 0] = s_x
138
- mat[:, 1, 1] = s_y
139
- mat[:, 2, 2] = s_z
140
-
141
- return mat
142
-
143
-
144
- def luma_flip_mat(axis, i):
145
- batch = i.shape[0]
146
-
147
- eye = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1)
148
- axis = torch.tensor(axis + (0,))
149
- flip = 2 * torch.ger(axis, axis) * i.view(-1, 1, 1)
150
-
151
- return eye - flip
152
-
153
-
154
- def saturation_mat(axis, i):
155
- batch = i.shape[0]
156
-
157
- eye = torch.eye(4).unsqueeze(0).repeat(batch, 1, 1)
158
- axis = torch.tensor(axis + (0,))
159
- axis = torch.ger(axis, axis)
160
- saturate = axis + (eye - axis) * i.view(-1, 1, 1)
161
-
162
- return saturate
163
-
164
-
165
- def lognormal_sample(size, mean=0, std=1, device="cpu"):
166
- return torch.empty(size, device=device).log_normal_(mean=mean, std=std)
167
-
168
-
169
- def category_sample(size, categories, device="cpu"):
170
- category = torch.tensor(categories, device=device)
171
- sample = torch.randint(high=len(categories), size=(size,), device=device)
172
-
173
- return category[sample]
174
-
175
-
176
- def uniform_sample(size, low, high, device="cpu"):
177
- return torch.empty(size, device=device).uniform_(low, high)
178
-
179
-
180
- def normal_sample(size, mean=0, std=1, device="cpu"):
181
- return torch.empty(size, device=device).normal_(mean, std)
182
-
183
-
184
- def bernoulli_sample(size, p, device="cpu"):
185
- return torch.empty(size, device=device).bernoulli_(p)
186
-
187
-
188
- def random_mat_apply(p, transform, prev, eye, device="cpu"):
189
- size = transform.shape[0]
190
- select = bernoulli_sample(size, p, device=device).view(size, 1, 1)
191
- select_transform = select * transform + (1 - select) * eye
192
-
193
- return select_transform @ prev
194
-
195
-
196
- def sample_affine(p, size, height, width, device="cpu"):
197
- G = torch.eye(3, device=device).unsqueeze(0).repeat(size, 1, 1)
198
- eye = G
199
-
200
- # flip
201
- param = category_sample(size, (0, 1))
202
- Gc = scale_mat(1 - 2.0 * param, torch.ones(size), device=device)
203
- G = random_mat_apply(p, Gc, G, eye, device=device)
204
- # print('flip', G, scale_mat(1 - 2.0 * param, torch.ones(size)), sep='\n')
205
-
206
- # 90 rotate
207
- #param = category_sample(size, (0, 3))
208
- #Gc = rotate_mat(-math.pi / 2 * param, device=device)
209
- #G = random_mat_apply(p, Gc, G, eye, device=device)
210
- # print('90 rotate', G, rotate_mat(-math.pi / 2 * param), sep='\n')
211
-
212
- # integer translate
213
- param = uniform_sample(size, -0.125, 0.125)
214
- param_height = torch.round(param * height) / height
215
- param_width = torch.round(param * width) / width
216
- Gc = translate_mat(param_width, param_height, device=device)
217
- G = random_mat_apply(p, Gc, G, eye, device=device)
218
- # print('integer translate', G, translate_mat(param_width, param_height), sep='\n')
219
-
220
- # isotropic scale
221
- param = lognormal_sample(size, std=0.1 * math.log(2))
222
- Gc = scale_mat(param, param, device=device)
223
- G = random_mat_apply(p, Gc, G, eye, device=device)
224
- # print('isotropic scale', G, scale_mat(param, param), sep='\n')
225
-
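- # pre- and post-rotation below are each applied with probability p_rot;
- # choosing p_rot = 1 - sqrt(1 - p) makes the chance that at least one of the
- # two rotations fires equal to 1 - (1 - p_rot)**2 = p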
226
- p_rot = 1 - math.sqrt(1 - p)
227
-
228
- # pre-rotate
229
- param = uniform_sample(size, -math.pi * 0.25, math.pi * 0.25)
230
- Gc = rotate_mat(-param, device=device)
231
- G = random_mat_apply(p_rot, Gc, G, eye, device=device)
232
- # print('pre-rotate', G, rotate_mat(-param), sep='\n')
233
-
234
- # anisotropic scale
235
- param = lognormal_sample(size, std=0.1 * math.log(2))
236
- Gc = scale_mat(param, 1 / param, device=device)
237
- G = random_mat_apply(p, Gc, G, eye, device=device)
238
- # print('anisotropic scale', G, scale_mat(param, 1 / param), sep='\n')
239
-
240
- # post-rotate
241
- param = uniform_sample(size, -math.pi * 0.25, math.pi * 0.25)
242
- Gc = rotate_mat(-param, device=device)
243
- G = random_mat_apply(p_rot, Gc, G, eye, device=device)
244
- # print('post-rotate', G, rotate_mat(-param), sep='\n')
245
-
246
- # fractional translate
247
- param = normal_sample(size, std=0.125)
248
- Gc = translate_mat(param, param, device=device)
249
- G = random_mat_apply(p, Gc, G, eye, device=device)
250
- # print('fractional translate', G, translate_mat(param, param), sep='\n')
251
-
252
- return G
253
-
254
-
255
- def sample_color(p, size):
256
- C = torch.eye(4).unsqueeze(0).repeat(size, 1, 1)
257
- eye = C
258
- axis_val = 1 / math.sqrt(3)
259
- axis = (axis_val, axis_val, axis_val)
260
-
261
- # brightness
262
- param = normal_sample(size, std=0.2)
263
- Cc = translate3d_mat(param, param, param)
264
- C = random_mat_apply(p, Cc, C, eye)
265
-
266
- # contrast
267
- param = lognormal_sample(size, std=0.5 * math.log(2))
268
- Cc = scale3d_mat(param, param, param)
269
- C = random_mat_apply(p, Cc, C, eye)
270
-
271
- # luma flip
272
- param = category_sample(size, (0, 1))
273
- Cc = luma_flip_mat(axis, param)
274
- C = random_mat_apply(p, Cc, C, eye)
275
-
276
- # hue rotation
277
- param = uniform_sample(size, -math.pi, math.pi)
278
- Cc = rotate3d_mat(axis, param)
279
- C = random_mat_apply(p, Cc, C, eye)
280
-
281
- # saturation
282
- param = lognormal_sample(size, std=1 * math.log(2))
283
- Cc = saturation_mat(axis, param)
284
- C = random_mat_apply(p, Cc, C, eye)
285
-
286
- return C
287
-
288
-
289
- def make_grid(shape, x0, x1, y0, y1, device):
290
- n, c, h, w = shape
291
- grid = torch.empty(n, h, w, 3, device=device)
292
- grid[:, :, :, 0] = torch.linspace(x0, x1, w, device=device)
293
- grid[:, :, :, 1] = torch.linspace(y0, y1, h, device=device).unsqueeze(-1)
294
- grid[:, :, :, 2] = 1
295
-
296
- return grid
297
-
298
-
299
- def affine_grid(grid, mat):
300
- n, h, w, _ = grid.shape
301
- return (grid.view(n, h * w, 3) @ mat.transpose(1, 2)).view(n, h, w, 2)
302
-
303
-
304
- def get_padding(G, height, width, kernel_size):
305
- device = G.device
306
-
307
- cx = (width - 1) / 2
308
- cy = (height - 1) / 2
309
- cp = torch.tensor(
310
- [(-cx, -cy, 1), (cx, -cy, 1), (cx, cy, 1), (-cx, cy, 1)], device=device
311
- )
312
- cp = G @ cp.T
313
-
314
- pad_k = kernel_size // 4
315
-
316
- pad = cp[:, :2, :].permute(1, 0, 2).flatten(1)
317
- pad = torch.cat((-pad, pad)).max(1).values
318
- pad = pad + torch.tensor([pad_k * 2 - cx, pad_k * 2 - cy] * 2, device=device)
319
- pad = pad.max(torch.tensor([0, 0] * 2, device=device))
320
- pad = pad.min(torch.tensor([width - 1, height - 1] * 2, device=device))
321
-
322
- pad_x1, pad_y1, pad_x2, pad_y2 = pad.ceil().to(torch.int32)
323
-
324
- return pad_x1, pad_x2, pad_y1, pad_y2
325
-
326
-
327
- def try_sample_affine_and_pad(img, p, kernel_size, G=None):
328
- batch, _, height, width = img.shape
329
-
330
- G_try = G
331
-
332
- if G is None:
333
- G_try = torch.inverse(sample_affine(p, batch, height, width))
334
-
335
- pad_x1, pad_x2, pad_y1, pad_y2 = get_padding(G_try, height, width, kernel_size)
336
-
337
- img_pad = F.pad(img, (pad_x1, pad_x2, pad_y1, pad_y2), mode="reflect")
338
-
339
- return img_pad, G_try, (pad_x1, pad_x2, pad_y1, pad_y2)
340
-
341
-
342
- class GridSampleForward(autograd.Function):
343
- @staticmethod
344
- def forward(ctx, input, grid):
345
- out = F.grid_sample(
346
- input, grid, mode="bilinear", padding_mode="zeros", align_corners=False
347
- )
348
- ctx.save_for_backward(input, grid)
349
-
350
- return out
351
-
352
- @staticmethod
353
- def backward(ctx, grad_output):
354
- input, grid = ctx.saved_tensors
355
- grad_input, grad_grid = GridSampleBackward.apply(grad_output, input, grid)
356
-
357
- return grad_input, grad_grid
358
-
359
-
360
- class GridSampleBackward(autograd.Function):
361
- @staticmethod
362
- def forward(ctx, grad_output, input, grid):
363
- op = torch._C._jit_get_operation("aten::grid_sampler_2d_backward")
364
- grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
365
- ctx.save_for_backward(grid)
366
-
367
- return grad_input, grad_grid
368
-
369
- @staticmethod
370
- def backward(ctx, grad_grad_input, grad_grad_grid):
371
- grid, = ctx.saved_tensors
372
- grad_grad_output = None
373
-
374
- if ctx.needs_input_grad[0]:
375
- grad_grad_output = GridSampleForward.apply(grad_grad_input, grid)
376
-
377
- return grad_grad_output, None, None
378
-
379
-
380
- grid_sample = GridSampleForward.apply
381
-
382
-
383
- def scale_mat_single(s_x, s_y):
384
- return torch.tensor(((s_x, 0, 0), (0, s_y, 0), (0, 0, 1)), dtype=torch.float32)
385
-
386
-
387
- def translate_mat_single(t_x, t_y):
388
- return torch.tensor(((1, 0, t_x), (0, 1, t_y), (0, 0, 1)), dtype=torch.float32)
389
-
390
-
391
- def random_apply_affine(img, p, G=None, antialiasing_kernel=SYM6):
392
- kernel = antialiasing_kernel
393
- len_k = len(kernel)
394
-
395
- kernel = torch.as_tensor(kernel).to(img)
396
- # kernel = torch.ger(kernel, kernel).to(img)
397
- kernel_flip = torch.flip(kernel, (0,))
398
-
399
- img_pad, G, (pad_x1, pad_x2, pad_y1, pad_y2) = try_sample_affine_and_pad(
400
- img, p, len_k, G
401
- )
402
-
403
- G_inv = (
404
- translate_mat_single((pad_x1 - pad_x2).item() / 2, (pad_y1 - pad_y2).item() / 2)
405
- @ G
406
- )
407
- up_pad = (
408
- (len_k + 2 - 1) // 2,
409
- (len_k - 2) // 2,
410
- (len_k + 2 - 1) // 2,
411
- (len_k - 2) // 2,
412
- )
413
- img_2x = upfirdn2d(img_pad, kernel.unsqueeze(0), up=(2, 1), pad=(*up_pad[:2], 0, 0))
414
- img_2x = upfirdn2d(img_2x, kernel.unsqueeze(1), up=(1, 2), pad=(0, 0, *up_pad[2:]))
415
- G_inv = scale_mat_single(2, 2) @ G_inv @ scale_mat_single(1 / 2, 1 / 2)
416
- G_inv = translate_mat_single(-0.5, -0.5) @ G_inv @ translate_mat_single(0.5, 0.5)
417
- batch_size, channel, height, width = img.shape
418
- pad_k = len_k // 4
419
- shape = (batch_size, channel, (height + pad_k * 2) * 2, (width + pad_k * 2) * 2)
420
- G_inv = (
421
- scale_mat_single(2 / img_2x.shape[3], 2 / img_2x.shape[2])
422
- @ G_inv
423
- @ scale_mat_single(1 / (2 / shape[3]), 1 / (2 / shape[2]))
424
- )
425
- grid = F.affine_grid(G_inv[:, :2, :].to(img_2x), shape, align_corners=False)
426
- img_affine = grid_sample(img_2x, grid)
427
- d_p = -pad_k * 2
428
- down_pad = (
429
- d_p + (len_k - 2 + 1) // 2,
430
- d_p + (len_k - 2) // 2,
431
- d_p + (len_k - 2 + 1) // 2,
432
- d_p + (len_k - 2) // 2,
433
- )
434
- img_down = upfirdn2d(
435
- img_affine, kernel_flip.unsqueeze(0), down=(2, 1), pad=(*down_pad[:2], 0, 0)
436
- )
437
- img_down = upfirdn2d(
438
- img_down, kernel_flip.unsqueeze(1), down=(1, 2), pad=(0, 0, *down_pad[2:])
439
- )
440
-
441
- return img_down, G
442
-
443
-
444
- def apply_color(img, mat):
445
- batch = img.shape[0]
446
- img = img.permute(0, 2, 3, 1)
447
- mat_mul = mat[:, :3, :3].transpose(1, 2).view(batch, 1, 3, 3)
448
- mat_add = mat[:, :3, 3].view(batch, 1, 1, 3)
449
- img = img @ mat_mul + mat_add
450
- img = img.permute(0, 3, 1, 2)
451
-
452
- return img
453
-
454
-
455
- def random_apply_color(img, p, C=None):
456
- if C is None:
457
- C = sample_color(p, img.shape[0])
458
-
459
- img = apply_color(img, C.to(img))
460
-
461
- return img, C
462
-
463
-
464
- def augment(img, p, transform_matrix=(None, None)):
465
- img, G = random_apply_affine(img, p, transform_matrix[0])
466
- img, C = random_apply_color(img, p, transform_matrix[1])
467
-
468
- return img, (G, C)
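-
- # Example usage (a minimal sketch; `imgs` is assumed to be an N x C x H x W float tensor):
- #   aug_imgs, (G, C) = augment(imgs, p=0.3)
- # Passing the returned matrices back in applies the identical transforms to another batch:
- #   aug_imgs2, _ = augment(imgs2, p=0.3, transform_matrix=(G, C))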
spaces/AIGC-Audio/Make_An_Audio/wav_evaluation/models/CLAPWrapper.py DELETED
@@ -1,253 +0,0 @@
1
- import random
2
- import torchaudio
3
- import collections
4
- import re
5
- import torch.nn.functional as F
6
- import numpy as np
7
- from transformers import AutoTokenizer
8
- from wav_evaluation.models.utils import read_config_as_args
9
- from wav_evaluation.models.clap import CLAP
10
- import math
11
- import torchaudio.transforms as T
12
- import os
13
- import torch
14
- string_classes = (str, bytes)
15
-
16
- class CLAPWrapper():
17
- """
18
- A class for interfacing with the CLAP model.
19
- """
20
-
21
- def __init__(self, model_fp,config_path, use_cuda=False):
22
- self.np_str_obj_array_pattern = re.compile(r'[SaUO]')
23
- self.file_path = os.path.realpath(__file__)
24
- self.default_collate_err_msg_format = (
25
- "default_collate: batch must contain tensors, numpy arrays, numbers, "
26
- "dicts or lists; found {}")
27
- with open(config_path,'r') as f:
28
- self.config_as_str = f.read()
29
- self.model_fp = model_fp
30
- self.use_cuda = use_cuda
31
- self.clap, self.tokenizer, self.args = self.load_clap()
32
-
33
- def load_clap(self):
34
- r"""Load CLAP model with args from config file"""
35
-
36
- args = read_config_as_args(self.config_as_str, is_config_str=True)
37
-
38
- if 'bert' in args.text_model:
39
- self.token_keys = ['input_ids', 'token_type_ids', 'attention_mask']
40
- else:
41
- self.token_keys = ['input_ids', 'attention_mask']
42
-
43
- clap = CLAP(
44
- audioenc_name=args.audioenc_name,
45
- sample_rate=args.sampling_rate,
46
- window_size=args.window_size,
47
- hop_size=args.hop_size,
48
- mel_bins=args.mel_bins,
49
- fmin=args.fmin,
50
- fmax=args.fmax,
51
- classes_num=args.num_classes,
52
- out_emb=args.out_emb,
53
- text_model=args.text_model,
54
- transformer_embed_dim=args.transformer_embed_dim,
55
- d_proj=args.d_proj
56
- )
57
-
58
-
59
- # Load pretrained weights for model
60
- model_state_dict = torch.load(self.model_fp, map_location=torch.device('cpu'))['model']
61
- clap.load_state_dict(model_state_dict)
62
- clap.eval() # set clap in eval mode
63
- tokenizer = AutoTokenizer.from_pretrained(args.text_model)
64
-
65
- if self.use_cuda and torch.cuda.is_available():
66
- clap = clap.cuda()
67
-
68
- return clap, tokenizer, args
69
-
70
- def default_collate(self, batch):
71
- r"""Puts each data field into a tensor with outer dimension batch size"""
72
- elem = batch[0]
73
- elem_type = type(elem)
74
- if isinstance(elem, torch.Tensor):
75
- out = None
76
- if torch.utils.data.get_worker_info() is not None:
77
- # If we're in a background process, concatenate directly into a
78
- # shared memory tensor to avoid an extra copy
79
- numel = sum([x.numel() for x in batch])
80
- storage = elem.storage()._new_shared(numel)
81
- out = elem.new(storage)
82
- return torch.stack(batch, 0, out=out)
83
- elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
84
- and elem_type.__name__ != 'string_':
85
- if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
86
- # array of string classes and object
87
- if self.np_str_obj_array_pattern.search(elem.dtype.str) is not None:
88
- raise TypeError(
89
- self.default_collate_err_msg_format.format(elem.dtype))
90
-
91
- return self.default_collate([torch.as_tensor(b) for b in batch])
92
- elif elem.shape == (): # scalars
93
- return torch.as_tensor(batch)
94
- elif isinstance(elem, float):
95
- return torch.tensor(batch, dtype=torch.float64)
96
- elif isinstance(elem, int):
97
- return torch.tensor(batch)
98
- elif isinstance(elem, string_classes):
99
- return batch
100
- elif isinstance(elem, collections.abc.Mapping):
101
- return {key: self.default_collate([d[key] for d in batch]) for key in elem}
102
- elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
103
- return elem_type(*(self.default_collate(samples) for samples in zip(*batch)))
104
- elif isinstance(elem, collections.abc.Sequence):
105
- # check to make sure that the elements in batch have consistent size
106
- it = iter(batch)
107
- elem_size = len(next(it))
108
- if not all(len(elem) == elem_size for elem in it):
109
- raise RuntimeError(
110
- 'each element in list of batch should be of equal size')
111
- transposed = zip(*batch)
112
- return [self.default_collate(samples) for samples in transposed]
113
-
114
- raise TypeError(self.default_collate_err_msg_format.format(elem_type))
115
-
116
- def resample_and_duration(self,wav_sr,audio_duration,resample=False):
117
- audio_time_series,sample_rate = wav_sr
118
- resample_rate = self.args.sampling_rate
119
- if resample:
120
- resampler = T.Resample(sample_rate, resample_rate)
121
- audio_time_series = resampler(audio_time_series)
122
- audio_time_series = audio_time_series.reshape(-1)
123
-
124
- # audio_time_series is shorter than predefined audio duration,
125
- # so audio_time_series is extended
126
- if audio_duration*sample_rate >= audio_time_series.shape[0]:
127
- repeat_factor = int(np.ceil((audio_duration*sample_rate) /
128
- audio_time_series.shape[0]))
129
- # Repeat audio_time_series by repeat_factor to match audio_duration
130
- audio_time_series = audio_time_series.repeat(repeat_factor)
131
- # remove excess part of audio_time_series
132
- audio_time_series = audio_time_series[0:audio_duration*sample_rate]
133
- else:
134
- # audio_time_series is longer than predefined audio duration,
135
- # so audio_time_series is trimmed
136
- start_index = random.randrange(
137
- audio_time_series.shape[0] - audio_duration*sample_rate)
138
- audio_time_series = audio_time_series[start_index:start_index +
139
- audio_duration*sample_rate]
140
- return torch.FloatTensor(audio_time_series)
141
-
142
- def load_audio_into_tensor(self, audio_path, audio_duration, resample=False):
143
- r"""Loads audio file and returns raw audio."""
144
- # Randomly sample a segment of audio_duration from the clip or pad to match duration
145
- audio_time_series, sample_rate = torchaudio.load(audio_path)
146
- return self.resample_and_duration((audio_time_series, sample_rate),audio_duration,resample)
147
-
148
- def preprocess_audio(self, audio_files, resample):
149
- r"""Load list of audio files and return raw audio"""
150
- audio_tensors = []
151
- for audio_file in audio_files:
152
- if isinstance(audio_file,str):
153
- audio_tensor = self.load_audio_into_tensor(audio_file, self.args.duration, resample)
154
- elif isinstance(audio_file,tuple):
155
- audio_tensor = self.resample_and_duration(audio_file, self.args.duration, resample)
156
- else:
157
- raise TypeError(f"type of audiofile is {type(audio_file)},which is not supported")
158
- audio_tensor = audio_tensor.reshape(
159
- 1, -1).cuda() if self.use_cuda and torch.cuda.is_available() else audio_tensor.reshape(1, -1)
160
- audio_tensors.append(audio_tensor)
161
- return self.default_collate(audio_tensors)
162
-
163
- def preprocess_text(self, text_queries):
164
- r"""Load list of class labels and return tokenized text"""
165
- tokenized_texts = []
166
- for ttext in text_queries:
167
- tok = self.tokenizer.encode_plus(
168
- text=ttext, add_special_tokens=True, max_length=self.args.text_len, padding="max_length", return_tensors="pt") # max_length=self.args.text_len, padding=True,
169
- for key in self.token_keys:
170
- tok[key] = tok[key].reshape(-1).cuda() if self.use_cuda and torch.cuda.is_available() else tok[key].reshape(-1)
171
- tokenized_texts.append(tok)
172
- return self.default_collate(tokenized_texts)
173
-
174
- def get_text_embeddings(self, class_labels):
175
- r"""Load list of class labels and return text embeddings"""
176
- preprocessed_text = self.preprocess_text(class_labels)
177
- text_embeddings = self._get_text_embeddings(preprocessed_text)
178
- text_embeddings = text_embeddings/torch.norm(text_embeddings, dim=-1, keepdim=True)
179
- return text_embeddings
180
-
181
- def get_audio_embeddings(self, audio_files, resample):
182
- r"""Load list of audio files and return a audio embeddings"""
183
- preprocessed_audio = self.preprocess_audio(audio_files, resample)
184
- audio_embeddings = self._get_audio_embeddings(preprocessed_audio)
185
- audio_embeddings = audio_embeddings/torch.norm(audio_embeddings, dim=-1, keepdim=True)
186
- return audio_embeddings
187
-
188
- def _get_text_embeddings(self, preprocessed_text):
189
- r"""Load preprocessed text and return text embeddings"""
190
- with torch.no_grad():
191
- text_embeddings = self.clap.caption_encoder(preprocessed_text)
192
- text_embeddings = text_embeddings/torch.norm(text_embeddings, dim=-1, keepdim=True)
193
- return text_embeddings
194
-
195
- def _get_audio_embeddings(self, preprocessed_audio):
196
- r"""Load preprocessed audio and return a audio embeddings"""
197
- with torch.no_grad():
198
- preprocessed_audio = preprocessed_audio.reshape(
199
- preprocessed_audio.shape[0], preprocessed_audio.shape[2])
200
- # Index [0] is the audio embedding; index [1] has the output class probabilities
201
- audio_embeddings = self.clap.audio_encoder(preprocessed_audio)[0]
202
- audio_embeddings = audio_embeddings/torch.norm(audio_embeddings, dim=-1, keepdim=True)
203
- return audio_embeddings
204
-
205
- def compute_similarity(self, audio_embeddings, text_embeddings,use_logit_scale = True):
206
- r"""Compute similarity between text and audio embeddings"""
207
- if use_logit_scale:
208
- logit_scale = self.clap.logit_scale.exp()
209
- similarity = logit_scale*text_embeddings @ audio_embeddings.T
210
- else:
211
- similarity = text_embeddings @ audio_embeddings.T
212
- return similarity.T
213
-
214
- def cal_clap_score(self, txt, audio_path):
215
- text_embeddings = self.get_text_embeddings([txt])  # normalized text embedding
216
- audio_embeddings = self.get_audio_embeddings([audio_path], resample=True)  # relatively slow step: reads the audio and resamples it to 44100 Hz
217
- score = self.compute_similarity(audio_embeddings, text_embeddings,use_logit_scale=False).squeeze().cpu().numpy()
218
- return score
219
-
220
- def _generic_batch_inference(self, func, *args):
221
- r"""Process audio and/or text per batch"""
222
- input_tmp = args[0]
223
- batch_size = args[-1]
224
- # args[0] has audio_files, args[1] has class_labels
225
- inputs = [args[0], args[1]] if len(args) == 3 else [args[0]]
226
- args0_len = len(args[0])
227
- # compute text_embeddings once for all the audio_files batches
228
- if len(inputs) == 2:
229
- text_embeddings = self.get_text_embeddings(args[1])
230
- inputs = [args[0], args[1], text_embeddings]
231
- dataset_idx = 0
232
- for _ in range(math.ceil(args0_len/batch_size)):
233
- next_batch_idx = dataset_idx + batch_size
234
- # batch size is bigger than available audio/text items
235
- if next_batch_idx >= args0_len:
236
- inputs[0] = input_tmp[dataset_idx:]
237
- return func(*tuple(inputs))
238
- else:
239
- inputs[0] = input_tmp[dataset_idx:next_batch_idx]
240
- yield func(*tuple(inputs))
241
- dataset_idx = next_batch_idx
242
-
243
- def get_audio_embeddings_per_batch(self, audio_files, batch_size):
244
- r"""Load preprocessed audio and return a audio embeddings per batch"""
245
- return self._generic_batch_inference(self.get_audio_embeddings, audio_files, batch_size)
246
-
247
- def get_text_embeddings_per_batch(self, class_labels, batch_size):
248
- r"""Load preprocessed text and return text embeddings per batch"""
249
- return self._generic_batch_inference(self.get_text_embeddings, class_labels, batch_size)
250
-
251
- def classify_audio_files_per_batch(self, audio_files, class_labels, batch_size):
252
- r"""Compute classification probabilities for each audio recording in a batch and each class label"""
253
- return self._generic_batch_inference(self.classify_audio_files, audio_files, class_labels, batch_size)
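-
- # Example usage (a minimal sketch; the checkpoint and config paths are placeholders):
- #   wrapper = CLAPWrapper('CLAP_weights.pth', 'clap_config.yml', use_cuda=True)
- #   score = wrapper.cal_clap_score('a dog barking', 'sample.wav')
- # Note: `classify_audio_files`, referenced by `classify_audio_files_per_batch`
- # above, is not defined in this file.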
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov5/yolov5_s-v61_fast_1xb12-40e_608x352_cat.py DELETED
@@ -1,70 +0,0 @@
1
- _base_ = 'yolov5_s-v61_fast_1xb12-40e_cat.py'
2
-
3
- # This configuration is used to provide non-square training examples
4
- # Must be a multiple of 32
5
- img_scale = (608, 352) # w h
6
-
7
- anchors = [
8
- [(65, 35), (159, 45), (119, 80)], # P3/8
9
- [(215, 77), (224, 116), (170, 166)], # P4/16
10
- [(376, 108), (339, 176), (483, 190)] # P5/32
11
- ]
12
-
13
- # ===============================Unmodified in most cases====================
14
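- # The objectness loss weight is tuned for 640x640 inputs, so rescale it by the
- # squared ratio of the new input height to 640.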
- _base_.model.bbox_head.loss_obj.loss_weight = 1.0 * ((img_scale[1] / 640)**2)
15
- _base_.model.bbox_head.prior_generator.base_sizes = anchors
16
-
17
- train_pipeline = [
18
- *_base_.pre_transform,
19
- dict(
20
- type='Mosaic',
21
- img_scale=img_scale,
22
- pad_val=114.0,
23
- pre_transform=_base_.pre_transform),
24
- dict(
25
- type='YOLOv5RandomAffine',
26
- max_rotate_degree=0.0,
27
- max_shear_degree=0.0,
28
- scaling_ratio_range=(1 - _base_.affine_scale, 1 + _base_.affine_scale),
29
- # img_scale is (width, height)
30
- border=(-img_scale[0] // 2, -img_scale[1] // 2),
31
- border_val=(114, 114, 114)),
32
- dict(
33
- type='mmdet.Albu',
34
- transforms=_base_.albu_train_transforms,
35
- bbox_params=dict(
36
- type='BboxParams',
37
- format='pascal_voc',
38
- label_fields=['gt_bboxes_labels', 'gt_ignore_flags']),
39
- keymap={
40
- 'img': 'image',
41
- 'gt_bboxes': 'bboxes'
42
- }),
43
- dict(type='YOLOv5HSVRandomAug'),
44
- dict(type='mmdet.RandomFlip', prob=0.5),
45
- dict(
46
- type='mmdet.PackDetInputs',
47
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
48
- 'flip_direction'))
49
- ]
50
-
51
- _base_.train_dataloader.dataset.pipeline = train_pipeline
52
-
53
- test_pipeline = [
54
- dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
55
- dict(type='YOLOv5KeepRatioResize', scale=img_scale),
56
- dict(
57
- type='LetterResize',
58
- scale=img_scale,
59
- allow_scale_up=False,
60
- pad_val=dict(img=114)),
61
- dict(type='mmdet.LoadAnnotations', with_bbox=True),
62
- dict(
63
- type='mmdet.PackDetInputs',
64
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
65
- 'scale_factor', 'pad_param'))
66
- ]
67
-
68
- val_dataloader = dict(
69
- dataset=dict(pipeline=test_pipeline, batch_shapes_cfg=None))
70
- test_dataloader = val_dataloader
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/box/Box.js DELETED
@@ -1,50 +0,0 @@
1
- import Base from '../base/Base.js';
2
- import { Lines } from '../utils/Geoms.js';
3
-
4
- class Box extends Base {
5
- constructor(scene, config) {
6
- super(scene, config);
7
- this.type = 'rexSpinnerCube';
8
- }
9
-
10
- buildShapes() {
11
- this.addShape((new Lines()).setName('border'));
12
- this.addShape((new Lines()).setName('fill'));
13
- }
14
-
15
- updateShapes() {
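- // value < 0.5: shrink the fill rectangle's height toward zero;
- // value >= 0.5: rotate the border (and hide the fill) through 180 degrees.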
16
- var centerX = this.centerX;
17
- var centerY = this.centerY;
18
- var radius = this.radius;
19
-
20
- var halfWidth = radius * 0.7;
21
- var left = centerX - halfWidth,
22
- top = centerY - halfWidth,
23
- width = halfWidth * 2;
24
-
25
- this.getShape('border')
26
- .lineStyle(2, this.color, 1)
27
- .startAt(left, top).lineTo(width, 0, true)
28
- .lineTo(0, width, true).lineTo(-width, 0, true)
29
- .lineTo(0, -width, true).close();
30
-
31
- if (this.value < 0.5) {
32
- var t = (0.5 - this.value) * 2;
33
- var height = width * t;
34
- this.getShape('fill')
35
- .fillStyle(this.color, 1)
36
- .startAt(left, top).lineTo(width, 0, true)
37
- .lineTo(0, height, true).lineTo(-width, 0, true)
38
- .lineTo(0, -height, true).close();
39
-
40
- } else { // Rotate
41
- var t = (this.value - 0.5) * 2;
42
- var angle = 180 * t;
43
-
44
- this.getShape('border').rotateAround(centerX, centerY, angle);
45
- this.getShape('fill').fillStyle().lineStyle();
46
- }
47
- }
48
- }
49
-
50
- export default Box;
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_ldm_original_checkpoint_to_diffusers.py DELETED
@@ -1,359 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 The HuggingFace Inc. team.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """ Conversion script for the LDM checkpoints. """
16
-
17
- import argparse
18
- import json
19
-
20
- import torch
21
-
22
- from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
23
-
24
-
25
- def shave_segments(path, n_shave_prefix_segments=1):
26
- """
27
- Removes segments. Positive values shave the first segments, negative shave the last segments.
28
- """
29
- if n_shave_prefix_segments >= 0:
30
- return ".".join(path.split(".")[n_shave_prefix_segments:])
31
- else:
32
- return ".".join(path.split(".")[:n_shave_prefix_segments])
33
-
34
-
35
- def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
36
- """
37
- Updates paths inside resnets to the new naming scheme (local renaming)
38
- """
39
- mapping = []
40
- for old_item in old_list:
41
- new_item = old_item.replace("in_layers.0", "norm1")
42
- new_item = new_item.replace("in_layers.2", "conv1")
43
-
44
- new_item = new_item.replace("out_layers.0", "norm2")
45
- new_item = new_item.replace("out_layers.3", "conv2")
46
-
47
- new_item = new_item.replace("emb_layers.1", "time_emb_proj")
48
- new_item = new_item.replace("skip_connection", "conv_shortcut")
49
-
50
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
51
-
52
- mapping.append({"old": old_item, "new": new_item})
53
-
54
- return mapping
55
-
56
-
57
- def renew_attention_paths(old_list, n_shave_prefix_segments=0):
58
- """
59
- Updates paths inside attentions to the new naming scheme (local renaming)
60
- """
61
- mapping = []
62
- for old_item in old_list:
63
- new_item = old_item
64
-
65
- new_item = new_item.replace("norm.weight", "group_norm.weight")
66
- new_item = new_item.replace("norm.bias", "group_norm.bias")
67
-
68
- new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
69
- new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
70
-
71
- new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
72
-
73
- mapping.append({"old": old_item, "new": new_item})
74
-
75
- return mapping
76
-
77
-
78
- def assign_to_checkpoint(
79
- paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
80
- ):
81
- """
82
- This does the final conversion step: take locally converted weights and apply a global renaming
83
- to them. It splits attention layers, and takes into account additional replacements
84
- that may arise.
85
-
86
- Assigns the weights to the new checkpoint.
87
- """
88
- assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
89
-
90
- # Splits the attention layers into three variables.
91
- if attention_paths_to_split is not None:
92
- for path, path_map in attention_paths_to_split.items():
93
- old_tensor = old_checkpoint[path]
94
- channels = old_tensor.shape[0] // 3
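- # q, k and v are stored fused along dim 0 of the qkv tensor, so each takes a third of the rows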
95
-
96
- target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
97
-
98
- num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
99
-
100
- old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
101
- query, key, value = old_tensor.split(channels // num_heads, dim=1)
102
-
103
- checkpoint[path_map["query"]] = query.reshape(target_shape)
104
- checkpoint[path_map["key"]] = key.reshape(target_shape)
105
- checkpoint[path_map["value"]] = value.reshape(target_shape)
106
-
107
- for path in paths:
108
- new_path = path["new"]
109
-
110
- # These have already been assigned
111
- if attention_paths_to_split is not None and new_path in attention_paths_to_split:
112
- continue
113
-
114
- # Global renaming happens here
115
- new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
116
- new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
117
- new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
118
-
119
- if additional_replacements is not None:
120
- for replacement in additional_replacements:
121
- new_path = new_path.replace(replacement["old"], replacement["new"])
122
-
123
- # proj_attn.weight has to be converted from conv 1D to linear
124
- if "proj_attn.weight" in new_path:
125
- checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
126
- else:
127
- checkpoint[new_path] = old_checkpoint[path["old"]]
128
-
129
-
130
- def convert_ldm_checkpoint(checkpoint, config):
131
- """
132
- Takes a state dict and a config, and returns a converted checkpoint.
133
- """
134
- new_checkpoint = {}
135
-
136
- new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
137
- new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
138
- new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
139
- new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
140
-
141
- new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
142
- new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
143
-
144
- new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
145
- new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
146
- new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
147
- new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
148
-
149
- # Retrieves the keys for the input blocks only
150
- num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
151
- input_blocks = {
152
- layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
153
- for layer_id in range(num_input_blocks)
154
- }
155
-
156
- # Retrieves the keys for the middle blocks only
157
- num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
158
- middle_blocks = {
159
- layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
160
- for layer_id in range(num_middle_blocks)
161
- }
162
-
163
- # Retrieves the keys for the output blocks only
164
- num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
165
- output_blocks = {
166
- layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
167
- for layer_id in range(num_output_blocks)
168
- }
169
-
170
- for i in range(1, num_input_blocks):
171
- block_id = (i - 1) // (config["num_res_blocks"] + 1)
172
- layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)
173
-
174
- resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
175
- attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
176
-
177
- if f"input_blocks.{i}.0.op.weight" in checkpoint:
178
- new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
179
- f"input_blocks.{i}.0.op.weight"
180
- ]
181
- new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
182
- f"input_blocks.{i}.0.op.bias"
183
- ]
184
- continue
185
-
186
- paths = renew_resnet_paths(resnets)
187
- meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
188
- resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
189
- assign_to_checkpoint(
190
- paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
191
- )
192
-
193
- if len(attentions):
194
- paths = renew_attention_paths(attentions)
195
- meta_path = {
196
- "old": f"input_blocks.{i}.1",
197
- "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
198
- }
199
- to_split = {
200
- f"input_blocks.{i}.1.qkv.bias": {
201
- "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
202
- "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
203
- "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
204
- },
205
- f"input_blocks.{i}.1.qkv.weight": {
206
- "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
207
- "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
208
- "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
209
- },
210
- }
211
- assign_to_checkpoint(
212
- paths,
213
- new_checkpoint,
214
- checkpoint,
215
- additional_replacements=[meta_path],
216
- attention_paths_to_split=to_split,
217
- config=config,
218
- )
219
-
220
- resnet_0 = middle_blocks[0]
221
- attentions = middle_blocks[1]
222
- resnet_1 = middle_blocks[2]
223
-
224
- resnet_0_paths = renew_resnet_paths(resnet_0)
225
- assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)
226
-
227
- resnet_1_paths = renew_resnet_paths(resnet_1)
228
- assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)
229
-
230
- attentions_paths = renew_attention_paths(attentions)
231
- to_split = {
232
- "middle_block.1.qkv.bias": {
233
- "key": "mid_block.attentions.0.key.bias",
234
- "query": "mid_block.attentions.0.query.bias",
235
- "value": "mid_block.attentions.0.value.bias",
236
- },
237
- "middle_block.1.qkv.weight": {
238
- "key": "mid_block.attentions.0.key.weight",
239
- "query": "mid_block.attentions.0.query.weight",
240
- "value": "mid_block.attentions.0.value.weight",
241
- },
242
- }
243
- assign_to_checkpoint(
244
- attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
245
- )
246
-
247
- for i in range(num_output_blocks):
248
- block_id = i // (config["num_res_blocks"] + 1)
249
- layer_in_block_id = i % (config["num_res_blocks"] + 1)
250
- output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
251
- output_block_list = {}
252
-
253
- for layer in output_block_layers:
254
- layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
255
- if layer_id in output_block_list:
256
- output_block_list[layer_id].append(layer_name)
257
- else:
258
- output_block_list[layer_id] = [layer_name]
259
-
260
- if len(output_block_list) > 1:
261
- resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
262
- attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
263
-
264
- resnet_0_paths = renew_resnet_paths(resnets)
265
- paths = renew_resnet_paths(resnets)
266
-
267
- meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
268
- assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)
269
-
270
- if ["conv.weight", "conv.bias"] in output_block_list.values():
271
- index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
272
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
273
- f"output_blocks.{i}.{index}.conv.weight"
274
- ]
275
- new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
276
- f"output_blocks.{i}.{index}.conv.bias"
277
- ]
278
-
279
- # Clear attentions as they have been attributed above.
280
- if len(attentions) == 2:
281
- attentions = []
282
-
283
- if len(attentions):
284
- paths = renew_attention_paths(attentions)
285
- meta_path = {
286
- "old": f"output_blocks.{i}.1",
287
- "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
288
- }
289
- to_split = {
290
- f"output_blocks.{i}.1.qkv.bias": {
291
- "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
292
- "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
293
- "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
294
- },
295
- f"output_blocks.{i}.1.qkv.weight": {
296
- "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
297
- "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
298
- "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
299
- },
300
- }
301
- assign_to_checkpoint(
302
- paths,
303
- new_checkpoint,
304
- checkpoint,
305
- additional_replacements=[meta_path],
306
- attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
307
- config=config,
308
- )
309
- else:
310
- resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
311
- for path in resnet_0_paths:
312
- old_path = ".".join(["output_blocks", str(i), path["old"]])
313
- new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
314
-
315
- new_checkpoint[new_path] = checkpoint[old_path]
316
-
317
- return new_checkpoint
318
-
319
-
320
- if __name__ == "__main__":
321
- parser = argparse.ArgumentParser()
322
-
323
- parser.add_argument(
324
- "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
325
- )
326
-
327
- parser.add_argument(
328
- "--config_file",
329
- default=None,
330
- type=str,
331
- required=True,
332
- help="The config json file corresponding to the architecture.",
333
- )
334
-
335
- parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
336
-
337
- args = parser.parse_args()
338
-
339
- checkpoint = torch.load(args.checkpoint_path)
340
-
341
- with open(args.config_file) as f:
342
- config = json.loads(f.read())
343
-
344
- converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
345
-
346
- if "ldm" in config:
347
- del config["ldm"]
348
-
349
- model = UNet2DModel(**config)
350
- model.load_state_dict(converted_checkpoint)
351
-
352
- try:
353
- scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
354
- vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
355
-
356
- pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
357
- pipe.save_pretrained(args.dump_path)
358
- except: # noqa: E722
359
- model.save_pretrained(args.dump_path)
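
Editor's note: the qkv-splitting step in `assign_to_checkpoint` above is the core trick of this conversion, so here is a minimal, self-contained sketch of the same reshape-and-split on a dummy tensor. The sizes (`num_head_channels = 32`, 64 channels per projection) are illustrative assumptions, not values taken from any real checkpoint.

import torch

# Hypothetical fused qkv weight of shape (3 * channels, in_dim, 1), as LDM attention stores it.
channels, in_dim, num_head_channels = 64, 64, 32
qkv = torch.randn(3 * channels, in_dim, 1)

num_heads = qkv.shape[0] // num_head_channels // 3          # 2 heads in this toy setup
target_shape = (-1, channels) if len(qkv.shape) == 3 else (-1)

# Group by head so each head's q, k and v slices stay contiguous, then split into thirds.
grouped = qkv.reshape((num_heads, 3 * channels // num_heads) + qkv.shape[1:])
query, key, value = grouped.split(channels // num_heads, dim=1)

print(query.reshape(target_shape).shape)                     # torch.Size([64, 64])

For a 1-D bias tensor, `len(old_tensor.shape)` is 1, so the `else (-1)` branch applies and each projection flattens back to a plain vector.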
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r101-d8_480x480_40k_pascal_context_59.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './fcn_r50-d8_480x480_40k_pascal_context_59.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
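
Editor's note: this two-line config relies entirely on `_base_` inheritance — the child overrides only what differs from the ResNet-50 base. A toy illustration of that override semantics (the helper below is illustrative only, not the actual mmcv implementation):

def merge(base: dict, override: dict) -> dict:
    # Recursively apply overrides on top of a base config (illustrative only).
    out = dict(base)
    for k, v in override.items():
        out[k] = merge(out[k], v) if isinstance(v, dict) and isinstance(out.get(k), dict) else v
    return out

base = {"model": {"pretrained": "open-mmlab://resnet50_v1c", "backbone": {"depth": 50, "type": "ResNetV1c"}}}
child = {"model": {"pretrained": "open-mmlab://resnet101_v1c", "backbone": {"depth": 101}}}
print(merge(base, child)["model"]["backbone"])  # {'depth': 101, 'type': 'ResNetV1c'}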
spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/bootstrap/bootstrap.min.css DELETED
The diff for this file is too large to render. See raw diff
 
spaces/AnshuK23/Customer-review-analysis/app.py DELETED
@@ -1,7 +0,0 @@
- import streamlit as st
- from transformers import pipeline
- pipe = pipeline("sentiment-analysis")
- text = st.text_area("Enter Text")
- if text:
-     out = pipe(text)
-     st.json(out)
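
Editor's note: one caveat on the seven-line app above — `pipeline("sentiment-analysis")` with no `model` argument pulls whatever default the installed transformers release ships. A sketch that pins the checkpoint instead (the model name is the long-standing English SST-2 default for this task, but treat that as an assumption to verify):

from transformers import pipeline

# Pinning the checkpoint keeps results reproducible across transformers upgrades.
pipe = pipeline(
    "sentiment-analysis",
    model="distilbert-base-uncased-finetuned-sst-2-english",
)
print(pipe("Battery life is excellent but the screen scratches easily."))
# e.g. [{'label': 'POSITIVE', 'score': 0.99...}] — mixed reviews can land either way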
spaces/AntX-ai/Fintech/style.css DELETED
@@ -1,28 +0,0 @@
- body {
-   padding: 2rem;
-   font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
- }
-
- h1 {
-   font-size: 16px;
-   margin-top: 0;
- }
-
- p {
-   color: rgb(107, 114, 128);
-   font-size: 15px;
-   margin-bottom: 10px;
-   margin-top: 5px;
- }
-
- .card {
-   max-width: 620px;
-   margin: 0 auto;
-   padding: 16px;
-   border: 1px solid lightgray;
-   border-radius: 16px;
- }
-
- .card p:last-child {
-   margin-bottom: 0;
- }
spaces/AnthonyErosion/HoctotAI/app.py DELETED
@@ -1,52 +0,0 @@
- import gradio as gr
- import time
-
- from transformers import AutoTokenizer, AutoModelForCausalLM
- import textwrap
- import re
-
- tokenizer = AutoTokenizer.from_pretrained("AnthonyErosion/HoctotAI")
- model = AutoModelForCausalLM.from_pretrained("AnthonyErosion/HoctotAI",device_map='auto',load_in_8bit=False)
-
- main_theme = gr.Theme.from_hub("gstaff/xkcd")
-
- with gr.Blocks(theme=main_theme) as MainChatbot:
-     verify_text = lambda txt : '\n'.join([textwrap.fill(txt, width=150) for txt in txt.split('\n')])
-
-     def conver_inp(text:str):
-         return f"<|prompter|> {text} <|endoftext|><|assistant|>"
-
-     def generate(text,max_new_tokens:int=1024,use_conver:bool=False,b_pair=False):
-         text = conver_inp(text) if use_conver else text
-
-         for i in range(max_new_tokens):
-             enc = tokenizer(text,return_tensors='pt',add_special_tokens=False)
-             text_r = text
-             enc = model.generate(enc.input_ids,max_new_tokens=1,pad_token_id=0)
-             text = tokenizer.decode(enc[0],skip_special_tokens=False)
-             text = text[:-4]+tokenizer.eos_token if text[-4:] == '\n\n\n\n' else text
-
-             if text.endswith(tokenizer.eos_token) or text.endswith('\n\n\n\n\n'):
-                 yield text[len(text_r):] if b_pair else text
-                 break
-             else:
-                 yield text[len(text_r):] if b_pair else text
-
-     chatbot = gr.Chatbot()
-     msg = gr.Textbox(
-         show_label=False,
-         placeholder="Nhập tin nhắn và bấm Enter để nhận phản hồi... ⌨️",  # Vietnamese: "Type a message and press Enter to get a reply..."
-     ).style(container=False)
-
-     def respond(message, chat_history):
-         bot_message = ""
-         chat_history.append((message, bot_message))
-         for v in generate(message,1024,True):
-             bot_message = re.sub(r"<\|prompter\|>.*?<\|endoftext\|>", "", v)
-             bot_message = bot_message.replace("<|assistant|>", "").replace("<|endoftext|>", "").strip()
-             bot_message = verify_text(bot_message)
-             chat_history[-1] = [message, bot_message]
-             yield "", chat_history
-         time.sleep(1)
-     msg.submit(respond, [msg, chatbot], [msg, chatbot])
- MainChatbot.launch()
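
Editor's note: the `generate` loop above re-tokenizes the full prompt and calls `model.generate` once per new token, which makes decoding quadratic in output length. A sketch of the streaming alternative transformers provides (`TextIteratorStreamer` has existed since transformers v4.28 — verify against your installed version; the prompt template is copied from the file, everything else is an assumption):

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("AnthonyErosion/HoctotAI")
model = AutoModelForCausalLM.from_pretrained("AnthonyErosion/HoctotAI", device_map="auto")

def stream_reply(prompt: str):
    inputs = tokenizer(f"<|prompter|> {prompt} <|endoftext|><|assistant|>", return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    # generate() runs in a worker thread; the streamer yields decoded chunks as they arrive.
    Thread(target=model.generate, kwargs=dict(**inputs, max_new_tokens=1024, streamer=streamer)).start()
    for chunk in streamer:
        yield chunk

Each yielded chunk is already-decoded text, so `respond` could append it to `bot_message` instead of re-decoding the whole sequence on every step.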
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/sbcharsetprober.py DELETED
@@ -1,162 +0,0 @@
- ######################## BEGIN LICENSE BLOCK ########################
- # The Original Code is Mozilla Universal charset detector code.
- #
- # The Initial Developer of the Original Code is
- # Netscape Communications Corporation.
- # Portions created by the Initial Developer are Copyright (C) 2001
- # the Initial Developer. All Rights Reserved.
- #
- # Contributor(s):
- #   Mark Pilgrim - port to Python
- #   Shy Shalom - original C code
- #
- # This library is free software; you can redistribute it and/or
- # modify it under the terms of the GNU Lesser General Public
- # License as published by the Free Software Foundation; either
- # version 2.1 of the License, or (at your option) any later version.
- #
- # This library is distributed in the hope that it will be useful,
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- # Lesser General Public License for more details.
- #
- # You should have received a copy of the GNU Lesser General Public
- # License along with this library; if not, write to the Free Software
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- # 02110-1301 USA
- ######################### END LICENSE BLOCK #########################
-
- from typing import Dict, List, NamedTuple, Optional, Union
-
- from .charsetprober import CharSetProber
- from .enums import CharacterCategory, ProbingState, SequenceLikelihood
-
-
- class SingleByteCharSetModel(NamedTuple):
-     charset_name: str
-     language: str
-     char_to_order_map: Dict[int, int]
-     language_model: Dict[int, Dict[int, int]]
-     typical_positive_ratio: float
-     keep_ascii_letters: bool
-     alphabet: str
-
-
- class SingleByteCharSetProber(CharSetProber):
-     SAMPLE_SIZE = 64
-     SB_ENOUGH_REL_THRESHOLD = 1024 # 0.25 * SAMPLE_SIZE^2
-     POSITIVE_SHORTCUT_THRESHOLD = 0.95
-     NEGATIVE_SHORTCUT_THRESHOLD = 0.05
-
-     def __init__(
-         self,
-         model: SingleByteCharSetModel,
-         is_reversed: bool = False,
-         name_prober: Optional[CharSetProber] = None,
-     ) -> None:
-         super().__init__()
-         self._model = model
-         # TRUE if we need to reverse every pair in the model lookup
-         self._reversed = is_reversed
-         # Optional auxiliary prober for name decision
-         self._name_prober = name_prober
-         self._last_order = 255
-         self._seq_counters: List[int] = []
-         self._total_seqs = 0
-         self._total_char = 0
-         self._control_char = 0
-         self._freq_char = 0
-         self.reset()
-
-     def reset(self) -> None:
-         super().reset()
-         # char order of last character
-         self._last_order = 255
-         self._seq_counters = [0] * SequenceLikelihood.get_num_categories()
-         self._total_seqs = 0
-         self._total_char = 0
-         self._control_char = 0
-         # characters that fall in our sampling range
-         self._freq_char = 0
-
-     @property
-     def charset_name(self) -> Optional[str]:
-         if self._name_prober:
-             return self._name_prober.charset_name
-         return self._model.charset_name
-
-     @property
-     def language(self) -> Optional[str]:
-         if self._name_prober:
-             return self._name_prober.language
-         return self._model.language
-
-     def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
-         # TODO: Make filter_international_words keep things in self.alphabet
-         if not self._model.keep_ascii_letters:
-             byte_str = self.filter_international_words(byte_str)
-         else:
-             byte_str = self.remove_xml_tags(byte_str)
-         if not byte_str:
-             return self.state
-         char_to_order_map = self._model.char_to_order_map
-         language_model = self._model.language_model
-         for char in byte_str:
-             order = char_to_order_map.get(char, CharacterCategory.UNDEFINED)
-             # XXX: This was SYMBOL_CAT_ORDER before, with a value of 250, but
-             # CharacterCategory.SYMBOL is actually 253, so we use CONTROL
-             # to make it closer to the original intent. The only difference
-             # is whether or not we count digits and control characters for
-             # _total_char purposes.
-             if order < CharacterCategory.CONTROL:
-                 self._total_char += 1
-             if order < self.SAMPLE_SIZE:
-                 self._freq_char += 1
-                 if self._last_order < self.SAMPLE_SIZE:
-                     self._total_seqs += 1
-                     if not self._reversed:
-                         lm_cat = language_model[self._last_order][order]
-                     else:
-                         lm_cat = language_model[order][self._last_order]
-                     self._seq_counters[lm_cat] += 1
-             self._last_order = order
-
-         charset_name = self._model.charset_name
-         if self.state == ProbingState.DETECTING:
-             if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
-                 confidence = self.get_confidence()
-                 if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
-                     self.logger.debug(
-                         "%s confidence = %s, we have a winner", charset_name, confidence
-                     )
-                     self._state = ProbingState.FOUND_IT
-                 elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
-                     self.logger.debug(
-                         "%s confidence = %s, below negative shortcut threshold %s",
-                         charset_name,
-                         confidence,
-                         self.NEGATIVE_SHORTCUT_THRESHOLD,
-                     )
-                     self._state = ProbingState.NOT_ME
-
-         return self.state
-
-     def get_confidence(self) -> float:
-         r = 0.01
-         if self._total_seqs > 0:
-             r = (
-                 (
-                     self._seq_counters[SequenceLikelihood.POSITIVE]
-                     + 0.25 * self._seq_counters[SequenceLikelihood.LIKELY]
-                 )
-                 / self._total_seqs
-                 / self._model.typical_positive_ratio
-             )
-             # The more control characters (proportionally to the size
-             # of the text), the less confident we become in the current
-             # charset.
-             r = r * (self._total_char - self._control_char) / self._total_char
-             r = r * self._freq_char / self._total_char
-             if r >= 1.0:
-                 r = 0.99
-         return r
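
Editor's note: to see where this prober fits, here is how it is exercised through chardet's public API — a sketch against the standalone `chardet` package, since the copy above is vendored inside pip. Very short inputs yield lower confidence, so treat the printed values as indicative only.

import chardet

raw = "Привет, мир! Как дела?".encode("windows-1251")
result = chardet.detect(raw)  # single-byte probers like the one above produce the scores
print(result)  # e.g. {'encoding': 'windows-1251', 'confidence': 0.9..., 'language': 'Russian'}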
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/__init__.py DELETED
@@ -1,177 +0,0 @@
- """Rich text and beautiful formatting in the terminal."""
-
- import os
- from typing import IO, TYPE_CHECKING, Any, Callable, Optional, Union
-
- from ._extension import load_ipython_extension # noqa: F401
-
- __all__ = ["get_console", "reconfigure", "print", "inspect", "print_json"]
-
- if TYPE_CHECKING:
-     from .console import Console
-
- # Global console used by alternative print
- _console: Optional["Console"] = None
-
- try:
-     _IMPORT_CWD = os.path.abspath(os.getcwd())
- except FileNotFoundError:
-     # Can happen if the cwd has been deleted
-     _IMPORT_CWD = ""
-
-
- def get_console() -> "Console":
-     """Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,
-     and hasn't been explicitly given one.
-
-     Returns:
-         Console: A console instance.
-     """
-     global _console
-     if _console is None:
-         from .console import Console
-
-         _console = Console()
-
-     return _console
-
-
- def reconfigure(*args: Any, **kwargs: Any) -> None:
-     """Reconfigures the global console by replacing it with another.
-
-     Args:
-         *args (Any): Positional arguments for the replacement :class:`~rich.console.Console`.
-         **kwargs (Any): Keyword arguments for the replacement :class:`~rich.console.Console`.
-     """
-     from pip._vendor.rich.console import Console
-
-     new_console = Console(*args, **kwargs)
-     _console = get_console()
-     _console.__dict__ = new_console.__dict__
-
-
- def print(
-     *objects: Any,
-     sep: str = " ",
-     end: str = "\n",
-     file: Optional[IO[str]] = None,
-     flush: bool = False,
- ) -> None:
-     r"""Print object(s) supplied via positional arguments.
-     This function has an identical signature to the built-in print.
-     For more advanced features, see the :class:`~rich.console.Console` class.
-
-     Args:
-         sep (str, optional): Separator between printed objects. Defaults to " ".
-         end (str, optional): Character to write at end of output. Defaults to "\\n".
-         file (IO[str], optional): File to write to, or None for stdout. Defaults to None.
-         flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False.
-
-     """
-     from .console import Console
-
-     write_console = get_console() if file is None else Console(file=file)
-     return write_console.print(*objects, sep=sep, end=end)
-
-
- def print_json(
-     json: Optional[str] = None,
-     *,
-     data: Any = None,
-     indent: Union[None, int, str] = 2,
-     highlight: bool = True,
-     skip_keys: bool = False,
-     ensure_ascii: bool = False,
-     check_circular: bool = True,
-     allow_nan: bool = True,
-     default: Optional[Callable[[Any], Any]] = None,
-     sort_keys: bool = False,
- ) -> None:
-     """Pretty prints JSON. Output will be valid JSON.
-
-     Args:
-         json (str): A string containing JSON.
-         data (Any): If json is not supplied, then encode this data.
-         indent (int, optional): Number of spaces to indent. Defaults to 2.
-         highlight (bool, optional): Enable highlighting of output: Defaults to True.
-         skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
-         ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
-         check_circular (bool, optional): Check for circular references. Defaults to True.
-         allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
-         default (Callable, optional): A callable that converts values that can not be encoded
-             in to something that can be JSON encoded. Defaults to None.
-         sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
-     """
-
-     get_console().print_json(
-         json,
-         data=data,
-         indent=indent,
-         highlight=highlight,
-         skip_keys=skip_keys,
-         ensure_ascii=ensure_ascii,
-         check_circular=check_circular,
-         allow_nan=allow_nan,
-         default=default,
-         sort_keys=sort_keys,
-     )
-
-
- def inspect(
-     obj: Any,
-     *,
-     console: Optional["Console"] = None,
-     title: Optional[str] = None,
-     help: bool = False,
-     methods: bool = False,
-     docs: bool = True,
-     private: bool = False,
-     dunder: bool = False,
-     sort: bool = True,
-     all: bool = False,
-     value: bool = True,
- ) -> None:
-     """Inspect any Python object.
-
-     * inspect(<OBJECT>) to see summarized info.
-     * inspect(<OBJECT>, methods=True) to see methods.
-     * inspect(<OBJECT>, help=True) to see full (non-abbreviated) help.
-     * inspect(<OBJECT>, private=True) to see private attributes (single underscore).
-     * inspect(<OBJECT>, dunder=True) to see attributes beginning with double underscore.
-     * inspect(<OBJECT>, all=True) to see all attributes.
-
-     Args:
-         obj (Any): An object to inspect.
-         title (str, optional): Title to display over inspect result, or None use type. Defaults to None.
-         help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.
-         methods (bool, optional): Enable inspection of callables. Defaults to False.
-         docs (bool, optional): Also render doc strings. Defaults to True.
-         private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
-         dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
-         sort (bool, optional): Sort attributes alphabetically. Defaults to True.
-         all (bool, optional): Show all attributes. Defaults to False.
-         value (bool, optional): Pretty print value. Defaults to True.
-     """
-     _console = console or get_console()
-     from pip._vendor.rich._inspect import Inspect
-
-     # Special case for inspect(inspect)
-     is_inspect = obj is inspect
-
-     _inspect = Inspect(
-         obj,
-         title=title,
-         help=is_inspect or help,
-         methods=is_inspect or methods,
-         docs=is_inspect or docs,
-         private=private,
-         dunder=dunder,
-         sort=sort,
-         all=all,
-         value=value,
-     )
-     _console.print(_inspect)
-
-
- if __name__ == "__main__": # pragma: no cover
-     print("Hello, **World**")
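
Editor's note: the four public helpers this module exports are easiest to grasp from a short usage sketch, written against the standalone `rich` package rather than pip's vendored copy:

from rich import print, print_json, inspect

print("[bold green]Hello[/], [italic]World[/]!")  # console markup via the global Console
print_json(data={"level": "info", "retries": 3})  # pretty, highlighted JSON
inspect(print, docs=True)                         # attribute/signature panel for any object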
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/projects/CenterNet2/centernet/modeling/dense_heads/centernet_head.py DELETED
@@ -1,162 +0,0 @@
- import math
- from typing import List
- import torch
- from torch import nn
- from torch.nn import functional as F
-
- from detectron2.layers import ShapeSpec, get_norm
- from detectron2.config import configurable
- from ..layers.deform_conv import DFConv2d
-
- __all__ = ["CenterNetHead"]
-
- class Scale(nn.Module):
-     def __init__(self, init_value=1.0):
-         super(Scale, self).__init__()
-         self.scale = nn.Parameter(torch.FloatTensor([init_value]))
-
-     def forward(self, input):
-         return input * self.scale
-
- class CenterNetHead(nn.Module):
-     @configurable
-     def __init__(self,
-         # input_shape: List[ShapeSpec],
-         in_channels,
-         num_levels,
-         *,
-         num_classes=80,
-         with_agn_hm=False,
-         only_proposal=False,
-         norm='GN',
-         num_cls_convs=4,
-         num_box_convs=4,
-         num_share_convs=0,
-         use_deformable=False,
-         prior_prob=0.01):
-         super().__init__()
-         self.num_classes = num_classes
-         self.with_agn_hm = with_agn_hm
-         self.only_proposal = only_proposal
-         self.out_kernel = 3
-
-         head_configs = {
-             "cls": (num_cls_convs if not self.only_proposal else 0, \
-                 use_deformable),
-             "bbox": (num_box_convs, use_deformable),
-             "share": (num_share_convs, use_deformable)}
-
-         # in_channels = [s.channels for s in input_shape]
-         # assert len(set(in_channels)) == 1, \
-         #     "Each level must have the same channel!"
-         # in_channels = in_channels[0]
-         channels = {
-             'cls': in_channels,
-             'bbox': in_channels,
-             'share': in_channels,
-         }
-         for head in head_configs:
-             tower = []
-             num_convs, use_deformable = head_configs[head]
-             channel = channels[head]
-             for i in range(num_convs):
-                 if use_deformable and i == num_convs - 1:
-                     conv_func = DFConv2d
-                 else:
-                     conv_func = nn.Conv2d
-                 tower.append(conv_func(
-                     in_channels if i == 0 else channel,
-                     channel,
-                     kernel_size=3, stride=1,
-                     padding=1, bias=True
-                 ))
-                 if norm == 'GN' and channel % 32 != 0:
-                     tower.append(nn.GroupNorm(25, channel))
-                 elif norm != '':
-                     tower.append(get_norm(norm, channel))
-                 tower.append(nn.ReLU())
-             self.add_module('{}_tower'.format(head),
-                 nn.Sequential(*tower))
-
-         self.bbox_pred = nn.Conv2d(
-             in_channels, 4, kernel_size=self.out_kernel,
-             stride=1, padding=self.out_kernel // 2
-         )
-
-         self.scales = nn.ModuleList(
-             [Scale(init_value=1.0) for _ in range(num_levels)])
-
-         for modules in [
-             self.cls_tower, self.bbox_tower,
-             self.share_tower,
-             self.bbox_pred,
-         ]:
-             for l in modules.modules():
-                 if isinstance(l, nn.Conv2d):
-                     torch.nn.init.normal_(l.weight, std=0.01)
-                     torch.nn.init.constant_(l.bias, 0)
-
-         torch.nn.init.constant_(self.bbox_pred.bias, 8.)
-         prior_prob = prior_prob
-         bias_value = -math.log((1 - prior_prob) / prior_prob)
-
-         if self.with_agn_hm:
-             self.agn_hm = nn.Conv2d(
-                 in_channels, 1, kernel_size=self.out_kernel,
-                 stride=1, padding=self.out_kernel // 2
-             )
-             torch.nn.init.constant_(self.agn_hm.bias, bias_value)
-             torch.nn.init.normal_(self.agn_hm.weight, std=0.01)
-
-         if not self.only_proposal:
-             cls_kernel_size = self.out_kernel
-             self.cls_logits = nn.Conv2d(
-                 in_channels, self.num_classes,
-                 kernel_size=cls_kernel_size,
-                 stride=1,
-                 padding=cls_kernel_size // 2,
-             )
-
-             torch.nn.init.constant_(self.cls_logits.bias, bias_value)
-             torch.nn.init.normal_(self.cls_logits.weight, std=0.01)
-
-     @classmethod
-     def from_config(cls, cfg, input_shape):
-         ret = {
-             # 'input_shape': input_shape,
-             'in_channels': [s.channels for s in input_shape][0],
-             'num_levels': len(input_shape),
-             'num_classes': cfg.MODEL.CENTERNET.NUM_CLASSES,
-             'with_agn_hm': cfg.MODEL.CENTERNET.WITH_AGN_HM,
-             'only_proposal': cfg.MODEL.CENTERNET.ONLY_PROPOSAL,
-             'norm': cfg.MODEL.CENTERNET.NORM,
-             'num_cls_convs': cfg.MODEL.CENTERNET.NUM_CLS_CONVS,
-             'num_box_convs': cfg.MODEL.CENTERNET.NUM_BOX_CONVS,
-             'num_share_convs': cfg.MODEL.CENTERNET.NUM_SHARE_CONVS,
-             'use_deformable': cfg.MODEL.CENTERNET.USE_DEFORMABLE,
-             'prior_prob': cfg.MODEL.CENTERNET.PRIOR_PROB,
-         }
-         return ret
-
-     def forward(self, x):
-         clss = []
-         bbox_reg = []
-         agn_hms = []
-         for l, feature in enumerate(x):
-             feature = self.share_tower(feature)
-             cls_tower = self.cls_tower(feature)
-             bbox_tower = self.bbox_tower(feature)
-             if not self.only_proposal:
-                 clss.append(self.cls_logits(cls_tower))
-             else:
-                 clss.append(None)
-
-             if self.with_agn_hm:
-                 agn_hms.append(self.agn_hm(bbox_tower))
-             else:
-                 agn_hms.append(None)
-             reg = self.bbox_pred(bbox_tower)
-             reg = self.scales[l](reg)
-             bbox_reg.append(F.relu(reg))
-
-         return clss, bbox_reg, agn_hms
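
Editor's note: the piece of this head easiest to lift out is the per-level `Scale` pattern — every pyramid level gets its own learnable multiplier on the regression map before the ReLU. A dependency-free sketch of just that pattern (toy shapes, no detectron2 required):

import torch
from torch import nn

class Scale(nn.Module):
    def __init__(self, init_value: float = 1.0):
        super().__init__()
        self.scale = nn.Parameter(torch.tensor([init_value]))

    def forward(self, x):
        return x * self.scale

# One learnable scale per pyramid level, as in the head above.
num_levels = 3
scales = nn.ModuleList([Scale(1.0) for _ in range(num_levels)])
features = [torch.randn(1, 4, s, s) for s in (64, 32, 16)]  # toy per-level regression maps
outputs = [torch.relu(scales[l](f)) for l, f in enumerate(features)]
print([o.shape for o in outputs])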
spaces/BBrother/NewBingAI/Dockerfile DELETED
@@ -1,144 +0,0 @@
- # Build Stage
- # Use golang:alpine as the base image for the build stage
- FROM golang:alpine AS builder
-
- # Add git so the project can be cloned from GitHub
- RUN apk --no-cache add git
-
- # Clone the go-proxy-bingai project from GitHub into /workspace/app
- RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app
-
- # Set the working directory to the cloned project directory
- WORKDIR /workspace/app
-
- # Build the Go project. -ldflags="-s -w" reduces the size of the compiled binary
- RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go
-
- # Runtime Stage
- # Use the lightweight alpine image as the runtime base image
- FROM alpine
-
- # Set the working directory
- WORKDIR /workspace/app
-
- # Copy the compiled binary from the build stage into the runtime image
- COPY --from=builder /workspace/app/go-proxy-bingai .
-
- # Set environment variables; the values here are random strings
- ENV Go_Proxy_BingAI_USER_TOKEN_1="TC2A9SGkJJeORsQS1Cg8nEzSpMpN5zYzOo22k8DBQZxnYVkIurjux5gjvhEfDDxGJu3WgE4lzt3iO8MaxWI1aDfDNKOkDEHS"
- ENV Go_Proxy_BingAI_USER_TOKEN_2="paUscl1PDFWWgYwhvDdLNeCIXAj1RiL2PVbc3YohO414gW5YpFUq7jJbxitMvBtGk1HFGLSmXvC8eEnisux8qbB6lQgZDrfI"
- ENV Go_Proxy_BingAI_USER_TOKEN_3="0bIpK1cCKIM9Nh8TN4MNUNHR4ax8buwrKJ4NqGDnju9xKW7DFs2YdMgoaLOmHC7cVqdAfMSt8Vl29mCzJL0KNupZxmwsdF0E"
- ENV Go_Proxy_BingAI_USER_TOKEN_4="pdP9BR6XBiqsrDMNDRHcUJDsMnZ2MFXWi0WRJW0YNYqmrNGf1uSHR3eso2o3Vhi6TX4ytZu4lYvZyybOQF33xmCFghGit3Yi"
- ENV Go_Proxy_BingAI_USER_TOKEN_5="UOx7qpxxSUnRqDMZc7uUFrRX7xZ8XSQ5k2Bjtv0KSq9TjidcZJaKp0okoB9ljPSI4nvl28If0eCSFusYyF8Gekm9jYgpxcqV"
- ENV Go_Proxy_BingAI_USER_TOKEN_6="NDrzme4nPoTBMDFo9iKOCJySjUvEMtPiXxXHICKKB2C81uFSzFltG0lu0DVFzrzFUTelXkTcORFNemCvO2TPU5tkZau2VtXA"
- ENV Go_Proxy_BingAI_USER_TOKEN_7="pRE8biiUJXiS8tKb22xs07ycAGrPxQjhP6tMqPhdIw2rj8ZwOkSBx4EbfSVOe8SE8s9S8azD1J8l1YcwxZUderDEkRoF34tj"
- ENV Go_Proxy_BingAI_USER_TOKEN_8="kVqUz9VW9pQZUiqcCwQc6CKrk7QjNQ2gugiNwl8PBFQCoLUmLW5bsCzY8KglMwaFl0af7aVKoKSoQGS1K5qThyyCnVRTVZ0l"
- ENV Go_Proxy_BingAI_USER_TOKEN_9="UysPfryzg7u4Go5g9rNmIsFILwl9zyiCnlDpWsQzUoi7em2burrsetCvRfreNcjsGQs8FNGNQMZ1bsC231XlokNI8Glhmaqp"
- ENV Go_Proxy_BingAI_USER_TOKEN_10="G8NlHPjUSxzOKFRaMGPF3qjfTFTjUAOSpXzISCuRjCX1rFUhnE6nkTerHAScJArcYIfnbemJNJfmbW2tmc4WIu607yKuVQo7"
- ENV Go_Proxy_BingAI_USER_TOKEN_11="j8H0igMRTQzIk7PA4v4bqJxW27AHHHfyJcAUMPUvuC4PS0CSvnPwMGxMpVwUtFscm0QI5Drq17HnrXR2ZCdR5ZRtThoX3L1B"
- ENV Go_Proxy_BingAI_USER_TOKEN_12="AvRF7W8yMixNRDYcAOYPQJckOOyFbDyD2PyW46q6QZFdpe8LFODg7HCvXdN0RFtGlZxXLWJWGU1P9JdQCxjOo3Zdmxuq16Q6"
- ENV Go_Proxy_BingAI_USER_TOKEN_13="zHNekWFtOCNGpeNQfhARZu4DFW0sR6Wjr7oUPw2aPGnThodLnxdjSJZUSeR1CiPrwwzsC0MPf9SkT6HJyiP2mq67baJlNAAN"
- ENV Go_Proxy_BingAI_USER_TOKEN_14="EsBP4HVFwD4ZnDxdK2L0FHh2p3ewMIHfhtAy4E8zI5LWTmuNVcCZu8seWle1o1FOHNKBAjedz6nHGvroyUIkHUEm03qdbJeP"
- ENV Go_Proxy_BingAI_USER_TOKEN_15="BRtYyWvunlPyrw8iGCjKZ4cJ5HjmTSaRs0C7LUuENkWTml9xdSgwm6Cs3Sey58KFemExBMYdeLkkT7hjytpIlKOll9nhSGdn"
- ENV Go_Proxy_BingAI_USER_TOKEN_16="WJt8fwQ6Cv1YPf2wFBuCImPeZ3qGFVM0ZnuwuvDOMJlNs7EbW0UIUVmdM3AjRfVTw6pGhybgxzXzg8EeH7jZaG5P34Sehevu"
- ENV Go_Proxy_BingAI_USER_TOKEN_17="0BxGOZib0QxPto4W0QdgEHyYXrEYPW6x1RNBpPT5cwDFKZ8Epq70jaEvFiLToHQA5cBgfZIDTLAyTiKcuxbnFcTc7Lgv9SQP"
- ENV Go_Proxy_BingAI_USER_TOKEN_18="EjCGMaSvke6Gd6Dbdv0oiT9pVK0plHsBr385w0b7l5ZV4Lrj2afc26R83skTkdy6yhSttcb7ojVW9MZLGP6mEbO8jNQQuoim"
- ENV Go_Proxy_BingAI_USER_TOKEN_19="UQThrikXgI5vHvIijDwFoKu3379RuFHzSFhVXJf5Dm8zOmaNMxeUBmasnh8sFj4x6QcOBJFculXYMnWd0MrJflgQen0vFvJj"
- ENV Go_Proxy_BingAI_USER_TOKEN_20="lCI04iVaUYbHnwPegph7qv4NHoJ5Ffml9g6gdug5PQE1ZJHTxZRdKM3lz3p9Vu3IKCxezajGY6odIo29fHH1JdCMyjsz2U0r"
- ENV Go_Proxy_BingAI_USER_TOKEN_21="hPDp0pNFH4sXY0SBvuAFGtw7VuwckPffy6juSHg45bqtwg1bwHxYPKiCnH2j0ZBVxz6TrTfQkziX6oNf99qd7Wp9fcAJT8gX"
- ENV Go_Proxy_BingAI_USER_TOKEN_22="bR5gYcDj8MoEguFbzxbtLTnv45ZsEvtV5WZqHadEYXswA8ba1G24JjTyu4Fb0pVff6sruanLSLFgzz0zVRhuyTPXsOlvnsEh"
- ENV Go_Proxy_BingAI_USER_TOKEN_23="r5qlaySaHQXfKMhsPwQu7WQbmCjoQ8ckJhotAmerauG70oCsFaBiTj4SfQPzBv1shAcamDiUsw8Iv6sYe8ZmOKWM2uaJ12W8"
- ENV Go_Proxy_BingAI_USER_TOKEN_24="1LTdad7zzISpnbx3mq4Il9Bihx3MF0Nyd9wUvw8KYoFWp4CG9iwrqgWEA9Ll1ctvBGbGPyoatvNVwq9rZSn3OodQVXi3Z3bT"
- ENV Go_Proxy_BingAI_USER_TOKEN_25="8wClinPdX7KRzHUqIW31qukRj8IptqB0b5iykbT3EKdT3viqnq3CD5w3qTlHg3uEA8YEOQxezXNNV0iyuvo2qq9mDChp3kTa"
- ENV Go_Proxy_BingAI_USER_TOKEN_26="B2yjKb5DkF31NAsh2Mz8PMR6rNk9duAJu5eefEVZ0fie2UFv0wf9b3BKZfjuc7aDjfgxPqvmohIlm5djP3wD1iOcFykkPFnT"
- ENV Go_Proxy_BingAI_USER_TOKEN_27="EpgkayCxGHmCOqHj9o5hzebYiaSXtjAIgAgI4bf3cJZ4F9mHoFXRcybL4meqU4RuESwganh621opTZTwwB9UR0k9CVACxWZd"
- ENV Go_Proxy_BingAI_USER_TOKEN_28="5PvwBWYBGt63G2JPjeJGQ89u6Gqw56MKj9yRHIEuM9lrMb5tOSb42aGpcK8OY3lNMzOdQ1hVoFflMcRI1RZ7mECSQuyyoDoq"
- ENV Go_Proxy_BingAI_USER_TOKEN_29="W8jOgGUV5ww7j07u4wNYpBSd3VZMXYn0M0ySaVp6my6695Zt6AHdi9eIWNeHfjVUcofeur0IrVzXB9kQH5YNqgPUkB5iywQC"
- ENV Go_Proxy_BingAI_USER_TOKEN_30="6LEqwGC9BfBRVJXIGAwDv8avoRPa7tQzUNS6ZyZyQyQVRZgE6hO7e5pkVTewNkVdmVcAFohdk95tihYoLOMFe9olt4VskWTI"
- ENV Go_Proxy_BingAI_USER_TOKEN_31="IothdtIyxq3TI3YXcajNs7L6UvVnS3WUyyKaFNGRLqRYjmx4uHWF406KkAQkLrgFLo91vPddacy3xmvmrbHXK8FAXcmudM9B"
- ENV Go_Proxy_BingAI_USER_TOKEN_32="QX6GxonDsAtABUMkLebbxHkF0jlZYRcQSqUKY1av0MCINRTV991Za3wMx1xDqURiBt5L3atK9oGbk8JT7nZaC5cBg4YXIoPo"
- ENV Go_Proxy_BingAI_USER_TOKEN_33="EZ02rczDJQ7CjfqDeekV2FwqQUlTANe7CCAyPnmepVeka7mOYlyyE7whZtmJvvJTz25aWt7MW6fc9n6MD9Q9lu6tmFAqS7Ap"
- ENV Go_Proxy_BingAI_USER_TOKEN_34="Y0AIyWWY8U0hgVyZO7CR4yMVPMhVx3M95T8F7q5a0pptOe7xU0cTRpDfuKXCGwVG0nrHdZobVINV1sQB3CnQtNQPRiGF7rJV"
- ENV Go_Proxy_BingAI_USER_TOKEN_35="IjtiSvRx7AqcaPMB9Lx2GZjMdS9hx1H49QTtWSVcfP6NdSxe80uUjbENuSWGK10iiM1KA71kZCik3yGNPDSnehLS2ufzqeAw"
- ENV Go_Proxy_BingAI_USER_TOKEN_36="M2bejrY6vN78Uo8sCkUHnduiiQS7sHAKme2p8uTvMEUHj6BFuyeygh1fkbiMd1b0S5eDqGcJ82xswV8lyOoSfeu7AFaVTWww"
- ENV Go_Proxy_BingAI_USER_TOKEN_37="Z1f6Bn95HEjVVhfk5s8DYJxtOyL1uwbcYj9OAFjc87IcDliV8nQWbknMC2lCR0uRP6eaTjnKwtWU3arZ3Rc1x10ZNKrWQwp8"
- ENV Go_Proxy_BingAI_USER_TOKEN_38="nreTWeFh3w7fNlnbZAjXITRkiHYNWuQDAF60jp7cuJkoLCuTA5WR7jKD9cjZ1RNR3DV76lFnCA029gCdTdAvu7VD3Ii0Q0rP"
- ENV Go_Proxy_BingAI_USER_TOKEN_39="8HEOR8RvvPMiUZdsC7CWv9EIaI5TFMeYNMcr8FL455gIA7YjSZdaNcDkiZNQPmJsunIb3xkIHXk8YYgmlJ2UXyckkxKkjYZd"
- ENV Go_Proxy_BingAI_USER_TOKEN_40="7uoDMkzgZnZupAHxNj96y6PZW50mAAqimoFAi6sqGnKXPQsRy55xvWCfS9CHCo59Pr9RWbQq06Tckv1vxtcpmPZphkQ8bT6l"
- ENV Go_Proxy_BingAI_USER_TOKEN_41="jr6vz6vJ7KWJsH46GeJsqchnoJXE1MA1DDwZ8Jtejz7cyXioFu1lFk4Jnb9WugvdbifYhJ97I9XNea3BlfjWhbZSOuMv9pB3"
- ENV Go_Proxy_BingAI_USER_TOKEN_42="F0v3yQLuYLCA3L7Hjbbm8Jl0npuY5kEe41OIapozm1p6j3D1kfsq7ErL7s2LH6HkzuqmFPkGl83o4FuKt7T9rPullmDPe2SF"
- ENV Go_Proxy_BingAI_USER_TOKEN_43="2fJDrxZxfQCkTCkche84JvIPuqjHJHCQBjWgTlzv8jvgL5SkmbvwOeR7uAi2ViNQQoCZgoJAeR089aGC6BzvdJJhEgDxOeXw"
- ENV Go_Proxy_BingAI_USER_TOKEN_44="P4ebCHpsPYmXnhDzRPiNx1TQV8xQUkmme3QnCeulBf5NdDRJ53KW6ZkPNWhv5HpiHdKOOsQbQYkBa4d9ymog8KjBmFAWjr9g"
- ENV Go_Proxy_BingAI_USER_TOKEN_45="j7AujR9uXEfaAuq0ggSDElCBM0vweX16uJjEsnY8fdMzRHf1eFAsnfMnEhKQrGHYGkp8pet5AHdJDnlw6qiPDbZn3d5Hj2Ln"
- ENV Go_Proxy_BingAI_USER_TOKEN_46="XlDg11qT9HylFDaRSaXpGdOzN8bmmUFbMOODbOortTQ5Pw9BWnDVKCoMIqDdaTlpzky0nx6e4IbyzaBHhSs8iMYv5mmUCDcP"
- ENV Go_Proxy_BingAI_USER_TOKEN_47="kzALJ2wgXc4wxJzk9HCSvfxxpGaKNWn0KLrbkb1g2foIdL4ArCy5r59DeKN5sZfnWI8THkJIdxAhFuktIZWFK3X0oZnUMAO1"
- ENV Go_Proxy_BingAI_USER_TOKEN_48="uMcYZ6I8KrKC8Z4YcQjrLdVbiqFqsD7uN4tR2olGfs39ywTSof6O7BmesUiTacDHJfNyemoCOw4TDLh8RIKt41PnC0VKz0KU"
- ENV Go_Proxy_BingAI_USER_TOKEN_49="I5zmYN5ogsP7Wj0HOCadfo6VD5eX4CRm0H72vqI4gtOtciz2pv8dMfvp5bH5WRZYSa4AV1gI2c37C9DhiJOKgc1SuXRhGGE5"
- ENV Go_Proxy_BingAI_USER_TOKEN_50="gZtfqhmQSNsTmfsFjulnem7IgfRh4PTK9LgplLY9YsZp9HL6ehx2MMAWT9Sim838tijXjjfm9PELtFdvkq57yJtEKvqYFRlY"
- ENV Go_Proxy_BingAI_USER_TOKEN_51="Q5a2uP5MDmWqbnIcc3l4smPVmxoUgVFT2TV0si2vncEgZBk5umbOkrDj5XURTvLWHg67m6PGiGWzRhSpQnqOLwpLuP1BKt5D"
- ENV Go_Proxy_BingAI_USER_TOKEN_52="yZF6khS50tx06Kbvh12fncVOtIW7dpTdXoDEERB2MlYkGZu4GAz9tHf6I2rbo1Dg1Z4oKFheugmsJpkMPsDr8SpzNJPcDsFa"
- ENV Go_Proxy_BingAI_USER_TOKEN_53="fNdtn4ggIJn9RY9bZ6aAPLRaxRY21LNUvcZKFRBQeXb12cBlSG1xgr6W0xw9THKdxxToc8vvoTyW1AEXSTFSIUTT60Ja5tiI"
- ENV Go_Proxy_BingAI_USER_TOKEN_54="kCcz0yBqrtK6YrhzvAdO51MDh9oxXXKfNAWPqR8pnxj2bCKvCneZkQuBkY463SbWK6h8tHchmN2zk07qxsCGHwB9JHvNoiM5"
- ENV Go_Proxy_BingAI_USER_TOKEN_55="5zBykql9LzeS3CFXYLFBT4pOdvNT7yGTLwtm9kv79npchr3x5gODW5H45uisCRte7WBZRvMaZ5jkBWl3lTCeFfoOwww9daoq"
- ENV Go_Proxy_BingAI_USER_TOKEN_56="RKhsx4YuLj8DDhGgf4bJl3DuJAO1uKjAgZyCie7gD0TNJXgPn1EufcfUnkgOuF4eDWt311yRGkklpFevsh3GAnwUqJ4DDw54"
- ENV Go_Proxy_BingAI_USER_TOKEN_57="w5h6Y9Im9KJm7wNeX9L5UlVBDvS1fljBdLfCD43KOeu96ml4LDyTKExAvoVxhugmU74msBM44LGVHp3JLEmdFthIEzbsepKm"
- ENV Go_Proxy_BingAI_USER_TOKEN_58="7FeRl42SUNDyqFZMurvbYWyLuO6l5kMYyrlbM6CJclNHv0uznscu7muZGZNBd06LryVl2o5m3Bwy0cjLK4TlCL6nWHbCIe6W"
- ENV Go_Proxy_BingAI_USER_TOKEN_59="wm3Prp3ktBkV8pUwd06rw9gP1SClTX6CyhRX9Kdxjfh3BaVtOuSEUEEMhtrpSGViqYsAfydxWGE51kRPO4D7ibrjAsjwvcsV"
- ENV Go_Proxy_BingAI_USER_TOKEN_60="55RzuhfrcePzVq9DNA0E3rV1O1f1R96Ti1JvKN9NSmNPK9si9ResX7bz40X0X3td9NRzEvuAcpjtQANZOy58nE43Nqjg2nTV"
- ENV Go_Proxy_BingAI_USER_TOKEN_61="Qvr1ov9IXfMEjA0ovivxUBb6Z4xa8TgSSeLZG8MGMIQzeFTJqptGQmKpeyUdla8Dsg5ovX936IM6TZLkXgd0aREWGddrle8D"
- ENV Go_Proxy_BingAI_USER_TOKEN_62="gbadaXMM7f4iUTeFAPxf4Jsou81xlRi68Dh1AeZkpbhxsfIp4GhqDUaVsqRC16kC3lEOE9RXNGdjNNc7mt813WUIV5mCgvrD"
- ENV Go_Proxy_BingAI_USER_TOKEN_63="TiGokkh5V1UZmRomvX2MxS0RYV21xwrD6Ee4R0hiiVjb0pPquSDUWh0UXQTBDaoa8A9pFpiDu8RVoL0OdVmLkaWhnAfrWPWG"
- ENV Go_Proxy_BingAI_USER_TOKEN_64="1PPeKjfCS9TV7hQksiU3oBmh00nRzw1F4qznkVQXby6SMCQUAjDTzwh1l1wLANrfsDkQVQ8c3K07SUDoo1D3jmCfs5F62ng0"
- ENV Go_Proxy_BingAI_USER_TOKEN_65="GiUEI4VAfOIVaFJkPQ4nFpLskP86j8OZtrgGUAN9bNDjURgK5Cy94Mf7iROUNSJMPddTOhGgftSk03WsSR2cl7ajdTwUm4Wy"
- ENV Go_Proxy_BingAI_USER_TOKEN_66="xcioU6RQJ6chNkdb8uYsH5Ztc6kONipZS4jYBXSg3aFWubtK2IufR2bTaEWCqYGZ6uIUAIfZJ1n9vvKDXLoUdUJZK4ngxHoT"
- ENV Go_Proxy_BingAI_USER_TOKEN_67="MrInRvmdK2OiXahBPd3r3lwR9WK1ac9O1uP2ym6EBSlsQKR8scqPf72lM8z1ijICW3yT779HnbdM9SgWOgdOCX0Z1e06tk8C"
- ENV Go_Proxy_BingAI_USER_TOKEN_68="nc2ryqyIaUElJldJmpEO535KM7Cucn1dTATCI6JhOcuCqJ717kakJ1hVDwxr4BMKR03l58X26c1oDY0qCST3h2VThRSRAXT3"
- ENV Go_Proxy_BingAI_USER_TOKEN_69="TH7IXqDOyypEey9IUZYGyNCmOJ1PqscAgniuYJFUOW46O0m4K182G8QmtrA3wwaiTFWEH35mNKduWfWguLNli2RWwWyC1KAj"
- ENV Go_Proxy_BingAI_USER_TOKEN_70="lThaLBELlmi764QZZJb5ejrFKjJDoGYEixHcBFEjHSuBXPRWUC7e3ZEVPGoslvxEArLWRks9knanJyvcPeiVAXYHaLBKsOYO"
- ENV Go_Proxy_BingAI_USER_TOKEN_71="3T7PMbbhSzHrDCeHdbpVJXszUZbaZu7iqlRs2eBCShVFA7nYODESR15GleR9clw2UosxElid6EtV1382nZUaB58hHPglb0Fm"
- ENV Go_Proxy_BingAI_USER_TOKEN_72="KMJZ4DD5jMqaW2RDv0wmwldDZUnBBtXAbAhPUazodN8wt4r6SORPGsztlShcr9yqGHDadMugZtYjQXIuv8jCbAwYvbNx9SD0"
- ENV Go_Proxy_BingAI_USER_TOKEN_73="mpUHEa5GjjcJspDFxampgzT98vfF12hXnSXvqyxpzTSfj8JtMtF40xBLPQc4wMcXo4U7PskKJDgqTKU3wBSNlTUgpTCo7xoc"
- ENV Go_Proxy_BingAI_USER_TOKEN_74="6eQNY6oxlZhE6utQgJgPe6w6kpDL0mZY2FZxUdCE0kDjzRjPUKSXtsRkm98xG26uyf4WIKTwKkldOaZU9mBaeElIJqrzfZq5"
- ENV Go_Proxy_BingAI_USER_TOKEN_75="cFVkanin59BHvcaRAQrpSx3uOwrHgby7iGn6wTdYfrfe9wGKzbhgUwpqQvFE2U09euiDFzekUceKft9Htfne69Kj8FcNxq6J"
- ENV Go_Proxy_BingAI_USER_TOKEN_76="ha9BCiA8KBa6zmGbmLxCQg3hIlbwCUBpe7R4MzcCIRwZ5SOqmhAtM2cJ33z2V5OdcA09edpAxIxlQqMbK2ticbaGBfC4fwdd"
- ENV Go_Proxy_BingAI_USER_TOKEN_77="R05Cx33vZ1ZGGK1vTdHzmkYKllTFlekytxlx8AfTJdDKjdyl2RsRc3JmqfAVgKMgu5mAkEDmiCcR44oUAv6bTw4vkPFOiqJ7"
- ENV Go_Proxy_BingAI_USER_TOKEN_78="sZCNK9gpcDULArtJNynktU75wQIahZyKODg1YGzLvOgMfVTwFQHHkHcxtmiWv8yaQinm7HcNJer8sgLMDxy5Uqmwe19rojPi"
- ENV Go_Proxy_BingAI_USER_TOKEN_79="TZyCwOyfatASK3ocrbrGGGxBalcWEPir7wd2aOQL5dstQu13xCU8lNtTpt8ODRAPJb2w4KU4CS3lXIPf5v0OAwUXdVeyykyT"
- ENV Go_Proxy_BingAI_USER_TOKEN_80="vZTRETlgYLtIzN5dIAItGxCjbEYRKR6w85iIVkwozhAbCW7GKs8AF4WEl7Nk0M5mqeVLk8XInxWKrDobJJ6D42AWQl7hOtkk"
- ENV Go_Proxy_BingAI_USER_TOKEN_81="NSCnCPpmrxhi8ct3KGVdHz7awKrkHYDqKUMx1h9IWFRIhpAQbtzt45yqQO5P04qk6WmCwDs0kJT0LBglpaTBEzNsRIgTtjDV"
- ENV Go_Proxy_BingAI_USER_TOKEN_82="TpRakEtbhH3AEXCicqppgKa6DUzhW6omoBD4DOUnspWamei6VaGTIigpX5k3aUxzSyYB2YRNGn2TIG6i9H87gqVGKWYQikjF"
- ENV Go_Proxy_BingAI_USER_TOKEN_83="xlTFOpWVmr9ISw0lrQGeZQiQ3MpYJU3SVaGS1URRb4L95xKdqMMg4ru9aGtlSPBdLbA1RVhhVOqpewuz0U93XBFtoEFMGIIc"
- ENV Go_Proxy_BingAI_USER_TOKEN_84="WixkKjpNb5UsrCy0WIYkFcRoAPwdzeafWFRdg0QY03rGpkrIpFQh1hg8YFlNuYGjs3Ry7B6COZZDAZPVA6jYGjkdzemsstbK"
- ENV Go_Proxy_BingAI_USER_TOKEN_85="6ODol72Wzl1qGmMWorZilxHKDUVDgiFINWTI2tioJNOtSeIh4M93gYcjwf54ZE5dExiCUFQUxVwJGW8zJQMh2IBFmYBOUv3a"
- ENV Go_Proxy_BingAI_USER_TOKEN_86="nESmxxnShN6lB2QvCUt830UNE1cKJ83VkwhwByE7xCdrt4Rnw5yJH7P0YdboQX91Il80VPUKbKuqp30rpE8Remdkv9g6p91E"
- ENV Go_Proxy_BingAI_USER_TOKEN_87="BTbGrePewdI42YpzG0WDpj2oam4U27gnTTNyxYHPfM45OG9EdrWByRTykyAugAgUpQ4y4IRcjhh6zMMocLUtSFjpHfeuT7fV"
- ENV Go_Proxy_BingAI_USER_TOKEN_88="z8v3yyrqJE5Zelap5zUFjrGc7utwMzlg33exE4liQ6L3TEAE8gI5CnfC0iguySeM5DKnqPk0JHLv2rZll6YSxFiCmKUvqnH2"
- ENV Go_Proxy_BingAI_USER_TOKEN_89="t3uS2FhKSogdJjcpw8h0mefYTu2U56vQTgSBvSlEkwQxUfbyuWk5Hiv1BHuuvRc6yrUh0EB6TjLts16pSksEwxODHqq8ZHEv"
- ENV Go_Proxy_BingAI_USER_TOKEN_90="AzVkroU9cCT1kaze0EwfR68e9FX8mdDvW086UwKXVJu8CWx5nmgjBxvHkYs3OVCBDGTGsq9pkRQkQ4uZ6DCBqzsMVCvDadbF"
- ENV Go_Proxy_BingAI_USER_TOKEN_91="rTot428nhMOpsHQ9bN1Ufb0Sjhf8nmvXDu5fNj4j14RYMb9XSbG2PUYUqBaTYIGVL3ddGnzGnKfMnMkMX2j3Iz5c9EWZwNNl"
- ENV Go_Proxy_BingAI_USER_TOKEN_92="cf9BslFTYBRf0fumXdK2aaDSkSvcpHZ3pW4GAx7LJVp1QT3pjf9JqpumUlzEW8oNSNz5usrjLrTHCgIqLFWfyDTLdD3uCsSh"
- ENV Go_Proxy_BingAI_USER_TOKEN_93="xQ2pXfXN2H4Ft04sQBq1bMJX9Uu6VRpnMflAOXX3rxYuWE7xi1eEl3DnGdkpzsPamHHiKfC3gzvaPdrzNDfpXW04neQcXlkN"
- ENV Go_Proxy_BingAI_USER_TOKEN_94="r64AHCx04d4xeJeRbOVhP6iYdwKSjlZRCrkXj3XZWNyVGgh2jRVmZEyakLecV9mUTlCILQUKrpYC8Hls2gy3m7QVsDrcRzTy"
- ENV Go_Proxy_BingAI_USER_TOKEN_95="EhmIRFrC5iiD7tFYDJThdNiGjvRAVwTXR9Et9TBjLwSag9fYdfdL0B5uRc57oWyz1U89qEuRiMVaT4UIH2OBxLVXb8v1gkZZ"
- ENV Go_Proxy_BingAI_USER_TOKEN_96="JnWX2ny72xvuJaZuoRAUxtCVbO9zmF2nNwJOMGhe1niAXAvy0gt49sjxrcGZO6bgUaXURv4E3KOlqGfZjpBxkPF2L7TBpuHx"
- ENV Go_Proxy_BingAI_USER_TOKEN_97="Gb7nUWMB5k06SV3QY5JRH93tOuiTpdpGNBMkbvZb8zV2z2WaK0B5ZlcrXddBotcqtbpfxmFCJmwDlg1KIACwFZiFVDRTmgg9"
- ENV Go_Proxy_BingAI_USER_TOKEN_98="2fm8AEREnPZYRGcLN0nAXgRuT6ZbPAXoS8KkSoRooyewyVHtJ2keM2cATTprgsmHeKi0JJT9NJHjbsyAhzmpu3J8vWJV4TV1"
- ENV Go_Proxy_BingAI_USER_TOKEN_99="LyOYBUCmdtocPNFoMJmWbTvUMNczvjirLnLvRkUrlcvUxpHwNpgkHOXhmHtg15mwRzVdJJAz7NV7NYEmG2bIxqWnMCEg0JUy"
- ENV Go_Proxy_BingAI_USER_TOKEN_100="5AAn7568fHIP5CS943xExnYay5q6llB3g8HyBGHhxpXUhB7VQ3T0oaeiUn2ijAkFE2kytcFcIvOrt8Jnf4SghMhxTrbbdQbF"
-
- #ENV USER_KievRPSSecAuth="5AAn7568fHIP5CS943xExnYay5q6llB3g8HyBGHhxpXUhB7VQ3T0oaeiUn2ijAkFE2kytcFcIvOrt8Jnf4SghMhxTrbbdQbF"
-
- #ENV USER_RwBf="r=1&mta=0&xc=6622&rb=6622&gb=0&rg=9600&pc=6619&mtu=0&rbb=0.0&g=0&cid=&clo=0&v=33&l=2023-09-15T07:00:00.0000000Z&lft=0001-01-01T00:00:00.0000000&aof=0&o=16&p=BINGTRIAL5TO250P201808&c=MY00IA&t=2344&s=2023-02-25T05:09:23.4377301+00:00&ts=2023-09-16T01:58:16.7983448+00:00&rwred=0&wls=1&wlb=0&lka=0&lkt=0&TH=&e=FN4I60_LCobX2dnr0j-KwTJIek7CH4ldrfhbl_qNWuaFXUrSXRQcf-ZwZMeUTvJiRrh4kNKt27G-vmYj3fT7pw&A="
-
- #ENV USER_MUID="0926BE48454464DF1A1FADC4446V65CD"
-
- #ENV Go_Proxy_BingAI_AUTH_KEY="BBrother"
-
-
- #ENV Go_Proxy_BingAI_USER_TOKEN_1=1CD-ki-xOdD5T3k715LieDYox1aW9WXeQ1v3mE5P61b1T70K11v3E34xD-PXGqkPdmb_RQEfP-50cEIoH-wFZ16a-YUnIDae7-j1G_wpqBzXbXg4V6LBDfBonbyKPdAWedTOKgWpXEgBTq7z2HtrIEaCMa3P2p_-0Jvul-u7BO7jCxwKLYQ6BLiCiespUrm3P4oEmoeyWnIXEFRggZzhZ_A
-
- # Expose port 8080
- EXPOSE 8080
-
- # Command to run when the container starts
- CMD ["/workspace/app/go-proxy-bingai"]
spaces/Bambicita/rvc-models/app-full.py DELETED
@@ -1,250 +0,0 @@
- import os
- import json
- import argparse
- import traceback
- import logging
- import gradio as gr
- import numpy as np
- import librosa
- import torch
- import asyncio
- import edge_tts
- import yt_dlp
- import ffmpeg
- import subprocess
- import sys
- import io
- import wave
- from datetime import datetime
- from fairseq import checkpoint_utils
- from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono
- from vc_infer_pipeline import VC
- from config import (
-     is_half,
-     device
- )
- logging.getLogger("numba").setLevel(logging.WARNING)
- limitation = os.getenv("SYSTEM") == "spaces" # limit audio length in huggingface spaces
-
- def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index, file_big_npy):
-     def vc_fn(
-         input_audio,
-         f0_up_key,
-         f0_method,
-         index_rate,
-         tts_mode,
-         tts_text,
-         tts_voice
-     ):
-         try:
-             if tts_mode:
-                 if len(tts_text) > 100 and limitation:
-                     return "Text is too long", None
-                 if tts_text is None or tts_voice is None:
-                     return "You need to enter text and select a voice", None
-                 asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3"))
-                 audio, sr = librosa.load("tts.mp3", sr=16000, mono=True)
-             else:
-                 if args.files:
-                     audio, sr = librosa.load(input_audio, sr=16000, mono=True)
-                 else:
-                     if input_audio is None:
-                         return "You need to upload an audio", None
-                     sampling_rate, audio = input_audio
-                     duration = audio.shape[0] / sampling_rate
-                     if duration > 20 and limitation:
-                         return "Please upload an audio file that is less than 20 seconds. If you need to generate a longer audio file, please use Colab.", None
-                     audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
-                     if len(audio.shape) > 1:
-                         audio = librosa.to_mono(audio.transpose(1, 0))
-                     if sampling_rate != 16000:
-                         audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
-             times = [0, 0, 0]
-             f0_up_key = int(f0_up_key)
-             audio_opt = vc.pipeline(
-                 hubert_model,
-                 net_g,
-                 0,
-                 audio,
-                 times,
-                 f0_up_key,
-                 f0_method,
-                 file_index,
-                 file_big_npy,
-                 index_rate,
-                 if_f0,
-             )
-             print(
-                 f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s"
-             )
-             return "Success", (tgt_sr, audio_opt)
-         except:
-             info = traceback.format_exc()
-             print(info)
-             return info, (None, None)
-     return vc_fn
-
- def cut_vocal_and_inst(yt_url):
-     if yt_url != "":
-         if not os.path.exists("/content/youtube_audio"):
-             os.mkdir("/content/youtube_audio")
-         ydl_opts = {
-             'format': 'bestaudio/best',
-             'postprocessors': [{
-                 'key': 'FFmpegExtractAudio',
-                 'preferredcodec': 'wav',
-             }],
-             "outtmpl": '/content/youtube_audio/audio',
-         }
-         with yt_dlp.YoutubeDL(ydl_opts) as ydl:
-             ydl.download([yt_url])
-         yt_audio_path = "/content/youtube_audio/audio.wav"
-         command = f"demucs --two-stems=vocals {yt_audio_path}"
-         result = subprocess.run(command.split(), stdout=subprocess.PIPE)
-         print(result.stdout.decode())
-         return ("/content/rvc-models/separated/htdemucs/audio/vocals.wav", "/content/rvc-models/separated/htdemucs/audio/no_vocals.wav", yt_audio_path, "/content/rvc-models/separated/htdemucs/audio/vocals.wav")
-
- def combine_vocal_and_inst(audio_data, audio_volume):
-     print(audio_data)
-     if not os.path.exists("/content/result"):
-         os.mkdir("/content/result")
-     vocal_path = "/content/result/output.wav"
-     inst_path = "/content/rvc-models/separated/htdemucs/audio/no_vocals.wav"
-     output_path = "/content/result/combine.mp3"
-     with wave.open(vocal_path, "w") as wave_file:
-         wave_file.setnchannels(1)
-         wave_file.setsampwidth(2)
-         wave_file.setframerate(audio_data[0])
-         wave_file.writeframes(audio_data[1].tobytes())
-     command = f'ffmpeg -y -i {inst_path} -i {vocal_path} -filter_complex [1:a]volume={audio_volume}dB[v];[0:a][v]amix=inputs=2:duration=longest -b:a 320k -c:a libmp3lame {output_path}'
-     result = subprocess.run(command.split(), stdout=subprocess.PIPE)
-     return output_path
-
- def load_hubert():
-     global hubert_model
-     models, _, _ = checkpoint_utils.load_model_ensemble_and_task(
-         ["hubert_base.pt"],
-         suffix="",
-     )
-     hubert_model = models[0]
-     hubert_model = hubert_model.to(device)
-     if is_half:
-         hubert_model = hubert_model.half()
-     else:
-         hubert_model = hubert_model.float()
-     hubert_model.eval()
-
- def change_to_tts_mode(tts_mode):
-     if tts_mode:
-         return gr.Audio.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True)
-     else:
-         return gr.Audio.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False)
-
- if __name__ == '__main__':
-     parser = argparse.ArgumentParser()
-     parser.add_argument('--api', action="store_true", default=False)
-     parser.add_argument("--share", action="store_true", default=False, help="share gradio app")
-     parser.add_argument("--files", action="store_true", default=False, help="load audio from path")
-     args, unknown = parser.parse_known_args()
-     load_hubert()
-     models = []
-     tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices())
-     voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list]
-     with open("weights/model_info.json", "r", encoding="utf-8") as f:
-         models_info = json.load(f)
-     for name, info in models_info.items():
-         if not info['enable']:
-             continue
-         title = info['title']
-         author = info.get("author", None)
-         cover = f"weights/{name}/{info['cover']}"
-         index = f"weights/{name}/{info['feature_retrieval_library']}"
-         npy = f"weights/{name}/{info['feature_file']}"
-         cpt = torch.load(f"weights/{name}/{name}.pth", map_location="cpu")
-         tgt_sr = cpt["config"][-1]
-         cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk
-         if_f0 = cpt.get("f0", 1)
-         if if_f0 == 1:
-             net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=is_half)
-         else:
-             net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"])
-         del net_g.enc_q
-         print(net_g.load_state_dict(cpt["weight"], strict=False)) # required to fully clean up the state dict, oddly enough
-         net_g.eval().to(device)
-         if is_half:
-             net_g = net_g.half()
-         else:
-             net_g = net_g.float()
-         vc = VC(tgt_sr, device, is_half)
-         models.append((name, title, author, cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, index, npy)))
-     with gr.Blocks() as app:
-         gr.Markdown(
-             "# <center> RVC Models\n"
-             "## <center> The input audio should be clean and pure voice without background music.\n"
-             "### <center> More feature will be added soon... \n"
-             "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=ArkanDash.Rvc-Models)\n\n"
-             "[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1hx6kKvIuv5XNY1Gai2PEuZhpO5z6xpVh?usp=sharing)\n\n"
-             "[![Original Repo](https://badgen.net/badge/icon/github?icon=github&label=Original%20Repo)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)"
-         )
-         with gr.Tabs():
-             for (name, title, author, cover, vc_fn) in models:
-                 with gr.TabItem(name):
-                     with gr.Row():
-                         gr.Markdown(
-                             '<div align="center">'
-                             f'<div>{title}</div>\n'+
-                             (f'<div>Model author: {author}</div>' if author else "")+
-                             (f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else "")+
-                             '</div>'
-                         )
-                     with gr.Row():
-                         if args.files:
-                             with gr.Column():
-                                 vc_youtube = gr.Textbox(label="Youtube URL")
-                                 vc_convert = gr.Button("Convert", variant="primary")
-                                 vc_vocal_preview = gr.Audio(label="Vocal Preview")
-                                 vc_inst_preview = gr.Audio(label="Instrumental Preview")
-                                 vc_audio_preview = gr.Audio(label="Audio Preview")
-                         with gr.Column():
-                             if args.files:
-                                 vc_input = gr.Textbox(label="Input audio path")
-                             else:
-                                 vc_input = gr.Audio(label="Input audio"+' (less than 20 seconds)' if limitation else '')
-                             vc_transpose = gr.Number(label="Transpose", value=0)
-                             vc_f0method = gr.Radio(
-                                 label="Pitch extraction algorithm, PM is fast but Harvest is better for low frequencies",
-                                 choices=["pm", "harvest"],
-                                 value="pm",
-                                 interactive=True,
-                             )
-                             vc_index_ratio = gr.Slider(
-                                 minimum=0,
-                                 maximum=1,
-                                 label="Retrieval feature ratio",
-                                 value=0.6,
-                                 interactive=True,
-                             )
-                             tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False)
-                             tts_text = gr.Textbox(visible=False,label="TTS text (100 words limitation)" if limitation else "TTS text")
-                             tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female")
-                             vc_submit = gr.Button("Generate", variant="primary")
-                             vc_output1 = gr.Textbox(label="Output Message")
-                             vc_output2 = gr.Audio(label="Output Audio")
-                         if args.files:
-                             with gr.Column():
-                                 vc_volume = gr.Slider(
-                                     minimum=0,
-                                     maximum=10,
-                                     label="Vocal volume",
-                                     value=5,
-                                     interactive=True,
-                                     step=1
-                                 )
-                                 vc_outputCombine = gr.Audio(label="Output Combined Audio")
-                                 vc_combine = gr.Button("Combine",variant="primary")
-                     vc_submit.click(vc_fn, [vc_input, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output1, vc_output2])
-                     tts_mode.change(change_to_tts_mode, [tts_mode], [vc_input, tts_text, tts_voice])
-                     if args.files:
-                         vc_convert.click(cut_vocal_and_inst, vc_youtube, [vc_vocal_preview, vc_inst_preview, vc_audio_preview, vc_input])
-                         vc_combine.click(combine_vocal_and_inst, [vc_output2, vc_volume], vc_outputCombine)
-     app.queue(concurrency_count=1, max_size=20, api_open=args.api).launch(share=args.share)
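
Editor's note: the TTS input path in the app above is buried inside the Gradio wiring, so here is the edge-tts call in isolation. The voice short name is one example; `edge_tts.list_voices()` enumerates the available ones, as the app itself does.

import asyncio
import edge_tts

async def synthesize(text: str, voice: str = "en-US-AnaNeural", path: str = "tts.mp3") -> str:
    # Stream Microsoft Edge's neural TTS to an mp3 file.
    await edge_tts.Communicate(text, voice).save(path)
    return path

print(asyncio.run(synthesize("Hello from the demo app.")))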
spaces/Benson/text-generation/Examples/Aparcamiento De Coches Escuela De Conduccin Apk.md DELETED
@@ -1,43 +0,0 @@
-
- <h1>Car Parking - Driving School APK: A Fun and Educational Game for Android Users</h1>
- <p>Do you want to learn how to park and drive a car in a fun way? Do you want to master all the traffic signs and rules in a realistic simulation? Do you want to enjoy a physics-based car game with more than 100 levels and different cars to choose from? If you answered yes to any of these questions, then you should try Car Parking - Driving School APK, a free game for Android devices that will teach you everything you need to know about parking and driving.</p>
- <h2>car parking driving school apk</h2><br /><p><b><b>Download</b> &#10084; <a href="https://bltlly.com/2v6MAz">https://bltlly.com/2v6MAz</a></b></p><br /><br />
- <h2>What is Car Parking - Driving School APK?</h2>
- <p>Car Parking - Driving School APK is a game developed by Racing Games Android - Appsoleut Games, a studio that specializes in creating racing and simulation games for mobile platforms. The game is designed to help you learn traffic signs and driving skills in a fun, interactive way. Here are some of the game's features:</p>
- <h3>A physics-based car game that teaches traffic signs and driving skills</h3>
- <p>The game uses realistic physics to simulate the movement and behavior of the cars. You will have to follow the traffic rules, obey speed limits, avoid collisions, and park your car correctly. You will also have to learn various traffic signs, such as stop signs, yield signs, and one-way signs. The game tests your knowledge and understanding of these signs through quizzes and feedback.</p>
- <h3>A game with more than 100 levels and different cars to choose from</h3>
- <p>The game offers more than 100 levels of increasing difficulty and complexity. You will have to park and drive your car in different scenarios, such as city streets, highways, and parking lots. You will also face different challenges, such as traffic jams, pedestrians, and obstacles. The game also lets you choose between different cars, such as sedans, SUVs, and sports cars. Each car has its own characteristics, such as speed, handling, and braking.</p>
-
- <p>The best thing about Car Parking - Driving School APK is that it is completely free to download and play. You don't need to pay anything to enjoy this game. You can download the APK file from a trusted source and install it on your device. You can also play without an Internet connection.</p>
- <h2>Why should you play Car Parking - Driving School APK?</h2>
- <p>Car Parking - Driving School APK is not only a fun game but also an educational one. Here are some of the benefits of playing it:</p>
- <p></p>
- <h3>It improves your parking and driving skills in a realistic environment</h3>
- <p>The game will help you improve your parking and driving skills by giving you realistic feedback and guidance. You will learn how to park your car in different situations, such as parallel parking, reverse parking, and angle parking. You will also learn how to drive safely and efficiently in different conditions, such as rain, snow, and at night. The game will make you more confident and comfortable behind the wheel.</p>
- <h3>It challenges your brain and reflexes with varied obstacles and scenarios</h3>
- <p>The game will also challenge your brain and reflexes by presenting you with varied obstacles and scenarios. You will have to think fast and act accordingly to avoid accidents and complete the levels, drawing on your logic, memory, attention, concentration, and coordination. The game will keep you engaged, stimulated, and entertained with its variety and difficulty.</p>
- <h3>It entertains you with realistic graphics and sound effects</h3>
-
- <h2>How to download and install Car Parking - Driving School APK</h2>
- <p>If you are interested in playing Car Parking - Driving School APK, here are the steps to download and install it on your device:</p>
- <h3>Download the APK file from a trusted source</h3>
- <p>The first step is to download the APK file from a trusted source. You can use the links provided below to get the latest version of the game. Make sure you have enough free storage space on your device before downloading the file.</p>
- - [text]: https://apkpure.com/car-parking-driving-school/com.rgs.car.parking.driving.school - [text]: https://www.apkmonk.com/app/com.rgs.car.parking.driving.school/ <h3>Enable unknown sources in your device settings</h3>
- <p>The next step is to enable unknown sources in your device settings. This allows you to install apps that do not come from the Google Play Store. To do this, go to your device settings, then Security, then Unknown Sources. Turn the option on to allow the installation of apps from unknown sources.</p>
- <h3>Install the APK file and launch the game</h3>
- <p>The final step is to install the APK file and launch the game. Locate the downloaded file on your device and tap it to start the installation process. Follow the on-screen instructions to complete the installation. Once done, you can launch the game from the app drawer or home screen. Enjoy playing Car Parking - Driving School APK!</p>
- <h2>Conclusion</h2>
-
- <h2>FAQs</h2>
- <p>Here are some of the most frequently asked questions about Car Parking - Driving School APK:</p>
- <table>
- <tr><td><b>Question</b></td><td><b>Answer</b></td></tr>
- <tr><td>What are the requirements for playing Car Parking - Driving School APK?</td><td>You need an Android device running Android 4.4 or later, with at least 100 MB of free storage space.</td></tr>
- <tr><td>Is Car Parking - Driving School APK safe to download and install?</td><td>Yes, as long as you download it from a reliable source such as apkpure.com or apkmonk.com. These sites scan APK files for viruses and malware before publishing them.</td></tr>
- <tr><td>Can I play Car Parking - Driving School APK offline?</td><td>Yes, you can play Car Parking - Driving School APK without an Internet connection.</td></tr>
- <tr><td>How can I get more coins in Car Parking - Driving School APK?</td><td>You can get more coins by completing levels, watching ads, or buying them with real money.</td></tr>
- <tr><td>How can I unlock more cars in Car Parking - Driving School APK?</td><td>You can unlock more cars by spending coins or watching ads.</td></tr>
- </table>
- <br />
- <br />
spaces/Benson/text-generation/Examples/Descargar Carx Carretera Carreras Hacked.md DELETED
@@ -1,63 +0,0 @@
- <br />
- <h1>Free Download Bhop Pro: How to Jump and Surf Like a Pro</h1>
- <p>If you are a fan of first-person shooter games like Counter-Strike, you may have heard of bunny hopping and surfing. These are advanced techniques that let players move faster and more unpredictably by jumping and turning in the air. They are not easy to master, but they can give you an edge over your opponents.</p>
- <h2>download carx highway racing hacked</h2><br /><p><b><b>Download Zip</b> &#9734;&#9734;&#9734;&#9734;&#9734; <a href="https://bltlly.com/2v6Lu9">https://bltlly.com/2v6Lu9</a></b></p><br /><br />
- <p>But what if you want to practice these skills without playing Counter-Strike? Or what if you want to enjoy them on your mobile device? Well, there is a game that lets you do just that. It is called <strong>Bhop Pro</strong>, and it is a free download for Android and iOS devices. In this article, we will tell you everything you need to know about this game, including how to download and play it on your PC or Mac, how to improve your bhop skills, and how to enjoy it to the fullest.</p>
- <h2>What is Bhop Pro?</h2>
- <p>Bhop Pro is a mobile game that simulates bunny hopping and surfing in a realistic and fun way. You can jump and turn in the air using simple touch controls, and try to complete various maps and challenges. You can also customize your character with different skins and accessories, and unlock new items and cases.</p>
- <h3>A mobile game that simulates bunny hopping and surfing</h3>
- <p>Bunny hopping, or bhop for short, is a technique that involves repeatedly jumping while strafing sideways in the air. This way, you can gain more speed and momentum than by running on the ground. Surfing, on the other hand, is a technique that involves sliding along sloped surfaces while maintaining speed and direction. Both techniques require precise timing, coordination, and reflexes.</p>
-
- <h3>A skill-based game that requires timing, precision, and speed</h3>
- <p>Bhop Pro is not a game for casual players. It is a game for those who want to challenge themselves and improve their skills. It is not easy to master, but it is rewarding when you do. You can earn new ranks by completing parkour quests, compete with other players online or offline, and review your stats and records.</p>
- <p>The game also has a ranking system that reflects your skill level. There are 14 ranks in total, from Silver I to Master Bhoper 2. You can rank up by completing maps faster or better than others. You can also rank down if you perform poorly or lose matches.</p>
- <p></p>
- <h3>A game that offers various modes, maps, skins, and customization options</h3>
- <p>Bhop Pro is not just a game of jumping and surfing. It is also a game of creativity and expression. You can customize your character with different skins and accessories, such as knives, gloves, spinners, hats, glasses, masks, backpacks, wings, tails, horns, ears, hairstyles, tattoos, stickers, badges, and more. You can also unlock new items and cases by playing the game or watching ads. You can get skins for your knife, gloves, spinner, or character, as well as coins and gems, and you can spend coins and gems on more cases or items in the shop.</p>
- <p>The game also has a variety of maps to choose from, each with its own theme, layout, and difficulty. You can play on maps inspired by Counter-Strike, such as Dust 2, Mirage, Inferno, or Cache. You can also play on maps created by other players or by the developers, such as Forest, Castle, Space, or City. You can even create your own maps with the map editor and share them with the community.</p>
- <h2>How to Download and Play Bhop Pro on PC and Mac</h2>
-
- <h3>Using an emulator such as BlueStacks</h3>
- <p>First, you need to download and install BlueStacks on your PC or Mac. You can do this by visiting the official BlueStacks website and following the instructions. Once you have installed BlueStacks, launch it and sign in with your Google account. This gives you access to the Google Play Store and other Google services.</p>
- <h3>Installing the game from the Google Play Store or an APK file</h3>
- <p>Next, you need to download and install Bhop Pro inside BlueStacks. You can do this in two ways: from the Google Play Store or from an APK file. The Google Play Store is the official app store for Android devices, where you can find and download millions of apps and games. An APK file is a file format that contains the installation package of an Android app or game.</p>
- <p>To download Bhop Pro from the Google Play Store, open the BlueStacks app player and click the Google Play icon on the home screen. Then search for Bhop Pro in the search bar and click the install button. This downloads and installs Bhop Pro in BlueStacks.</p>
- <p>To download Bhop Pro from an APK file, you need to find a reliable source that provides the Bhop Pro APK file. You can search for it online or use a website such as APKPure or APKMirror. Once you have downloaded the Bhop Pro APK file, drag and drop it onto the BlueStacks app player. This installs Bhop Pro in BlueStacks.</p>
- <h3>Configuring the controls and settings</h3>
-
- <h2>How to Improve Your Bhop Skills</h2>
- <p>Bhop Pro is a game that takes a lot of practice and patience to master. It is not enough to simply jump and turn in the air; you need to know how to control your speed, direction, angle, and timing. Here are some tips for improving your bhop skills:</p>
- <h3>Understanding the basics of air strafing and jumping</h3>
- <p>Air strafing is a technique that lets you change direction in the air by moving the mouse and pressing certain keys. For example, if you want to turn left in the air, move the mouse to the left and press A (or whatever key you have bound to move left). If you want to turn right in the air, move the mouse to the right and press D (or whatever key you have bound to move right). Air strafing is essential for maintaining speed and momentum in bhop.</p>
- <p>Jumping is another technique you need to master in bhop. You can jump by tapping the spacebar (or whatever key you have bound to jump) or by using the auto-jump option in the settings. However, you need to time your jumps carefully, since jumping too early or too late can slow you down or throw off your balance. You should also avoid landing on uneven or slippery surfaces, as they can affect your speed and direction.</p>
- <h3>Practicing on different maps and difficulties</h3>
- <p>The best way to improve your bhop skills is to practice on different maps and difficulties. Bhop Pro has many maps to choose from, each with its own layout, obstacles, and challenges. You can start with the easy maps, such as Dust 2 or Forest, and then move on to the harder ones, such as Inferno or Space. You can also adjust the difficulty level of each map, from easy to extreme, depending on your skill level and preference.</p>
-
- <h3>Learning from tips-and-tricks videos and guides</h3>
- <p>Another way to improve your bhop skills is to learn from tips-and-tricks videos and guides. There are many videos and guides online that can teach you how to bhop better, covering topics such as how to air strafe, how to surf, how to use ramps, how to avoid traps, and how to optimize your settings. You can find these videos and guides on YouTube, Reddit, Steam, and other platforms.</p>
- <p>Learning from tips-and-tricks videos and guides will deepen your knowledge and understanding of bhop and help you refine your technique and strategy. You can also draw inspiration from other players' performances and achievements, and try to emulate or surpass them.</p>
- <h2>How to Enjoy Bhop Pro to the Fullest</h2>
- <p>Bhop Pro is not just a game of skill and challenge. It is also a game of fun and enjoyment. You can get the most out of Bhop Pro by doing the following:</p>
- <h3>Competing with other players online or offline</h3>
- <p>Bhop Pro has a multiplayer mode that lets you compete with other players online or offline. You can join or create a room with up to 10 players and choose the map, mode, difficulty, time limit, and other options. You can also chat with other players using the voice or text chat feature.</p>
- <p>Competing with other players online or offline makes the game more exciting and dynamic and tests your skills against others. You can also make new friends or rivals, and challenge them to rematches or duels.</p>
- <h3>Unlocking new items and cases</h3>
- <p>Bhop Pro has lots of items and cases that you can unlock by playing the game or watching ads. You can get skins for your knife, gloves, spinner, or character, as well as coins and gems. You can use coins and gems to buy more cases or items in the shop.</p>
-
- <h3>Sharing your best moments and achievements</h3>
- <p>Bhop Pro has a feature that lets you record and share your best moments and achievements in the game. You can capture screenshots or videos of your gameplay, such as your fastest times, your highest jumps, or your coolest tricks. You can edit your recordings with filters, stickers, and music, and share them with your friends or the community on social media platforms such as Facebook, Instagram, Twitter, or YouTube.</p>
- <p>Sharing your best moments and achievements makes the game more fun and social and lets you show off your skills and creativity. You can also get feedback and support from other players, and inspire them with your gameplay.</p>
- <h2>Conclusion</h2>
- <p>Bhop Pro is a game that lets you jump and surf like a pro, with realistic physics and graphics. You can download and play it for free on your mobile device, or on your PC or Mac using an emulator. You can improve your bhop skills by practicing on different maps and difficulties and by learning from tips-and-tricks videos and guides. You can also enjoy Bhop Pro to the fullest by competing with other players online or offline, unlocking new items and cases, and sharing your best moments and achievements.</p>
- <p>If you are looking for a game that challenges your reflexes, coordination, and speed, and also lets you express your personality and style, Bhop Pro is the game for you. Download it now and start jumping and surfing like a pro!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Bhop Pro:</p>
- <table>
- <tr><td><strong>Q: How can I get more coins and gems in Bhop Pro?</strong></td><td><strong>A: You can get more coins and gems by playing the game, watching ads, completing quests, or buying them in the shop.</strong></td></tr>
-
- <tr><td><strong>Q: How can I report a bug or an issue in Bhop Pro?</strong></td><td><strong>A: You can report a bug or an issue in Bhop Pro by contacting the developers via email or social media. You can find their contact information in the game's settings menu. You can also leave a review or a comment on the Google Play Store or the App Store.</strong></td></tr>
- <tr><td><strong>Q: How can I join or create a room in Bhop Pro?</strong></td><td><strong>A: You can join or create a room in Bhop Pro by clicking the multiplayer button on the home screen. You can then choose to join an existing room or create your own. If you create your own room, you can set its name, password, map, mode, difficulty, time limit, and other options. You can also invite other players to join your room by sharing the room code or link.</strong></td></tr>
- <tr><td><strong>Q: How can I rank up in Bhop Pro?</strong></td><td><strong>A: You can rank up in Bhop Pro by completing maps faster or better than others. You can see your rank in the top-left corner of the game screen. There are 14 ranks in total, from Silver I to Master Bhoper 2. You can also track your progress in the game's stats menu.</strong></td></tr>
- </table>
- <br />
- <br />
spaces/Benson/text-generation/Examples/Descargar Da De Heno Para Ipad.md DELETED
@@ -1,84 +0,0 @@
-
- <h1>How to Download Hay Day for iPad</h1>
- <p>Do you love farming games? Do you want to create your own piece of rural paradise? If so, you may want to try Hay Day, one of the most popular and enjoyable farming games on the App Store. In this article, we will show you how to download Hay Day for iPad and how to have fun with it.</p>
- <h2>download hay day for ipad</h2><br /><p><b><b>DOWNLOAD</b> &#10004; <a href="https://bltlly.com/2v6MWo">https://bltlly.com/2v6MWo</a></b></p><br /><br />
- <h2>What is Hay Day?</h2>
- <p>Hay Day is a free farming game developed by Supercell, the creators of Clash of Clans and Brawl Stars. It was released in 2012 and has since gained millions of fans around the world. Hay Day lets you get back to nature and experience the simple life of working the land.</p>
- <h3>A fun and relaxing farming game</h3>
- <p>In Hay Day, you inherit a farm from your uncle and start your own farming adventure. You can grow crops, raise animals, fish, craft goods, and trade with neighbors. You can also decorate your farm with various items and customize it to your liking. You can even explore the town, the valley, and the fishing lake for more activities and rewards.</p>
- <h3>Features of Hay Day</h3>
- <p>Hay Day has many features that make it a fun and relaxing game for all ages. Some of them are:</p>
- <ul>
- <li>Beautiful graphics and animations that bring your farm to life</li>
- <li>Easy, intuitive controls that let you play with a single finger</li>
- <li>A variety of crops, animals, buildings, and products to choose from</li>
- <li>A lively and friendly community of players that you can join or create</li>
- <li>Regular updates and events that add new content and challenges</li>
- <li>An offline mode that lets you play without an Internet connection</li>
- </ul>
- <h2>How to Download Hay Day from the App Store</h2>
- <p>The easiest way to download Hay Day for iPad is from the App Store, a default app on all iOS devices. Here are the steps to follow:</p>
- <h3>Open the App Store app on your iPad</h3>
-
- <h3>Search for Hay Day or browse the categories</h3>
- <p>Once you open the App Store, you can search for Hay Day by tapping the magnifying glass icon in the bottom-right corner of the screen and typing "Hay Day" into the search bar. Then tap "Search" on the keyboard and look for the app with a green icon featuring a chicken.</p>
- <p></p>
- <p>You can also browse the categories by tapping one of the tabs at the bottom of the screen, such as "Today", "Games", "Apps", or "Arcade". You can find Hay Day under the "Simulation" or "Family" subcategories.</p>
- <h3>Tap the Get button and install the app</h3>
- <p>When you find Hay Day, tap it to see its details, such as its rating, reviews, description, and screenshots. If you like what you see, tap the "Get" button next to its name. This will start downloading the app to your iPad.</p>
- <p>If you have not downloaded any apps before, you may need to sign in with your Apple ID and password, or create one if you don't have one. You may also need to verify your identity with Touch ID, Face ID, or a passcode.</p>
- <p>After the app has downloaded, you will see an "Open" button next to its name. Tap it to launch the app and start playing Hay Day.</p>
- <h2>How to Download Hay Day from iCloud</h2>
- <p>If you have previously downloaded Hay Day on another iOS device, such as an iPhone or iPod touch, you can also download it from iCloud, the cloud storage service that syncs your data across your devices. Here are the steps to follow:</p>
- <h3>Open the App Store app on your iPad</h3>
- <p>Just like before, open the App Store app on your iPad by tapping its icon on the home screen or searching for it.</p>
- <h3>Tap your profile icon and go to Purchased</h3>
- <p>In the top-right corner of the screen, you will see a circular icon with your photo or initials. This is your profile icon. Tap it to access your account settings and options.</p>
-
- <h3>Find Hay Day and tap the cloud icon to download it</h3>
- <p>On the Purchased screen, you will see two tabs: "All" and "Not on this iPad". Tap the "Not on this iPad" tab to see only the apps that are not installed on your current device. You can also use the search bar at the top of the screen to find Hay Day faster.</p>
- <p>When you find Hay Day, you will see a cloud icon with a downward arrow next to its name. This means the app is available to download from iCloud. Tap the cloud icon to start downloading the app to your iPad.</p>
- <p>Once the app has downloaded, you will see an "Open" button next to its name. Tap it to launch the app and start playing Hay Day.</p>
- <h2>How to Enjoy Hay Day on Your iPad</h2>
- <p>Now that you have downloaded Hay Day for iPad, you can start enjoying this fun and relaxing farming game. Here are some tips on how to play Hay Day on your iPad:</p>
- <h3>Launch the app and create your farm</h3>
- <p>When you launch the app for the first time, you will be greeted by a friendly scarecrow who will guide you through the basics of the game. You will learn how to plant crops, harvest them, feed animals, collect products, and sell them in your shop.</p>
- <p>You will also be able to name your farm and choose a country flag. You can change these later if you want. You can also connect your Facebook account to save your progress and play with your friends.</p>
- <h3>Grow crops, raise animals, and trade goods</h3>
- <p>The main goal of Hay Day is to grow your farm and make it prosper. You can do this by growing various crops, such as wheat, corn, and carrots, and by raising animals, such as cows, chickens, and pigs, from which you can collect products like milk, eggs, and bacon.</p>
-
- <p>You can also trade goods with other players through the newspaper or by chatting with them in the global or neighborhood chat. You can buy or sell items you need, or don't need, at fair prices.</p>
- <h3>Join a neighborhood and play with your friends</h3>
- <p>One of the best features of Hay Day is that you can join a neighborhood and play with other players who share your interests and goals. A neighborhood is a group of up to 30 players who can chat with each other, help each other out, and compete in events together.</p>
- <p>To join a neighborhood, you need to reach level 18 in the game. You can then create your own neighborhood or join an existing one. You can search for neighborhoods by name, tag, language, or reputation. You can also invite your friends to join your neighborhood or accept their invitations.</p>
- <p>By joining a neighborhood, you can benefit from many perks, such as:</p>
- <ul>
- <li>Giving and receiving help from other players</li><li>Sharing tips and strategies with other players</li>
- <li>Participating in the Derby, a weekly competition where you complete tasks and earn points for your neighborhood</li>
- <li>Unlocking exclusive decorations and rewards for your farm</li>
- <li>Having fun and making new friends</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Hay Day is a great game for anyone who loves farming, nature, and socializing. It is easy to download and play on your iPad, and it offers hours of entertainment and relaxation. You can grow your farm, craft goods, trade with others, join a neighborhood, and more. You can also enjoy Hay Day's beautiful graphics, charming sounds, and friendly community.</p>
- <p>If you are looking for a fun and relaxing farming game for your iPad, you should definitely try Hay Day. You can download it for free from the App Store or iCloud and start your own farming adventure today.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Hay Day and how to download it for iPad:</p>
-
- <p>A: Hay Day requires about 300 MB of free space on your iPad. However, this may vary depending on your device model and iOS version. You can check how much space Hay Day takes up under Settings > General > iPad Storage > Hay Day.</p>
- <h3>Q: How do I update Hay Day on my iPad?</h3>
- <p>A: Hay Day is updated regularly with new features, content, and bug fixes. To update Hay Day on your iPad, go to the App Store app and tap the "Updates" tab at the bottom of the screen. Then find Hay Day and tap the "Update" button next to it. Alternatively, you can enable automatic updates for Hay Day by going to Settings > iTunes & App Store > Automatic Downloads > Updates.</p>
- <h3>Q: How do I restore my Hay Day progress on my iPad?</h3>
- <p>A: If you have lost your Hay Day progress after deleting the app, switching devices, or for other reasons, you can restore it by connecting your Facebook account to the game. To do this, launch Hay Day on your iPad and tap the gear icon in the top-left corner of the screen. Then tap "Settings" and "Facebook" and sign in with your Facebook account. This syncs your Hay Day progress with your Facebook account and lets you access it on any device.</p>
- <h3>Q: How do I contact Hay Day support on my iPad?</h3>
- <p>A: If you have any problems, questions, or feedback about Hay Day, you can contact Hay Day support by tapping the gear icon in the top-left corner of the screen and then tapping "Help and Support". This opens a menu where you can browse various topics, such as "Gameplay", "Technical", and "Payments". You can also tap "Contact Us" at the bottom of the menu to send a message to Hay Day support directly.</p>
- <h3>Q: How do I get more diamonds in Hay Day?</h3>
- <p>A: Diamonds are Hay Day's premium currency, which you can use to speed up processes, buy special items, or unlock features. You can get more diamonds in Hay Day by:</p>
- <ul>
- <li>Leveling up your farm</li>
-
- <li>Finding them in mystery boxes or fishing spots</li>
- <li>Watching ads or participating in events</li>
- <li>Buying them with real money</li>
- </ul>
- <br />
- <br />
spaces/BetterAPI/BetterChat_new/svelte.config.js DELETED
@@ -1,26 +0,0 @@
- import adapter from "@sveltejs/adapter-node";
- import { vitePreprocess } from "@sveltejs/kit/vite";
- import dotenv from "dotenv";
- import pkg from "./package.json" assert { type: "json" };
-
- dotenv.config({ path: "./.env.local" });
- dotenv.config({ path: "./.env" });
-
- process.env.PUBLIC_VERSION = pkg.version.replace(/\.0\b/g, "");
-
- /** @type {import('@sveltejs/kit').Config} */
- const config = {
- 	// Consult https://kit.svelte.dev/docs/integrations#preprocessors
- 	// for more information about preprocessors
- 	preprocess: vitePreprocess(),
-
- 	kit: {
- 		adapter: adapter(),
-
- 		paths: {
- 			base: process.env.APP_BASE || "",
- 		},
- 	},
- };
-
- export default config;
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/resource.py DELETED
@@ -1,364 +0,0 @@
- # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License"). You
- # may not use this file except in compliance with the License. A copy of
- # the License is located at
- #
- # https://aws.amazon.com/apache2.0/
- #
- # or in the "license" file accompanying this file. This file is
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
- # ANY KIND, either express or implied. See the License for the specific
- # language governing permissions and limitations under the License.
- import os
-
- from botocore import xform_name
- from botocore.docs.bcdoc.restdoc import DocumentStructure
- from botocore.docs.utils import get_official_service_name
-
- from boto3.docs.action import ActionDocumenter
- from boto3.docs.attr import (
-     document_attribute,
-     document_identifier,
-     document_reference,
- )
- from boto3.docs.base import BaseDocumenter
- from boto3.docs.collection import CollectionDocumenter
- from boto3.docs.subresource import SubResourceDocumenter
- from boto3.docs.utils import (
-     add_resource_type_overview,
-     get_identifier_args_for_signature,
-     get_identifier_description,
-     get_identifier_values_for_example,
- )
- from boto3.docs.waiter import WaiterResourceDocumenter
-
-
- class ResourceDocumenter(BaseDocumenter):
-     def __init__(self, resource, botocore_session, root_docs_path):
-         super().__init__(resource)
-         self._botocore_session = botocore_session
-         self._root_docs_path = root_docs_path
-         self._resource_sub_path = self._resource_name.lower()
-         if self._resource_name == self._service_name:
-             self._resource_sub_path = 'service-resource'
-
-     def document_resource(self, section):
-         self._add_title(section)
-         self._add_resource_note(section)
-         self._add_intro(section)
-         self._add_identifiers(section)
-         self._add_attributes(section)
-         self._add_references(section)
-         self._add_actions(section)
-         self._add_sub_resources(section)
-         self._add_collections(section)
-         self._add_waiters(section)
-
-     def _add_title(self, section):
-         title_section = section.add_new_section('title')
-         title_section.style.h2(self._resource_name)
-
-     def _add_intro(self, section):
-         identifier_names = []
-         if self._resource_model.identifiers:
-             for identifier in self._resource_model.identifiers:
-                 identifier_names.append(identifier.name)
-
-         # Write out the class signature.
-         class_args = get_identifier_args_for_signature(identifier_names)
-         start_class = section.add_new_section('start_class')
-         start_class.style.start_sphinx_py_class(
-             class_name=f'{self.class_name}({class_args})'
-         )
-
-         # Add as short description about the resource
-         description_section = start_class.add_new_section('description')
-         self._add_description(description_section)
-
-         # Add an example of how to instantiate the resource
-         example_section = start_class.add_new_section('example')
-         self._add_example(example_section, identifier_names)
-
-         # Add the description for the parameters to instantiate the
-         # resource.
-         param_section = start_class.add_new_section('params')
-         self._add_params_description(param_section, identifier_names)
-
-         end_class = section.add_new_section('end_class')
-         end_class.style.end_sphinx_py_class()
-
-     def _add_description(self, section):
-         official_service_name = get_official_service_name(self._service_model)
-         section.write(
-             'A resource representing an {} {}'.format(
-                 official_service_name, self._resource_name
-             )
-         )
-
-     def _add_example(self, section, identifier_names):
-         section.style.start_codeblock()
-         section.style.new_line()
-         section.write('import boto3')
-         section.style.new_line()
-         section.style.new_line()
-         section.write(
-             '{} = boto3.resource(\'{}\')'.format(
-                 self._service_name, self._service_name
-             )
-         )
-         section.style.new_line()
-         example_values = get_identifier_values_for_example(identifier_names)
-         section.write(
-             '{} = {}.{}({})'.format(
-                 xform_name(self._resource_name),
-                 self._service_name,
-                 self._resource_name,
-                 example_values,
-             )
-         )
-         section.style.end_codeblock()
-
-     def _add_params_description(self, section, identifier_names):
-         for identifier_name in identifier_names:
-             description = get_identifier_description(
-                 self._resource_name, identifier_name
-             )
-             section.write(f':type {identifier_name}: string')
-             section.style.new_line()
-             section.write(f':param {identifier_name}: {description}')
-             section.style.new_line()
-
-     def _add_overview_of_member_type(self, section, resource_member_type):
-         section.style.new_line()
-         section.write(
-             f'These are the resource\'s available {resource_member_type}:'
-         )
-         section.style.new_line()
-         section.style.toctree()
-         for member in self.member_map[resource_member_type]:
-             section.style.tocitem(f'{member}')
-
-     def _add_identifiers(self, section):
-         identifiers = self._resource.meta.resource_model.identifiers
-         section = section.add_new_section('identifiers')
-         member_list = []
-         if identifiers:
-             self.member_map['identifiers'] = member_list
-             add_resource_type_overview(
-                 section=section,
-                 resource_type='Identifiers',
-                 description=(
-                     'Identifiers are properties of a resource that are '
-                     'set upon instantiation of the resource.'
-                 ),
-                 intro_link='identifiers_attributes_intro',
-             )
-         for identifier in identifiers:
-             member_list.append(identifier.name)
-             # Create a new DocumentStructure for each identifier and add contents.
-             identifier_doc = DocumentStructure(identifier.name, target='html')
-             breadcrumb_section = identifier_doc.add_new_section('breadcrumb')
-             breadcrumb_section.style.ref(self._resource_class_name, 'index')
-             breadcrumb_section.write(f' / Identifier / {identifier.name}')
-             identifier_doc.add_title_section(identifier.name)
-             identifier_section = identifier_doc.add_new_section(
-                 identifier.name,
-                 context={'qualifier': f'{self.class_name}.'},
-             )
-             document_identifier(
-                 section=identifier_section,
-                 resource_name=self._resource_name,
-                 identifier_model=identifier,
-             )
-             # Write identifiers in individual/nested files.
-             # Path: <root>/reference/services/<service>/<resource_name>/<identifier_name>.rst
-             identifiers_dir_path = os.path.join(
-                 self._root_docs_path,
-                 f'{self._service_name}',
-                 f'{self._resource_sub_path}',
-             )
-             identifier_doc.write_to_file(identifiers_dir_path, identifier.name)
-
-         if identifiers:
-             self._add_overview_of_member_type(section, 'identifiers')
-
-     def _add_attributes(self, section):
-         service_model = self._resource.meta.client.meta.service_model
-         attributes = {}
-         if self._resource.meta.resource_model.shape:
-             shape = service_model.shape_for(
-                 self._resource.meta.resource_model.shape
-             )
-             attributes = self._resource.meta.resource_model.get_attributes(
-                 shape
-             )
-         section = section.add_new_section('attributes')
-         attribute_list = []
-         if attributes:
-             add_resource_type_overview(
-                 section=section,
-                 resource_type='Attributes',
-                 description=(
-                     'Attributes provide access'
-                     ' to the properties of a resource. Attributes are lazy-'
-                     'loaded the first time one is accessed via the'
-                     ' :py:meth:`load` method.'
-                 ),
-                 intro_link='identifiers_attributes_intro',
-             )
-             self.member_map['attributes'] = attribute_list
-         for attr_name in sorted(attributes):
-             _, attr_shape = attributes[attr_name]
-             attribute_list.append(attr_name)
-             # Create a new DocumentStructure for each attribute and add contents.
-             attribute_doc = DocumentStructure(attr_name, target='html')
-             breadcrumb_section = attribute_doc.add_new_section('breadcrumb')
-             breadcrumb_section.style.ref(self._resource_class_name, 'index')
-             breadcrumb_section.write(f' / Attribute / {attr_name}')
-             attribute_doc.add_title_section(attr_name)
-             attribute_section = attribute_doc.add_new_section(
-                 attr_name,
-                 context={'qualifier': f'{self.class_name}.'},
-             )
-             document_attribute(
-                 section=attribute_section,
-                 service_name=self._service_name,
-                 resource_name=self._resource_name,
-                 attr_name=attr_name,
-                 event_emitter=self._resource.meta.client.meta.events,
-                 attr_model=attr_shape,
-             )
-             # Write attributes in individual/nested files.
-             # Path: <root>/reference/services/<service>/<resource_name>/<attribute_name>.rst
-             attributes_dir_path = os.path.join(
-                 self._root_docs_path,
-                 f'{self._service_name}',
-                 f'{self._resource_sub_path}',
-             )
-             attribute_doc.write_to_file(attributes_dir_path, attr_name)
-         if attributes:
-             self._add_overview_of_member_type(section, 'attributes')
-
-     def _add_references(self, section):
-         section = section.add_new_section('references')
-         references = self._resource.meta.resource_model.references
-         reference_list = []
-         if references:
-             add_resource_type_overview(
-                 section=section,
-                 resource_type='References',
-                 description=(
-                     'References are related resource instances that have '
-                     'a belongs-to relationship.'
-                 ),
-                 intro_link='references_intro',
-             )
-             self.member_map['references'] = reference_list
-             self._add_overview_of_member_type(section, 'references')
-         for reference in references:
-             reference_list.append(reference.name)
-             # Create a new DocumentStructure for each reference and add contents.
-             reference_doc = DocumentStructure(reference.name, target='html')
-             breadcrumb_section = reference_doc.add_new_section('breadcrumb')
-             breadcrumb_section.style.ref(self._resource_class_name, 'index')
-             breadcrumb_section.write(f' / Reference / {reference.name}')
-             reference_doc.add_title_section(reference.name)
-             reference_section = reference_doc.add_new_section(
-                 reference.name,
-                 context={'qualifier': f'{self.class_name}.'},
-             )
-             document_reference(
-                 section=reference_section,
-                 reference_model=reference,
-             )
-             # Write references in individual/nested files.
-             # Path: <root>/reference/services/<service>/<resource_name>/<reference_name>.rst
-             references_dir_path = os.path.join(
-                 self._root_docs_path,
-                 f'{self._service_name}',
-                 f'{self._resource_sub_path}',
-             )
-             reference_doc.write_to_file(references_dir_path, reference.name)
-         if references:
-             self._add_overview_of_member_type(section, 'references')
-
-     def _add_actions(self, section):
-         section = section.add_new_section('actions')
-         actions = self._resource.meta.resource_model.actions
-         if actions:
-             documenter = ActionDocumenter(self._resource, self._root_docs_path)
-             documenter.member_map = self.member_map
-             documenter.document_actions(section)
-             self._add_overview_of_member_type(section, 'actions')
-
-     def _add_sub_resources(self, section):
-         section = section.add_new_section('sub-resources')
-         sub_resources = self._resource.meta.resource_model.subresources
-         if sub_resources:
-             documenter = SubResourceDocumenter(
-                 self._resource, self._root_docs_path
-             )
-             documenter.member_map = self.member_map
-             documenter.document_sub_resources(section)
-             self._add_overview_of_member_type(section, 'sub-resources')
-
-     def _add_collections(self, section):
-         section = section.add_new_section('collections')
-         collections = self._resource.meta.resource_model.collections
-         if collections:
-             documenter = CollectionDocumenter(
-                 self._resource, self._root_docs_path
-             )
-             documenter.member_map = self.member_map
-             documenter.document_collections(section)
-             self._add_overview_of_member_type(section, 'collections')
-
-     def _add_waiters(self, section):
-         section = section.add_new_section('waiters')
-         waiters = self._resource.meta.resource_model.waiters
-         if waiters:
-             service_waiter_model = self._botocore_session.get_waiter_model(
-                 self._service_name
-             )
-             documenter = WaiterResourceDocumenter(
-                 self._resource, service_waiter_model, self._root_docs_path
-             )
-             documenter.member_map = self.member_map
-             documenter.document_resource_waiters(section)
-             self._add_overview_of_member_type(section, 'waiters')
-
-     def _add_resource_note(self, section):
-         section = section.add_new_section('feature-freeze')
-         section.style.start_note()
-         section.write(
-             "Before using anything on this page, please refer to the resources "
-             ":doc:`user guide <../../../../guide/resources>` for the most recent "
-             "guidance on using resources."
-         )
-         section.style.end_note()
-
-
- class ServiceResourceDocumenter(ResourceDocumenter):
-     @property
-     def class_name(self):
-         return f'{self._service_docs_name}.ServiceResource'
-
-     def _add_title(self, section):
-         title_section = section.add_new_section('title')
-         title_section.style.h2('Service Resource')
-
-     def _add_description(self, section):
-         official_service_name = get_official_service_name(self._service_model)
-         section.write(f'A resource representing {official_service_name}')
-
-     def _add_example(self, section, identifier_names):
-         section.style.start_codeblock()
-         section.style.new_line()
-         section.write('import boto3')
-         section.style.new_line()
-         section.style.new_line()
-         section.write(
-             f'{self._service_name} = boto3.resource(\'{self._service_name}\')'
-         )
-         section.style.end_codeblock()
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/docs/waiter.py DELETED
@@ -1,130 +0,0 @@
- # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License"). You
- # may not use this file except in compliance with the License. A copy of
- # the License is located at
- #
- # https://aws.amazon.com/apache2.0/
- #
- # or in the "license" file accompanying this file. This file is
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
- # ANY KIND, either express or implied. See the License for the specific
- # language governing permissions and limitations under the License.
- import os
-
- from botocore import xform_name
- from botocore.docs.bcdoc.restdoc import DocumentStructure
- from botocore.docs.method import document_model_driven_method
- from botocore.utils import get_service_module_name
-
- from boto3.docs.base import NestedDocumenter
- from boto3.docs.utils import (
-     add_resource_type_overview,
-     get_resource_ignore_params,
- )
-
-
- class WaiterResourceDocumenter(NestedDocumenter):
-     def __init__(self, resource, service_waiter_model, root_docs_path):
-         super().__init__(resource, root_docs_path)
-         self._service_waiter_model = service_waiter_model
-
-     def document_resource_waiters(self, section):
-         waiters = self._resource.meta.resource_model.waiters
-         add_resource_type_overview(
-             section=section,
-             resource_type='Waiters',
-             description=(
-                 'Waiters provide an interface to wait for a resource'
-                 ' to reach a specific state.'
-             ),
-             intro_link='waiters_intro',
-         )
-         waiter_list = []
-         self.member_map['waiters'] = waiter_list
-         for waiter in waiters:
-             waiter_list.append(waiter.name)
-             # Create a new DocumentStructure for each waiter and add contents.
-             waiter_doc = DocumentStructure(waiter.name, target='html')
-             breadcrumb_section = waiter_doc.add_new_section('breadcrumb')
-             breadcrumb_section.style.ref(self._resource_class_name, 'index')
-             breadcrumb_section.write(f' / Waiter / {waiter.name}')
-             waiter_doc.add_title_section(waiter.name)
-             waiter_section = waiter_doc.add_new_section(
-                 waiter.name,
-                 context={'qualifier': f'{self.class_name}.'},
-             )
-             document_resource_waiter(
-                 section=waiter_section,
-                 resource_name=self._resource_name,
-                 event_emitter=self._resource.meta.client.meta.events,
-                 service_model=self._service_model,
-                 resource_waiter_model=waiter,
-                 service_waiter_model=self._service_waiter_model,
-             )
-             # Write waiters in individual/nested files.
-             # Path: <root>/reference/services/<service>/<resource_name>/<waiter_name>.rst
-             waiters_dir_path = os.path.join(
-                 self._root_docs_path,
-                 f'{self._service_name}',
-                 f'{self._resource_sub_path}',
-             )
-             waiter_doc.write_to_file(waiters_dir_path, waiter.name)
-
-
- def document_resource_waiter(
-     section,
-     resource_name,
-     event_emitter,
-     service_model,
-     resource_waiter_model,
-     service_waiter_model,
-     include_signature=True,
- ):
-     waiter_model = service_waiter_model.get_waiter(
-         resource_waiter_model.waiter_name
-     )
-     operation_model = service_model.operation_model(waiter_model.operation)
-
-     ignore_params = get_resource_ignore_params(resource_waiter_model.params)
-     service_module_name = get_service_module_name(service_model)
-     description = (
-         'Waits until this {} is {}. This method calls '
-         ':py:meth:`{}.Waiter.{}.wait` which polls. '
-         ':py:meth:`{}.Client.{}` every {} seconds until '
-         'a successful state is reached. An error is returned '
-         'after {} failed checks.'.format(
-             resource_name,
-             ' '.join(resource_waiter_model.name.split('_')[2:]),
-             service_module_name,
-             xform_name(resource_waiter_model.waiter_name),
-             service_module_name,
-             xform_name(waiter_model.operation),
-             waiter_model.delay,
-             waiter_model.max_attempts,
-         )
-     )
-     example_prefix = '{}.{}'.format(
-         xform_name(resource_name), resource_waiter_model.name
-     )
-     full_waiter_name = (
-         f"{section.context.get('qualifier', '')}{resource_waiter_model.name}"
-     )
-     document_model_driven_method(
-         section=section,
-         method_name=full_waiter_name,
-         operation_model=operation_model,
-         event_emitter=event_emitter,
-         example_prefix=example_prefix,
-         method_description=description,
-         exclude_input=ignore_params,
-         include_signature=include_signature,
-     )
-     if 'return' in section.available_sections:
-         # Waiters do not return anything so we should remove
-         # any sections that may document the underlying return
-         # value of the client method.
-         return_section = section.get_section('return')
-         return_section.clear_text()
-         return_section.remove_all_sections()
-         return_section.write(':returns: None')
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/handlers.py DELETED
@@ -1,1395 +0,0 @@
- # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License"). You
- # may not use this file except in compliance with the License. A copy of
- # the License is located at
- #
- # http://aws.amazon.com/apache2.0/
- #
- # or in the "license" file accompanying this file. This file is
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
- # ANY KIND, either express or implied. See the License for the specific
- # language governing permissions and limitations under the License.
-
- """Builtin event handlers.
-
- This module contains builtin handlers for events emitted by botocore.
- """
-
- import base64
- import copy
- import logging
- import os
- import re
- import uuid
- import warnings
- from io import BytesIO
-
- import botocore
- import botocore.auth
- from botocore import utils
- from botocore.compat import (
-     ETree,
-     OrderedDict,
-     XMLParseError,
-     ensure_bytes,
-     get_md5,
-     json,
-     quote,
-     unquote,
-     unquote_str,
-     urlsplit,
-     urlunsplit,
- )
- from botocore.docs.utils import (
-     AppendParamDocumentation,
-     AutoPopulatedParam,
-     HideParamFromOperations,
- )
- from botocore.endpoint_provider import VALID_HOST_LABEL_RE
- from botocore.exceptions import (
-     AliasConflictParameterError,
-     ParamValidationError,
-     UnsupportedTLSVersionWarning,
- )
- from botocore.regions import EndpointResolverBuiltins
- from botocore.signers import (
-     add_generate_db_auth_token,
-     add_generate_presigned_post,
-     add_generate_presigned_url,
- )
- from botocore.utils import (
-     SAFE_CHARS,
-     ArnParser,
-     conditionally_calculate_md5,
-     percent_encode,
-     switch_host_with_param,
- )
-
- # Keep these imported. There's pre-existing code that uses them.
- from botocore import retryhandler  # noqa
- from botocore import translate  # noqa
- from botocore.compat import MD5_AVAILABLE  # noqa
- from botocore.exceptions import MissingServiceIdError  # noqa
- from botocore.utils import hyphenize_service_id  # noqa
- from botocore.utils import is_global_accesspoint  # noqa
-
-
- logger = logging.getLogger(__name__)
-
- REGISTER_FIRST = object()
- REGISTER_LAST = object()
- # From the S3 docs:
- # The rules for bucket names in the US Standard region allow bucket names
- # to be as long as 255 characters, and bucket names can contain any
- # combination of uppercase letters, lowercase letters, numbers, periods
- # (.), hyphens (-), and underscores (_).
- VALID_BUCKET = re.compile(r'^[a-zA-Z0-9.\-_]{1,255}$')
- _ACCESSPOINT_ARN = (
-     r'^arn:(aws).*:(s3|s3-object-lambda):[a-z\-0-9]*:[0-9]{12}:accesspoint[/:]'
-     r'[a-zA-Z0-9\-.]{1,63}$'
- )
- _OUTPOST_ARN = (
-     r'^arn:(aws).*:s3-outposts:[a-z\-0-9]+:[0-9]{12}:outpost[/:]'
-     r'[a-zA-Z0-9\-]{1,63}[/:]accesspoint[/:][a-zA-Z0-9\-]{1,63}$'
- )
- VALID_S3_ARN = re.compile('|'.join([_ACCESSPOINT_ARN, _OUTPOST_ARN]))
- # signing names used for the services s3 and s3-control, for example in
- # botocore/data/s3/2006-03-01/endpoints-rule-set-1.json
- S3_SIGNING_NAMES = ('s3', 's3-outposts', 's3-object-lambda')
- VERSION_ID_SUFFIX = re.compile(r'\?versionId=[^\s]+$')
-
- SERVICE_NAME_ALIASES = {'runtime.sagemaker': 'sagemaker-runtime'}
-
-
- def handle_service_name_alias(service_name, **kwargs):
-     return SERVICE_NAME_ALIASES.get(service_name, service_name)
-
-
- def add_recursion_detection_header(params, **kwargs):
-     has_lambda_name = 'AWS_LAMBDA_FUNCTION_NAME' in os.environ
-     trace_id = os.environ.get('_X_AMZN_TRACE_ID')
-     if has_lambda_name and trace_id:
-         headers = params['headers']
-         if 'X-Amzn-Trace-Id' not in headers:
-             headers['X-Amzn-Trace-Id'] = quote(trace_id, safe='-=;:+&[]{}"\',')
-
-
- def escape_xml_payload(params, **kwargs):
-     # Replace \r and \n with the escaped sequence over the whole XML document
-     # to avoid linebreak normalization modifying customer input when the
-     # document is parsed. Ideally, we would do this in ElementTree.tostring,
-     # but it doesn't allow us to override entity escaping for text fields. For
-     # this operation \r and \n can only appear in the XML document if they were
-     # passed as part of the customer input.
-     body = params['body']
-     if b'\r' in body:
-         body = body.replace(b'\r', b'&#xD;')
-     if b'\n' in body:
-         body = body.replace(b'\n', b'&#xA;')
-
-     params['body'] = body
-
-
- def check_for_200_error(response, **kwargs):
-     # From: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html
-     # There are two opportunities for a copy request to return an error. One
-     # can occur when Amazon S3 receives the copy request and the other can
-     # occur while Amazon S3 is copying the files. If the error occurs before
-     # the copy operation starts, you receive a standard Amazon S3 error. If the
-     # error occurs during the copy operation, the error response is embedded in
-     # the 200 OK response. This means that a 200 OK response can contain either
-     # a success or an error. Make sure to design your application to parse the
-     # contents of the response and handle it appropriately.
-     #
-     # So this handler checks for this case. Even though the server sends a
-     # 200 response, conceptually this should be handled exactly like a
-     # 500 response (with respect to raising exceptions, retries, etc.)
-     # We're connected *before* all the other retry logic handlers, so as long
-     # as we switch the error code to 500, we'll retry the error as expected.
-     if response is None:
-         # A None response can happen if an exception is raised while
-         # trying to retrieve the response. See Endpoint._get_response().
-         return
-     http_response, parsed = response
-     if _looks_like_special_case_error(http_response):
-         logger.debug(
-             "Error found for response with 200 status code, "
-             "errors: %s, changing status code to "
-             "500.",
-             parsed,
-         )
-         http_response.status_code = 500
-
-
- def _looks_like_special_case_error(http_response):
-     if http_response.status_code == 200:
-         try:
-             parser = ETree.XMLParser(
-                 target=ETree.TreeBuilder(), encoding='utf-8'
-             )
-             parser.feed(http_response.content)
172
- root = parser.close()
173
- except XMLParseError:
174
- # In cases of network disruptions, we may end up with a partial
175
- # streamed response from S3. We need to treat these cases as
176
- # 500 Service Errors and try again.
177
- return True
178
- if root.tag == 'Error':
179
- return True
180
- return False
181
-
182
-
183
- def set_operation_specific_signer(context, signing_name, **kwargs):
184
- """Choose the operation-specific signer.
185
-
186
- Individual operations may have a different auth type than the service as a
187
- whole. This will most often manifest as operations that should not be
188
- authenticated at all, but can include other auth modes such as sigv4
189
- without body signing.
190
- """
191
- auth_type = context.get('auth_type')
192
-
193
- # Auth type will be None if the operation doesn't have a configured auth
194
- # type.
195
- if not auth_type:
196
- return
197
-
198
- # Auth type will be the string value 'none' if the operation should not
199
- # be signed at all.
200
- if auth_type == 'none':
201
- return botocore.UNSIGNED
202
-
203
- if auth_type == 'bearer':
204
- return 'bearer'
205
-
206
- if auth_type.startswith('v4'):
207
- if auth_type == 'v4a':
208
- # If sigv4a is chosen, we must add additional signing config for
209
- # global signature.
210
- signing = {'region': '*', 'signing_name': signing_name}
211
- if 'signing' in context:
212
- context['signing'].update(signing)
213
- else:
214
- context['signing'] = signing
215
- signature_version = 'v4a'
216
- else:
217
- signature_version = 'v4'
218
-
219
- # If the operation needs an unsigned body, we set additional context
220
- # allowing the signer to be aware of this.
221
- if auth_type == 'v4-unsigned-body':
222
- context['payload_signing_enabled'] = False
223
-
224
- # Signing names used by s3 and s3-control use customized signers "s3v4"
225
- # and "s3v4a".
226
- if signing_name in S3_SIGNING_NAMES:
227
- signature_version = f's3{signature_version}'
228
-
229
- return signature_version
230
-
231
-
232
- def decode_console_output(parsed, **kwargs):
233
- if 'Output' in parsed:
234
- try:
235
- # We're using 'replace' for errors because it is
236
- # possible that console output contains non string
237
- # chars we can't utf-8 decode.
238
- value = base64.b64decode(
239
- bytes(parsed['Output'], 'latin-1')
240
- ).decode('utf-8', 'replace')
241
- parsed['Output'] = value
242
- except (ValueError, TypeError, AttributeError):
243
- logger.debug('Error decoding base64', exc_info=True)
244
-
245
-
246
- def generate_idempotent_uuid(params, model, **kwargs):
247
- for name in model.idempotent_members:
248
- if name not in params:
249
- params[name] = str(uuid.uuid4())
250
- logger.debug(
251
- "injecting idempotency token (%s) into param '%s'."
252
- % (params[name], name)
253
- )
254
-
255
-
256
- def decode_quoted_jsondoc(value):
257
- try:
258
- value = json.loads(unquote(value))
259
- except (ValueError, TypeError):
260
- logger.debug('Error loading quoted JSON', exc_info=True)
261
- return value
262
-
263
-
264
- def json_decode_template_body(parsed, **kwargs):
265
- if 'TemplateBody' in parsed:
266
- try:
267
- value = json.loads(
268
- parsed['TemplateBody'], object_pairs_hook=OrderedDict
269
- )
270
- parsed['TemplateBody'] = value
271
- except (ValueError, TypeError):
272
- logger.debug('error loading JSON', exc_info=True)
273
-
274
-
275
- def validate_bucket_name(params, **kwargs):
276
- if 'Bucket' not in params:
277
- return
278
- bucket = params['Bucket']
279
- if not VALID_BUCKET.search(bucket) and not VALID_S3_ARN.search(bucket):
280
- error_msg = (
281
- f'Invalid bucket name "{bucket}": Bucket name must match '
282
- f'the regex "{VALID_BUCKET.pattern}" or be an ARN matching '
283
- f'the regex "{VALID_S3_ARN.pattern}"'
284
- )
285
- raise ParamValidationError(report=error_msg)
286
-
287
-
288
- def sse_md5(params, **kwargs):
289
- """
290
- S3 server-side encryption requires the encryption key to be sent to the
291
- server base64 encoded, as well as a base64-encoded MD5 hash of the
292
- encryption key. This handler does both if the MD5 has not been set by
293
- the caller.
294
- """
295
- _sse_md5(params, 'SSECustomer')
296
-
297
-
298
- def copy_source_sse_md5(params, **kwargs):
299
- """
300
- S3 server-side encryption requires the encryption key to be sent to the
301
- server base64 encoded, as well as a base64-encoded MD5 hash of the
302
- encryption key. This handler does both if the MD5 has not been set by
303
- the caller, specifically for the copy-source SSE-C key.
304
- """
305
- _sse_md5(params, 'CopySourceSSECustomer')
306
-
307
-
308
- def _sse_md5(params, sse_member_prefix='SSECustomer'):
309
- if not _needs_s3_sse_customization(params, sse_member_prefix):
310
- return
311
-
312
- sse_key_member = sse_member_prefix + 'Key'
313
- sse_md5_member = sse_member_prefix + 'KeyMD5'
314
- key_as_bytes = params[sse_key_member]
315
- if isinstance(key_as_bytes, str):
316
- key_as_bytes = key_as_bytes.encode('utf-8')
317
- key_md5_str = base64.b64encode(get_md5(key_as_bytes).digest()).decode(
318
- 'utf-8'
319
- )
320
- key_b64_encoded = base64.b64encode(key_as_bytes).decode('utf-8')
321
- params[sse_key_member] = key_b64_encoded
322
- params[sse_md5_member] = key_md5_str
323
-
324
-
325
- def _needs_s3_sse_customization(params, sse_member_prefix):
326
- return (
327
- params.get(sse_member_prefix + 'Key') is not None
328
- and sse_member_prefix + 'KeyMD5' not in params
329
- )
330
-
331
-
332
- def disable_signing(**kwargs):
333
- """
334
- This handler disables request signing by setting the signer
335
- name to a special sentinel value.
336
- """
337
- return botocore.UNSIGNED
338
-
339
-
340
- def add_expect_header(model, params, **kwargs):
341
- if model.http.get('method', '') not in ['PUT', 'POST']:
342
- return
343
- if 'body' in params:
344
- body = params['body']
345
- if hasattr(body, 'read'):
346
- # Any file like object will use an expect 100-continue
347
- # header regardless of size.
348
- logger.debug("Adding expect 100 continue header to request.")
349
- params['headers']['Expect'] = '100-continue'
350
-
351
-
352
- class DeprecatedServiceDocumenter:
353
- def __init__(self, replacement_service_name):
354
- self._replacement_service_name = replacement_service_name
355
-
356
- def inject_deprecation_notice(self, section, event_name, **kwargs):
357
- section.style.start_important()
358
- section.write('This service client is deprecated. Please use ')
359
- section.style.ref(
360
- self._replacement_service_name,
361
- self._replacement_service_name,
362
- )
363
- section.write(' instead.')
364
- section.style.end_important()
365
-
366
-
367
- def document_copy_source_form(section, event_name, **kwargs):
368
- if 'request-example' in event_name:
369
- parent = section.get_section('structure-value')
370
- param_line = parent.get_section('CopySource')
371
- value_portion = param_line.get_section('member-value')
372
- value_portion.clear_text()
373
- value_portion.write(
374
- "'string' or {'Bucket': 'string', "
375
- "'Key': 'string', 'VersionId': 'string'}"
376
- )
377
- elif 'request-params' in event_name:
378
- param_section = section.get_section('CopySource')
379
- type_section = param_section.get_section('param-type')
380
- type_section.clear_text()
381
- type_section.write(':type CopySource: str or dict')
382
- doc_section = param_section.get_section('param-documentation')
383
- doc_section.clear_text()
384
- doc_section.write(
385
- "The name of the source bucket, key name of the source object, "
386
- "and optional version ID of the source object. You can either "
387
- "provide this value as a string or a dictionary. The "
388
- "string form is {bucket}/{key} or "
389
- "{bucket}/{key}?versionId={versionId} if you want to copy a "
390
- "specific version. You can also provide this value as a "
391
- "dictionary. The dictionary format is recommended over "
392
- "the string format because it is more explicit. The dictionary "
393
- "format is: {'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}."
394
- " Note that the VersionId key is optional and may be omitted."
395
- " To specify an S3 access point, provide the access point"
396
- " ARN for the ``Bucket`` key in the copy source dictionary. If you"
397
- " want to provide the copy source for an S3 access point as a"
398
- " string instead of a dictionary, the ARN provided must be the"
399
- " full S3 access point object ARN"
400
- " (i.e. {accesspoint_arn}/object/{key})"
401
- )
402
-
403
-
404
- def handle_copy_source_param(params, **kwargs):
405
- """Convert CopySource param for CopyObject/UploadPartCopy.
406
-
407
- This handler will deal with two cases:
408
-
409
- * CopySource provided as a string. We'll make a best effort
410
- to URL encode the key name as required. This will require
411
- parsing the bucket and version id from the CopySource value
412
- and only encoding the key.
413
- * CopySource provided as a dict. In this case we're
414
- explicitly given the Bucket, Key, and VersionId so we're
415
- able to encode the key and ensure this value is serialized
416
- and correctly sent to S3.
417
-
418
- """
419
- source = params.get('CopySource')
420
- if source is None:
421
- # The call will eventually fail but we'll let the
422
- # param validator take care of this. It will
423
- # give a better error message.
424
- return
425
- if isinstance(source, str):
426
- params['CopySource'] = _quote_source_header(source)
427
- elif isinstance(source, dict):
428
- params['CopySource'] = _quote_source_header_from_dict(source)
429
-
430
-
431
- def _quote_source_header_from_dict(source_dict):
432
- try:
433
- bucket = source_dict['Bucket']
434
- key = source_dict['Key']
435
- version_id = source_dict.get('VersionId')
436
- if VALID_S3_ARN.search(bucket):
437
- final = f'{bucket}/object/{key}'
438
- else:
439
- final = f'{bucket}/{key}'
440
- except KeyError as e:
441
- raise ParamValidationError(
442
- report=f'Missing required parameter: {str(e)}'
443
- )
444
- final = percent_encode(final, safe=SAFE_CHARS + '/')
445
- if version_id is not None:
446
- final += '?versionId=%s' % version_id
447
- return final
448
-
449
-
450
- def _quote_source_header(value):
451
- result = VERSION_ID_SUFFIX.search(value)
452
- if result is None:
453
- return percent_encode(value, safe=SAFE_CHARS + '/')
454
- else:
455
- first, version_id = value[: result.start()], value[result.start() :]
456
- return percent_encode(first, safe=SAFE_CHARS + '/') + version_id
457
-
458
-
459
- def _get_cross_region_presigned_url(
460
- request_signer, request_dict, model, source_region, destination_region
461
- ):
462
- # The better way to do this is to actually get the
463
- # endpoint_resolver and get the endpoint_url given the
464
- # source region. In this specific case, we know that
465
- # we can safely replace the dest region with the source
466
- # region because of the supported EC2 regions, but in
467
- # general this is not a safe assumption to make.
468
- # I think eventually we should try to plumb through something
469
- # that allows us to resolve endpoints from regions.
470
- request_dict_copy = copy.deepcopy(request_dict)
471
- request_dict_copy['body']['DestinationRegion'] = destination_region
472
- request_dict_copy['url'] = request_dict['url'].replace(
473
- destination_region, source_region
474
- )
475
- request_dict_copy['method'] = 'GET'
476
- request_dict_copy['headers'] = {}
477
- return request_signer.generate_presigned_url(
478
- request_dict_copy, region_name=source_region, operation_name=model.name
479
- )
480
-
481
-
482
- def _get_presigned_url_source_and_destination_regions(request_signer, params):
483
- # Gets the source and destination regions to be used
484
- destination_region = request_signer._region_name
485
- source_region = params.get('SourceRegion')
486
- return source_region, destination_region
487
-
488
-
489
- def inject_presigned_url_ec2(params, request_signer, model, **kwargs):
490
- # The customer can still provide this, so we should pass if they do.
491
- if 'PresignedUrl' in params['body']:
492
- return
493
- src, dest = _get_presigned_url_source_and_destination_regions(
494
- request_signer, params['body']
495
- )
496
- url = _get_cross_region_presigned_url(
497
- request_signer, params, model, src, dest
498
- )
499
- params['body']['PresignedUrl'] = url
500
- # EC2 Requires that the destination region be sent over the wire in
501
- # addition to the source region.
502
- params['body']['DestinationRegion'] = dest
503
-
504
-
505
- def inject_presigned_url_rds(params, request_signer, model, **kwargs):
506
- # SourceRegion is not required for RDS operations, so it's possible that
507
- # it isn't set. In that case it's probably a local copy so we don't need
508
- # to do anything else.
509
- if 'SourceRegion' not in params['body']:
510
- return
511
-
512
- src, dest = _get_presigned_url_source_and_destination_regions(
513
- request_signer, params['body']
514
- )
515
-
516
- # Since SourceRegion isn't actually modeled for RDS, it needs to be
517
- # removed from the request params before we send the actual request.
518
- del params['body']['SourceRegion']
519
-
520
- if 'PreSignedUrl' in params['body']:
521
- return
522
-
523
- url = _get_cross_region_presigned_url(
524
- request_signer, params, model, src, dest
525
- )
526
- params['body']['PreSignedUrl'] = url
527
-
528
-
529
- def json_decode_policies(parsed, model, **kwargs):
530
- # Any time an IAM operation returns a policy document
531
- # it is a string that is json that has been urlencoded,
532
- # i.e urlencode(json.dumps(policy_document)).
533
- # To give users something more useful, we will urldecode
534
- # this value and json.loads() the result so that they have
535
- # the policy document as a dictionary.
536
- output_shape = model.output_shape
537
- if output_shape is not None:
538
- _decode_policy_types(parsed, model.output_shape)
539
-
540
-
541
- def _decode_policy_types(parsed, shape):
542
- # IAM consistently uses the policyDocumentType shape to indicate
543
- # strings that have policy documents.
544
- shape_name = 'policyDocumentType'
545
- if shape.type_name == 'structure':
546
- for member_name, member_shape in shape.members.items():
547
- if (
548
- member_shape.type_name == 'string'
549
- and member_shape.name == shape_name
550
- and member_name in parsed
551
- ):
552
- parsed[member_name] = decode_quoted_jsondoc(
553
- parsed[member_name]
554
- )
555
- elif member_name in parsed:
556
- _decode_policy_types(parsed[member_name], member_shape)
557
- if shape.type_name == 'list':
558
- shape_member = shape.member
559
- for item in parsed:
560
- _decode_policy_types(item, shape_member)
561
-
562
-
563
- def parse_get_bucket_location(parsed, http_response, **kwargs):
564
- # s3.GetBucketLocation cannot be modeled properly. To
565
- # account for this we just manually parse the XML document.
566
- # The "parsed" passed in only has the ResponseMetadata
567
- # filled out. This handler will fill in the LocationConstraint
568
- # value.
569
- if http_response.raw is None:
570
- return
571
- response_body = http_response.content
572
- parser = ETree.XMLParser(target=ETree.TreeBuilder(), encoding='utf-8')
573
- parser.feed(response_body)
574
- root = parser.close()
575
- region = root.text
576
- parsed['LocationConstraint'] = region
577
-
578
-
579
- def base64_encode_user_data(params, **kwargs):
580
- if 'UserData' in params:
581
- if isinstance(params['UserData'], str):
582
- # Encode it to bytes if it is text.
583
- params['UserData'] = params['UserData'].encode('utf-8')
584
- params['UserData'] = base64.b64encode(params['UserData']).decode(
585
- 'utf-8'
586
- )
587
-
588
-
589
- def document_base64_encoding(param):
590
- description = (
591
- '**This value will be base64 encoded automatically. Do '
592
- 'not base64 encode this value prior to performing the '
593
- 'operation.**'
594
- )
595
- append = AppendParamDocumentation(param, description)
596
- return append.append_documentation
597
-
598
-
599
- def validate_ascii_metadata(params, **kwargs):
600
- """Verify S3 Metadata only contains ascii characters.
601
-
602
- From: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
603
-
604
- "Amazon S3 stores user-defined metadata in lowercase. Each name, value pair
605
- must conform to US-ASCII when using REST and UTF-8 when using SOAP or
606
- browser-based uploads via POST."
607
-
608
- """
609
- metadata = params.get('Metadata')
610
- if not metadata or not isinstance(metadata, dict):
611
- # We have to at least type check the metadata as a dict type
612
- # because this handler is called before param validation.
613
- # We'll go ahead and return because the param validator will
614
- # give a descriptive error message for us.
615
- # We might need a post-param validation event.
616
- return
617
- for key, value in metadata.items():
618
- try:
619
- key.encode('ascii')
620
- value.encode('ascii')
621
- except UnicodeEncodeError:
622
- error_msg = (
623
- 'Non-ASCII characters found in S3 metadata '
624
- 'for key "%s", value: "%s". \nS3 metadata can only '
625
- 'contain ASCII characters. ' % (key, value)
626
- )
627
- raise ParamValidationError(report=error_msg)
628
-
629
-
630
- def fix_route53_ids(params, model, **kwargs):
631
- """
632
- Check for and split apart Route53 resource IDs, setting
633
- only the last piece. This allows the output of one operation
634
- (e.g. ``'foo/1234'``) to be used as input in another
635
- operation (e.g. it expects just ``'1234'``).
636
- """
637
- input_shape = model.input_shape
638
- if not input_shape or not hasattr(input_shape, 'members'):
639
- return
640
-
641
- members = [
642
- name
643
- for (name, shape) in input_shape.members.items()
644
- if shape.name in ['ResourceId', 'DelegationSetId', 'ChangeId']
645
- ]
646
-
647
- for name in members:
648
- if name in params:
649
- orig_value = params[name]
650
- params[name] = orig_value.split('/')[-1]
651
- logger.debug('%s %s -> %s', name, orig_value, params[name])
652
-
653
-
654
- def inject_account_id(params, **kwargs):
655
- if params.get('accountId') is None:
656
- # Glacier requires accountId, but allows you
657
- # to specify '-' for the current owner's account.
658
- # We add this default value if the user does not
659
- # provide the accountId as a convenience.
660
- params['accountId'] = '-'
661
-
662
-
663
- def add_glacier_version(model, params, **kwargs):
664
- request_dict = params
665
- request_dict['headers']['x-amz-glacier-version'] = model.metadata[
666
- 'apiVersion'
667
- ]
668
-
669
-
670
- def add_accept_header(model, params, **kwargs):
671
- if params['headers'].get('Accept', None) is None:
672
- request_dict = params
673
- request_dict['headers']['Accept'] = 'application/json'
674
-
675
-
676
- def add_glacier_checksums(params, **kwargs):
677
- """Add glacier checksums to the http request.
678
-
679
- This will add two headers to the http request:
680
-
681
- * x-amz-content-sha256
682
- * x-amz-sha256-tree-hash
683
-
684
- These values will only be added if they are not present
685
- in the HTTP request.
686
-
687
- """
688
- request_dict = params
689
- headers = request_dict['headers']
690
- body = request_dict['body']
691
- if isinstance(body, bytes):
692
- # If the user provided a bytes type instead of a file
693
- # like object, we temporarily create a BytesIO object
694
- # so we can use the util functions to calculate the
695
- # checksums which assume file like objects. Note that
696
- # we're not actually changing the body in the request_dict.
697
- body = BytesIO(body)
698
- starting_position = body.tell()
699
- if 'x-amz-content-sha256' not in headers:
700
- headers['x-amz-content-sha256'] = utils.calculate_sha256(
701
- body, as_hex=True
702
- )
703
- body.seek(starting_position)
704
- if 'x-amz-sha256-tree-hash' not in headers:
705
- headers['x-amz-sha256-tree-hash'] = utils.calculate_tree_hash(body)
706
- body.seek(starting_position)
707
-
708
-
709
- def document_glacier_tree_hash_checksum():
710
- doc = '''
711
- This is a required field.
712
-
713
- Ideally you will want to compute this value with checksums from
714
- previous uploaded parts, using the algorithm described in
715
- `Glacier documentation <http://docs.aws.amazon.com/amazonglacier/latest/dev/checksum-calculations.html>`_.
716
-
717
- But if you prefer, you can also use botocore.utils.calculate_tree_hash()
718
- to compute it from raw file by::
719
-
720
- checksum = calculate_tree_hash(open('your_file.txt', 'rb'))
721
-
722
- '''
723
- return AppendParamDocumentation('checksum', doc).append_documentation
724
-
725
-
726
- def document_cloudformation_get_template_return_type(
727
- section, event_name, **kwargs
728
- ):
729
- if 'response-params' in event_name:
730
- template_body_section = section.get_section('TemplateBody')
731
- type_section = template_body_section.get_section('param-type')
732
- type_section.clear_text()
733
- type_section.write('(*dict*) --')
734
- elif 'response-example' in event_name:
735
- parent = section.get_section('structure-value')
736
- param_line = parent.get_section('TemplateBody')
737
- value_portion = param_line.get_section('member-value')
738
- value_portion.clear_text()
739
- value_portion.write('{}')
740
-
741
-
742
- def switch_host_machinelearning(request, **kwargs):
743
- switch_host_with_param(request, 'PredictEndpoint')
744
-
745
-
746
- def check_openssl_supports_tls_version_1_2(**kwargs):
747
- import ssl
748
-
749
- try:
750
- openssl_version_tuple = ssl.OPENSSL_VERSION_INFO
751
- if openssl_version_tuple < (1, 0, 1):
752
- warnings.warn(
753
- 'Currently installed openssl version: %s does not '
754
- 'support TLS 1.2, which is required for use of iot-data. '
755
- 'Please use python installed with openssl version 1.0.1 or '
756
- 'higher.' % (ssl.OPENSSL_VERSION),
757
- UnsupportedTLSVersionWarning,
758
- )
759
- # We cannot check the openssl version on python2.6, so we should just
760
- # pass on this convenience check.
761
- except AttributeError:
762
- pass
763
-
764
-
765
- def change_get_to_post(request, **kwargs):
766
- # This is useful when we need to change a potentially large GET request
767
- # into a POST with x-www-form-urlencoded encoding.
768
- if request.method == 'GET' and '?' in request.url:
769
- request.headers['Content-Type'] = 'application/x-www-form-urlencoded'
770
- request.method = 'POST'
771
- request.url, request.data = request.url.split('?', 1)
772
-
773
-
774
- def set_list_objects_encoding_type_url(params, context, **kwargs):
775
- if 'EncodingType' not in params:
776
- # We set this context so that we know it wasn't the customer that
777
- # requested the encoding.
778
- context['encoding_type_auto_set'] = True
779
- params['EncodingType'] = 'url'
780
-
781
-
782
- def decode_list_object(parsed, context, **kwargs):
783
- # This is needed because we are passing url as the encoding type. Since the
784
- # paginator is based on the key, we need to handle it before it can be
785
- # round tripped.
786
- #
787
- # From the documentation: If you specify encoding-type request parameter,
788
- # Amazon S3 includes this element in the response, and returns encoded key
789
- # name values in the following response elements:
790
- # Delimiter, Marker, Prefix, NextMarker, Key.
791
- _decode_list_object(
792
- top_level_keys=['Delimiter', 'Marker', 'NextMarker'],
793
- nested_keys=[('Contents', 'Key'), ('CommonPrefixes', 'Prefix')],
794
- parsed=parsed,
795
- context=context,
796
- )
797
-
798
-
799
- def decode_list_object_v2(parsed, context, **kwargs):
800
- # From the documentation: If you specify encoding-type request parameter,
801
- # Amazon S3 includes this element in the response, and returns encoded key
802
- # name values in the following response elements:
803
- # Delimiter, Prefix, ContinuationToken, Key, and StartAfter.
804
- _decode_list_object(
805
- top_level_keys=['Delimiter', 'Prefix', 'StartAfter'],
806
- nested_keys=[('Contents', 'Key'), ('CommonPrefixes', 'Prefix')],
807
- parsed=parsed,
808
- context=context,
809
- )
810
-
811
-
812
- def decode_list_object_versions(parsed, context, **kwargs):
813
- # From the documentation: If you specify encoding-type request parameter,
814
- # Amazon S3 includes this element in the response, and returns encoded key
815
- # name values in the following response elements:
816
- # KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter.
817
- _decode_list_object(
818
- top_level_keys=[
819
- 'KeyMarker',
820
- 'NextKeyMarker',
821
- 'Prefix',
822
- 'Delimiter',
823
- ],
824
- nested_keys=[
825
- ('Versions', 'Key'),
826
- ('DeleteMarkers', 'Key'),
827
- ('CommonPrefixes', 'Prefix'),
828
- ],
829
- parsed=parsed,
830
- context=context,
831
- )
832
-
833
-
834
- def _decode_list_object(top_level_keys, nested_keys, parsed, context):
835
- if parsed.get('EncodingType') == 'url' and context.get(
836
- 'encoding_type_auto_set'
837
- ):
838
- # URL decode top-level keys in the response if present.
839
- for key in top_level_keys:
840
- if key in parsed:
841
- parsed[key] = unquote_str(parsed[key])
842
- # URL decode nested keys from the response if present.
843
- for (top_key, child_key) in nested_keys:
844
- if top_key in parsed:
845
- for member in parsed[top_key]:
846
- member[child_key] = unquote_str(member[child_key])
847
-
848
-
849
- def convert_body_to_file_like_object(params, **kwargs):
850
- if 'Body' in params:
851
- if isinstance(params['Body'], str):
852
- params['Body'] = BytesIO(ensure_bytes(params['Body']))
853
- elif isinstance(params['Body'], bytes):
854
- params['Body'] = BytesIO(params['Body'])
855
-
856
-
857
- def _add_parameter_aliases(handler_list):
858
- # Mapping of original parameter to parameter alias.
859
- # The key is <service>.<operation>.parameter
860
- # The first part of the key is used for event registration.
861
- # The last part is the original parameter name and the value is the
862
- # alias to expose in documentation.
863
- aliases = {
864
- 'ec2.*.Filter': 'Filters',
865
- 'logs.CreateExportTask.from': 'fromTime',
866
- 'cloudsearchdomain.Search.return': 'returnFields',
867
- }
868
-
869
- for original, new_name in aliases.items():
870
- event_portion, original_name = original.rsplit('.', 1)
871
- parameter_alias = ParameterAlias(original_name, new_name)
872
-
873
- # Add the handlers to the list of handlers.
874
- # One handler is to handle when users provide the alias.
875
- # The other handler is to update the documentation to show only
876
- # the alias.
877
- parameter_build_event_handler_tuple = (
878
- 'before-parameter-build.' + event_portion,
879
- parameter_alias.alias_parameter_in_call,
880
- REGISTER_FIRST,
881
- )
882
- docs_event_handler_tuple = (
883
- 'docs.*.' + event_portion + '.complete-section',
884
- parameter_alias.alias_parameter_in_documentation,
885
- )
886
- handler_list.append(parameter_build_event_handler_tuple)
887
- handler_list.append(docs_event_handler_tuple)
888
-
889
-
890
- class ParameterAlias:
891
- def __init__(self, original_name, alias_name):
892
- self._original_name = original_name
893
- self._alias_name = alias_name
894
-
895
- def alias_parameter_in_call(self, params, model, **kwargs):
896
- if model.input_shape:
897
- # Only consider accepting the alias if it is modeled in the
898
- # input shape.
899
- if self._original_name in model.input_shape.members:
900
- if self._alias_name in params:
901
- if self._original_name in params:
902
- raise AliasConflictParameterError(
903
- original=self._original_name,
904
- alias=self._alias_name,
905
- operation=model.name,
906
- )
907
- # Remove the alias parameter value and use the old name
908
- # instead.
909
- params[self._original_name] = params.pop(self._alias_name)
910
-
911
- def alias_parameter_in_documentation(self, event_name, section, **kwargs):
912
- if event_name.startswith('docs.request-params'):
913
- if self._original_name not in section.available_sections:
914
- return
915
- # Replace the name for parameter type
916
- param_section = section.get_section(self._original_name)
917
- param_type_section = param_section.get_section('param-type')
918
- self._replace_content(param_type_section)
919
-
920
- # Replace the name for the parameter description
921
- param_name_section = param_section.get_section('param-name')
922
- self._replace_content(param_name_section)
923
- elif event_name.startswith('docs.request-example'):
924
- section = section.get_section('structure-value')
925
- if self._original_name not in section.available_sections:
926
- return
927
- # Replace the name for the example
928
- param_section = section.get_section(self._original_name)
929
- self._replace_content(param_section)
930
-
931
- def _replace_content(self, section):
932
- content = section.getvalue().decode('utf-8')
933
- updated_content = content.replace(
934
- self._original_name, self._alias_name
935
- )
936
- section.clear_text()
937
- section.write(updated_content)
938
-
939
-
940
- class ClientMethodAlias:
941
- def __init__(self, actual_name):
942
- """Aliases a non-extant method to an existing method.
943
-
944
- :param actual_name: The name of the method that actually exists on
945
- the client.
946
- """
947
- self._actual = actual_name
948
-
949
- def __call__(self, client, **kwargs):
950
- return getattr(client, self._actual)
951
-
952
-
953
- # TODO: Remove this class as it is no longer used
954
- class HeaderToHostHoister:
955
- """Takes a header and moves it to the front of the hoststring."""
956
-
957
- _VALID_HOSTNAME = re.compile(r'(?!-)[a-z\d-]{1,63}(?<!-)$', re.IGNORECASE)
958
-
959
- def __init__(self, header_name):
960
- self._header_name = header_name
961
-
962
- def hoist(self, params, **kwargs):
963
- """Hoist a header to the hostname.
964
-
965
- Hoist a header to the beginning of the hostname with a suffix "." after
966
- it. The original header should be removed from the header map. This
967
- method is intended to be used as a target for the before-call event.
968
- """
969
- if self._header_name not in params['headers']:
970
- return
971
- header_value = params['headers'][self._header_name]
972
- self._ensure_header_is_valid_host(header_value)
973
- original_url = params['url']
974
- new_url = self._prepend_to_host(original_url, header_value)
975
- params['url'] = new_url
976
-
977
- def _ensure_header_is_valid_host(self, header):
978
- match = self._VALID_HOSTNAME.match(header)
979
- if not match:
980
- raise ParamValidationError(
981
- report=(
982
- 'Hostnames must contain only - and alphanumeric characters, '
983
- 'and be between 1 and 63 characters long.'
984
- )
985
- )
986
-
987
- def _prepend_to_host(self, url, prefix):
988
- url_components = urlsplit(url)
989
- parts = url_components.netloc.split('.')
990
- parts = [prefix] + parts
991
- new_netloc = '.'.join(parts)
992
- new_components = (
993
- url_components.scheme,
994
- new_netloc,
995
- url_components.path,
996
- url_components.query,
997
- '',
998
- )
999
- new_url = urlunsplit(new_components)
1000
- return new_url
1001
-
1002
-
1003
- def inject_api_version_header_if_needed(model, params, **kwargs):
1004
- if not model.is_endpoint_discovery_operation:
1005
- return
1006
- params['headers']['x-amz-api-version'] = model.service_model.api_version
1007
-
1008
-
1009
- def remove_lex_v2_start_conversation(class_attributes, **kwargs):
1010
- """Operation requires h2 which is currently unsupported in Python"""
1011
- if 'start_conversation' in class_attributes:
1012
- del class_attributes['start_conversation']
1013
-
1014
-
1015
- def add_retry_headers(request, **kwargs):
1016
- retries_context = request.context.get('retries')
1017
- if not retries_context:
1018
- return
1019
- headers = request.headers
1020
- headers['amz-sdk-invocation-id'] = retries_context['invocation-id']
1021
- sdk_retry_keys = ('ttl', 'attempt', 'max')
1022
- sdk_request_headers = [
1023
- f'{key}={retries_context[key]}'
1024
- for key in sdk_retry_keys
1025
- if key in retries_context
1026
- ]
1027
- headers['amz-sdk-request'] = '; '.join(sdk_request_headers)
1028
-
1029
-
1030
- def remove_bucket_from_url_paths_from_model(params, model, context, **kwargs):
1031
- """Strips leading `{Bucket}/` from any operations that have it.
1032
-
1033
- The original value is retained in a separate "authPath" field. This is
1034
- used in the HmacV1Auth signer. See HmacV1Auth.canonical_resource in
1035
- botocore/auth.py for details.
1036
-
1037
- This change is applied to the operation model during the first time the
1038
- operation is invoked and then stays in effect for the lifetime of the
1039
- client object.
1040
-
1041
- When the ruleset based endpoint resolver is in effect, both the endpoint
1042
- ruleset AND the service model place the bucket name in the final URL.
1043
- The result is an invalid URL. This handler modifies the operation model to
1044
- no longer place the bucket name. Previous versions of botocore fixed the
1045
- URL after the fact when necessary. Since the introduction of ruleset based
1046
- endpoint resolution, the problem exists in ALL URLs that contain a bucket
1047
- name and can therefore be addressed before the URL gets assembled.
1048
- """
1049
- req_uri = model.http['requestUri']
1050
- bucket_path = '/{Bucket}'
1051
- if req_uri.startswith(bucket_path):
1052
- model.http['requestUri'] = req_uri[len(bucket_path) :]
1053
- # If the request URI is ONLY a bucket, the auth_path must be
1054
- # terminated with a '/' character to generate a signature that the
1055
- # server will accept.
1056
- needs_slash = req_uri == bucket_path
1057
- model.http['authPath'] = f'{req_uri}/' if needs_slash else req_uri
1058
-
1059
-
1060
- def remove_accid_host_prefix_from_model(params, model, context, **kwargs):
1061
- """Removes the `{AccountId}.` prefix from the operation model.
1062
-
1063
- This change is applied to the operation model during the first time the
1064
- operation is invoked and then stays in effect for the lifetime of the
1065
- client object.
1066
-
1067
- When the ruleset based endpoint resolver is in effect, both the endpoint
1068
- ruleset AND the service model place the {AccountId}. prefix in the URL.
1069
- The result is an invalid endpoint. This handler modifies the operation
1070
- model to remove the `endpoint.hostPrefix` field while leaving the
1071
- `RequiresAccountId` static context parameter in place.
1072
- """
1073
- has_ctx_param = any(
1074
- ctx_param.name == 'RequiresAccountId' and ctx_param.value is True
1075
- for ctx_param in model.static_context_parameters
1076
- )
1077
- if (
1078
- model.endpoint is not None
1079
- and model.endpoint.get('hostPrefix') == '{AccountId}.'
1080
- and has_ctx_param
1081
- ):
1082
- del model.endpoint['hostPrefix']
1083
-
1084
-
1085
- def remove_arn_from_signing_path(request, **kwargs):
1086
- auth_path = request.auth_path
1087
- if isinstance(auth_path, str) and auth_path.startswith('/arn%3A'):
1088
- auth_path_parts = auth_path.split('/')
1089
- if len(auth_path_parts) > 1 and ArnParser.is_arn(
1090
- unquote(auth_path_parts[1])
1091
- ):
1092
- request.auth_path = '/'.join(['', *auth_path_parts[2:]])
1093
-
1094
-
1095
- def customize_endpoint_resolver_builtins(
1096
- builtins, model, params, context, **kwargs
1097
- ):
1098
- """Modify builtin parameter values for endpoint resolver
1099
-
1100
- Modifies the builtins dict in place. Changes are in effect for one call.
1101
- The corresponding event is emitted only if at least one builtin parameter
1102
- value is required for endpoint resolution for the operation.
1103
- """
1104
- bucket_name = params.get('Bucket')
1105
- bucket_is_arn = bucket_name is not None and ArnParser.is_arn(bucket_name)
1106
- # In some situations the host will return AuthorizationHeaderMalformed
1107
- # when the signing region of a sigv4 request is not the bucket's
1108
- # region (which is likely unknown by the user of GetBucketLocation).
1109
- # Avoid this by always using path-style addressing.
1110
- if model.name == 'GetBucketLocation':
1111
- builtins[EndpointResolverBuiltins.AWS_S3_FORCE_PATH_STYLE] = True
1112
- # All situations where the bucket name is an ARN are not compatible
1113
- # with path style addressing.
1114
- elif bucket_is_arn:
1115
- builtins[EndpointResolverBuiltins.AWS_S3_FORCE_PATH_STYLE] = False
1116
-
1117
- # Bucket names that are invalid host labels require path-style addressing.
1118
- # If path-style addressing was specifically requested, the default builtin
1119
- # value is already set.
1120
- path_style_required = (
1121
- bucket_name is not None and not VALID_HOST_LABEL_RE.match(bucket_name)
1122
- )
1123
- path_style_requested = builtins[
1124
- EndpointResolverBuiltins.AWS_S3_FORCE_PATH_STYLE
1125
- ]
1126
-
1127
- # Path-style addressing is incompatible with the global endpoint for
1128
- # presigned URLs. If the bucket name is an ARN, the ARN's region should be
1129
- # used in the endpoint.
1130
- if (
1131
- context.get('use_global_endpoint')
1132
- and not path_style_required
1133
- and not path_style_requested
1134
- and not bucket_is_arn
1135
- ):
1136
- builtins[EndpointResolverBuiltins.AWS_REGION] = 'aws-global'
1137
- builtins[EndpointResolverBuiltins.AWS_S3_USE_GLOBAL_ENDPOINT] = True
1138
-
1139
-
1140
- # This is a list of (event_name, handler).
1141
- # When a Session is created, everything in this list will be
1142
- # automatically registered with that Session.
1143
-
1144
- BUILTIN_HANDLERS = [
1145
- ('choose-service-name', handle_service_name_alias),
1146
- (
1147
- 'getattr.mturk.list_hi_ts_for_qualification_type',
1148
- ClientMethodAlias('list_hits_for_qualification_type'),
1149
- ),
1150
- (
1151
- 'before-parameter-build.s3.UploadPart',
1152
- convert_body_to_file_like_object,
1153
- REGISTER_LAST,
1154
- ),
1155
- (
1156
- 'before-parameter-build.s3.PutObject',
1157
- convert_body_to_file_like_object,
1158
- REGISTER_LAST,
1159
- ),
1160
- ('creating-client-class', add_generate_presigned_url),
1161
- ('creating-client-class.s3', add_generate_presigned_post),
1162
- ('creating-client-class.iot-data', check_openssl_supports_tls_version_1_2),
1163
- ('creating-client-class.lex-runtime-v2', remove_lex_v2_start_conversation),
1164
- ('after-call.iam', json_decode_policies),
1165
- ('after-call.ec2.GetConsoleOutput', decode_console_output),
1166
- ('after-call.cloudformation.GetTemplate', json_decode_template_body),
1167
- ('after-call.s3.GetBucketLocation', parse_get_bucket_location),
1168
- ('before-parameter-build', generate_idempotent_uuid),
1169
- ('before-parameter-build.s3', validate_bucket_name),
1170
- ('before-parameter-build.s3', remove_bucket_from_url_paths_from_model),
1171
- (
1172
- 'before-parameter-build.s3.ListObjects',
1173
- set_list_objects_encoding_type_url,
1174
- ),
1175
- (
1176
- 'before-parameter-build.s3.ListObjectsV2',
1177
- set_list_objects_encoding_type_url,
1178
- ),
1179
- (
1180
- 'before-parameter-build.s3.ListObjectVersions',
1181
- set_list_objects_encoding_type_url,
1182
- ),
1183
- ('before-parameter-build.s3.CopyObject', handle_copy_source_param),
1184
- ('before-parameter-build.s3.UploadPartCopy', handle_copy_source_param),
1185
- ('before-parameter-build.s3.CopyObject', validate_ascii_metadata),
1186
- ('before-parameter-build.s3.PutObject', validate_ascii_metadata),
1187
- (
1188
- 'before-parameter-build.s3.CreateMultipartUpload',
1189
- validate_ascii_metadata,
1190
- ),
1191
- ('before-parameter-build.s3-control', remove_accid_host_prefix_from_model),
1192
- ('docs.*.s3.CopyObject.complete-section', document_copy_source_form),
1193
- ('docs.*.s3.UploadPartCopy.complete-section', document_copy_source_form),
1194
- ('before-endpoint-resolution.s3', customize_endpoint_resolver_builtins),
1195
- ('before-call', add_recursion_detection_header),
1196
- ('before-call.s3', add_expect_header),
1197
- ('before-call.glacier', add_glacier_version),
1198
- ('before-call.apigateway', add_accept_header),
1199
- ('before-call.s3.PutObject', conditionally_calculate_md5),
1200
- ('before-call.s3.UploadPart', conditionally_calculate_md5),
1201
- ('before-call.s3.DeleteObjects', escape_xml_payload),
1202
- ('before-call.s3.PutBucketLifecycleConfiguration', escape_xml_payload),
1203
- ('before-call.glacier.UploadArchive', add_glacier_checksums),
1204
- ('before-call.glacier.UploadMultipartPart', add_glacier_checksums),
1205
- ('before-call.ec2.CopySnapshot', inject_presigned_url_ec2),
1206
- ('request-created', add_retry_headers),
1207
- ('request-created.machinelearning.Predict', switch_host_machinelearning),
1208
- ('needs-retry.s3.UploadPartCopy', check_for_200_error, REGISTER_FIRST),
1209
- ('needs-retry.s3.CopyObject', check_for_200_error, REGISTER_FIRST),
1210
- (
1211
- 'needs-retry.s3.CompleteMultipartUpload',
1212
- check_for_200_error,
1213
- REGISTER_FIRST,
1214
- ),
1215
- ('choose-signer.cognito-identity.GetId', disable_signing),
1216
- ('choose-signer.cognito-identity.GetOpenIdToken', disable_signing),
1217
- ('choose-signer.cognito-identity.UnlinkIdentity', disable_signing),
1218
- (
1219
- 'choose-signer.cognito-identity.GetCredentialsForIdentity',
1220
- disable_signing,
1221
- ),
1222
- ('choose-signer.sts.AssumeRoleWithSAML', disable_signing),
1223
- ('choose-signer.sts.AssumeRoleWithWebIdentity', disable_signing),
1224
- ('choose-signer', set_operation_specific_signer),
1225
- ('before-parameter-build.s3.HeadObject', sse_md5),
1226
- ('before-parameter-build.s3.GetObject', sse_md5),
1227
- ('before-parameter-build.s3.PutObject', sse_md5),
1228
- ('before-parameter-build.s3.CopyObject', sse_md5),
1229
- ('before-parameter-build.s3.CopyObject', copy_source_sse_md5),
1230
- ('before-parameter-build.s3.CreateMultipartUpload', sse_md5),
1231
- ('before-parameter-build.s3.UploadPart', sse_md5),
1232
- ('before-parameter-build.s3.UploadPartCopy', sse_md5),
1233
- ('before-parameter-build.s3.UploadPartCopy', copy_source_sse_md5),
1234
- ('before-parameter-build.s3.SelectObjectContent', sse_md5),
1235
- ('before-parameter-build.ec2.RunInstances', base64_encode_user_data),
1236
- (
1237
- 'before-parameter-build.autoscaling.CreateLaunchConfiguration',
1238
- base64_encode_user_data,
1239
- ),
1240
- ('before-parameter-build.route53', fix_route53_ids),
1241
- ('before-parameter-build.glacier', inject_account_id),
1242
- ('before-sign.s3', remove_arn_from_signing_path),
1243
- ('after-call.s3.ListObjects', decode_list_object),
1244
- ('after-call.s3.ListObjectsV2', decode_list_object_v2),
1245
- ('after-call.s3.ListObjectVersions', decode_list_object_versions),
1246
- # Cloudsearchdomain search operation will be sent by HTTP POST
1247
- ('request-created.cloudsearchdomain.Search', change_get_to_post),
1248
- # Glacier documentation customizations
1249
- (
1250
- 'docs.*.glacier.*.complete-section',
1251
- AutoPopulatedParam(
1252
- 'accountId',
1253
- 'Note: this parameter is set to "-" by '
1254
- 'default if no value is specified.',
1255
- ).document_auto_populated_param,
1256
- ),
1257
- (
1258
- 'docs.*.glacier.UploadArchive.complete-section',
1259
- AutoPopulatedParam('checksum').document_auto_populated_param,
1260
- ),
1261
- (
1262
- 'docs.*.glacier.UploadMultipartPart.complete-section',
1263
- AutoPopulatedParam('checksum').document_auto_populated_param,
1264
- ),
1265
- (
1266
- 'docs.request-params.glacier.CompleteMultipartUpload.complete-section',
1267
- document_glacier_tree_hash_checksum(),
1268
- ),
1269
- # Cloudformation documentation customizations
1270
- (
1271
- 'docs.*.cloudformation.GetTemplate.complete-section',
1272
- document_cloudformation_get_template_return_type,
1273
- ),
1274
- # UserData base64 encoding documentation customizations
1275
- (
1276
- 'docs.*.ec2.RunInstances.complete-section',
1277
- document_base64_encoding('UserData'),
1278
- ),
1279
- (
1280
- 'docs.*.autoscaling.CreateLaunchConfiguration.complete-section',
1281
- document_base64_encoding('UserData'),
1282
- ),
1283
- # EC2 CopySnapshot documentation customizations
1284
- (
1285
- 'docs.*.ec2.CopySnapshot.complete-section',
1286
- AutoPopulatedParam('PresignedUrl').document_auto_populated_param,
1287
- ),
1288
- (
1289
- 'docs.*.ec2.CopySnapshot.complete-section',
1290
- AutoPopulatedParam('DestinationRegion').document_auto_populated_param,
1291
- ),
1292
- # S3 SSE documentation modifications
1293
- (
1294
- 'docs.*.s3.*.complete-section',
1295
- AutoPopulatedParam('SSECustomerKeyMD5').document_auto_populated_param,
1296
- ),
1297
- # S3 SSE Copy Source documentation modifications
1298
- (
1299
- 'docs.*.s3.*.complete-section',
1300
- AutoPopulatedParam(
1301
- 'CopySourceSSECustomerKeyMD5'
1302
- ).document_auto_populated_param,
1303
- ),
1304
- # Add base64 information to Lambda
1305
- (
1306
- 'docs.*.lambda.UpdateFunctionCode.complete-section',
1307
- document_base64_encoding('ZipFile'),
1308
- ),
1309
- # The following S3 operations cannot actually accept a ContentMD5
1310
- (
1311
- 'docs.*.s3.*.complete-section',
1312
- HideParamFromOperations(
1313
- 's3',
1314
- 'ContentMD5',
1315
- [
1316
- 'DeleteObjects',
1317
- 'PutBucketAcl',
1318
- 'PutBucketCors',
1319
- 'PutBucketLifecycle',
1320
- 'PutBucketLogging',
1321
- 'PutBucketNotification',
1322
- 'PutBucketPolicy',
1323
- 'PutBucketReplication',
1324
- 'PutBucketRequestPayment',
1325
- 'PutBucketTagging',
1326
- 'PutBucketVersioning',
1327
- 'PutBucketWebsite',
1328
- 'PutObjectAcl',
1329
- ],
1330
- ).hide_param,
1331
- ),
1332
- #############
1333
- # RDS
1334
- #############
1335
- ('creating-client-class.rds', add_generate_db_auth_token),
1336
- ('before-call.rds.CopyDBClusterSnapshot', inject_presigned_url_rds),
1337
- ('before-call.rds.CreateDBCluster', inject_presigned_url_rds),
1338
- ('before-call.rds.CopyDBSnapshot', inject_presigned_url_rds),
1339
- ('before-call.rds.CreateDBInstanceReadReplica', inject_presigned_url_rds),
1340
- (
1341
- 'before-call.rds.StartDBInstanceAutomatedBackupsReplication',
1342
- inject_presigned_url_rds,
1343
- ),
1344
- # RDS PresignedUrl documentation customizations
1345
- (
1346
- 'docs.*.rds.CopyDBClusterSnapshot.complete-section',
1347
- AutoPopulatedParam('PreSignedUrl').document_auto_populated_param,
1348
- ),
1349
- (
1350
- 'docs.*.rds.CreateDBCluster.complete-section',
1351
- AutoPopulatedParam('PreSignedUrl').document_auto_populated_param,
1352
- ),
1353
- (
1354
- 'docs.*.rds.CopyDBSnapshot.complete-section',
1355
- AutoPopulatedParam('PreSignedUrl').document_auto_populated_param,
1356
- ),
1357
- (
1358
- 'docs.*.rds.CreateDBInstanceReadReplica.complete-section',
1359
- AutoPopulatedParam('PreSignedUrl').document_auto_populated_param,
1360
- ),
1361
- (
1362
- 'docs.*.rds.StartDBInstanceAutomatedBackupsReplication.complete-section',
1363
- AutoPopulatedParam('PreSignedUrl').document_auto_populated_param,
1364
- ),
1365
- #############
1366
- # Neptune
1367
- #############
1368
- ('before-call.neptune.CopyDBClusterSnapshot', inject_presigned_url_rds),
1369
- ('before-call.neptune.CreateDBCluster', inject_presigned_url_rds),
1370
- # Neptune PresignedUrl documentation customizations
1371
- (
1372
- 'docs.*.neptune.CopyDBClusterSnapshot.complete-section',
1373
- AutoPopulatedParam('PreSignedUrl').document_auto_populated_param,
1374
- ),
1375
- (
1376
- 'docs.*.neptune.CreateDBCluster.complete-section',
1377
- AutoPopulatedParam('PreSignedUrl').document_auto_populated_param,
1378
- ),
1379
- #############
1380
- # DocDB
1381
- #############
1382
- ('before-call.docdb.CopyDBClusterSnapshot', inject_presigned_url_rds),
1383
- ('before-call.docdb.CreateDBCluster', inject_presigned_url_rds),
1384
- # DocDB PresignedUrl documentation customizations
1385
- (
1386
- 'docs.*.docdb.CopyDBClusterSnapshot.complete-section',
1387
- AutoPopulatedParam('PreSignedUrl').document_auto_populated_param,
1388
- ),
1389
- (
1390
- 'docs.*.docdb.CreateDBCluster.complete-section',
1391
- AutoPopulatedParam('PreSignedUrl').document_auto_populated_param,
1392
- ),
1393
- ('before-call', inject_api_version_header_if_needed),
1394
- ]
1395
- _add_parameter_aliases(BUILTIN_HANDLERS)
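The BUILTIN_HANDLERS list above is consumed when a botocore Session is constructed; user code can hook into the same event system. Below is a minimal sketch of registering a custom handler through the public Session API; the handler itself and its print call are illustrative, not part of botocore:

import botocore.session

def log_bucket_name(params, **kwargs):
    # Same calling convention as the before-parameter-build handlers
    # above: inspect or mutate params, accept extra kwargs.
    print('Uploading to bucket:', params.get('Bucket'))

session = botocore.session.get_session()
# Event names follow the '<event>.<service>.<operation>' scheme used
# throughout BUILTIN_HANDLERS.
session.register('before-parameter-build.s3.PutObject', log_bucket_name)
s3 = session.create_client('s3', region_name='us-east-1')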
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/_extension.py DELETED
@@ -1,10 +0,0 @@
1
- from typing import Any
2
-
3
-
4
- def load_ipython_extension(ip: Any) -> None: # pragma: no cover
5
- # prevent circular import
6
- from pip._vendor.rich.pretty import install
7
- from pip._vendor.rich.traceback import install as tr_install
8
-
9
- install()
10
- tr_install()
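For context, IPython calls this hook when the module is loaded as an extension. A one-line sketch, loading the vendored module directly (an IPython session is assumed):

# Installs rich pretty-printing and pretty tracebacks in the session.
%load_ext pip._vendor.rich._extension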
spaces/CVH-vn1210/make_hair/minigpt4/common/__init__.py DELETED
File without changes
spaces/CVPR/LIVE/pybind11/include/pybind11/detail/typeid.h DELETED
@@ -1,55 +0,0 @@
1
- /*
2
- pybind11/detail/typeid.h: Compiler-independent access to type identifiers
3
-
4
- Copyright (c) 2016 Wenzel Jakob <[email protected]>
5
-
6
- All rights reserved. Use of this source code is governed by a
7
- BSD-style license that can be found in the LICENSE file.
8
- */
9
-
10
- #pragma once
11
-
12
- #include <cstdio>
13
- #include <cstdlib>
14
-
15
- #if defined(__GNUG__)
16
- #include <cxxabi.h>
17
- #endif
18
-
19
- #include "common.h"
20
-
21
- PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
22
- PYBIND11_NAMESPACE_BEGIN(detail)
23
- /// Erase all occurrences of a substring
24
- inline void erase_all(std::string &string, const std::string &search) {
25
- for (size_t pos = 0;;) {
26
- pos = string.find(search, pos);
27
- if (pos == std::string::npos) break;
28
- string.erase(pos, search.length());
29
- }
30
- }
31
-
32
- PYBIND11_NOINLINE inline void clean_type_id(std::string &name) {
33
- #if defined(__GNUG__)
34
- int status = 0;
35
- std::unique_ptr<char, void (*)(void *)> res {
36
- abi::__cxa_demangle(name.c_str(), nullptr, nullptr, &status), std::free };
37
- if (status == 0)
38
- name = res.get();
39
- #else
40
- detail::erase_all(name, "class ");
41
- detail::erase_all(name, "struct ");
42
- detail::erase_all(name, "enum ");
43
- #endif
44
- detail::erase_all(name, "pybind11::");
45
- }
46
- PYBIND11_NAMESPACE_END(detail)
47
-
48
- /// Return a string representation of a C++ type
49
- template <typename T> static std::string type_id() {
50
- std::string name(typeid(T).name());
51
- detail::clean_type_id(name);
52
- return name;
53
- }
54
-
55
- PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
spaces/CVPR/lama-example/bin/split_tar.py DELETED
@@ -1,22 +0,0 @@
- #!/usr/bin/env python3
-
-
- import tqdm
- import webdataset as wds
-
-
- def main(args):
-     input_dataset = wds.Dataset(args.infile)
-     output_dataset = wds.ShardWriter(args.outpattern)
-     for rec in tqdm.tqdm(input_dataset):
-         output_dataset.write(rec)
-
-
- if __name__ == '__main__':
-     import argparse
-
-     aparser = argparse.ArgumentParser()
-     aparser.add_argument('infile', type=str)
-     aparser.add_argument('outpattern', type=str)
-
-     main(aparser.parse_args())
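
The script streams records from one tar through webdataset's ShardWriter, which opens a new numbered shard whenever its size/count limits are reached. A hedged usage sketch (the file name and shard pattern are hypothetical):

# Hypothetical: equivalent to `./split_tar.py data.tar 'shard-%06d.tar'`.
import argparse

args = argparse.Namespace(infile="data.tar", outpattern="shard-%06d.tar")
main(args)  # reads records from data.tar and writes shard-000000.tar, ...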
 
spaces/CYSD/AI-image-detector/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: AI Image Detector
- emoji: 🚀
- colorFrom: purple
- colorTo: gray
- sdk: gradio
- sdk_version: 3.4.1
- app_file: app.py
- pinned: false
- duplicated_from: umm-maybe/AI-image-detector
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/CofAI/chat.b4/g4f/Provider/Providers/H2o.py DELETED
@@ -1,106 +0,0 @@
- from requests import Session
- from uuid import uuid4
- from json import loads
- import os
- import json
- import requests
- from ...typing import sha256, Dict, get_type_hints
-
- url = 'https://gpt-gm.h2o.ai'
- model = ['falcon-40b', 'falcon-7b', 'llama-13b']
- supports_stream = True
- needs_auth = False
-
- models = {
-     'falcon-7b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
-     'falcon-40b': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
-     'llama-13b': 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b'
- }
-
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
-     conversation = 'instruction: this is a conversation between a user and an AI assistant, respond to the latest message, referring to the conversation if needed\n'
-     for message in messages:
-         conversation += '%s: %s\n' % (message['role'], message['content'])
-     conversation += 'assistant:'
-
-     client = Session()
-     client.headers = {
-         'authority': 'gpt-gm.h2o.ai',
-         'origin': 'https://gpt-gm.h2o.ai',
-         'referer': 'https://gpt-gm.h2o.ai/',
-         'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-         'sec-ch-ua-mobile': '?0',
-         'sec-ch-ua-platform': '"Windows"',
-         'sec-fetch-dest': 'document',
-         'sec-fetch-mode': 'navigate',
-         'sec-fetch-site': 'same-origin',
-         'sec-fetch-user': '?1',
-         'upgrade-insecure-requests': '1',
-         'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
-     }
-
-     client.get('https://gpt-gm.h2o.ai/')
-     response = client.post('https://gpt-gm.h2o.ai/settings', data={
-         'ethicsModalAccepted': 'true',
-         'shareConversationsWithModelAuthors': 'true',
-         'ethicsModalAcceptedAt': '',
-         'activeModel': 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
-         'searchEnabled': 'true',
-     })
-
-     headers = {
-         'authority': 'gpt-gm.h2o.ai',
-         'accept': '*/*',
-         'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-         'origin': 'https://gpt-gm.h2o.ai',
-         'referer': 'https://gpt-gm.h2o.ai/',
-         'sec-ch-ua': '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-         'sec-ch-ua-mobile': '?0',
-         'sec-ch-ua-platform': '"Windows"',
-         'sec-fetch-dest': 'empty',
-         'sec-fetch-mode': 'cors',
-         'sec-fetch-site': 'same-origin',
-         'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
-     }
-
-     json_data = {
-         'model': models[model]
-     }
-
-     response = client.post('https://gpt-gm.h2o.ai/conversation',
-                            headers=headers, json=json_data)
-     conversationId = response.json()['conversationId']
-
-     completion = client.post(f'https://gpt-gm.h2o.ai/conversation/{conversationId}', stream=True, json={
-         'inputs': conversation,
-         'parameters': {
-             'temperature': kwargs.get('temperature', 0.4),
-             'truncate': kwargs.get('truncate', 2048),
-             'max_new_tokens': kwargs.get('max_new_tokens', 1024),
-             'do_sample': kwargs.get('do_sample', True),
-             'repetition_penalty': kwargs.get('repetition_penalty', 1.2),
-             'return_full_text': kwargs.get('return_full_text', False)
-         },
-         'stream': True,
-         'options': {
-             'id': kwargs.get('id', str(uuid4())),
-             'response_id': kwargs.get('response_id', str(uuid4())),
-             'is_retry': False,
-             'use_cache': False,
-             'web_search_id': ''
-         }
-     })
-
-     for line in completion.iter_lines():
-         if b'data' in line:
-             line = loads(line.decode('utf-8').replace('data:', ''))
-             token = line['token']['text']
-
-             if token == '<|endoftext|>':
-                 break
-             else:
-                 yield (token)
-
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
-     '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
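
_create_completion is a generator that yields answer tokens as they stream back from the H2O endpoint. A hedged sketch of driving it (the gpt-gm.h2o.ai service must still be reachable, which it may no longer be):

# Hypothetical usage sketch.
messages = [{'role': 'user', 'content': 'Say hello in one word.'}]
for token in _create_completion('falcon-7b', messages, stream=True):
    print(token, end='', flush=True)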
 
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/roi_heads/box_head/__init__.py DELETED
File without changes
spaces/DEEMOSTECH/ChatAvatar/static/css/main.a47c5861.css DELETED
@@ -1,2 +0,0 @@
- html{overflow-x:hidden;overflow-y:overlay}body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;box-sizing:border-box;color:#cfcfcf;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;margin:0}code{font-family:source-code-pro,Menlo,Monaco,Consolas,Courier New,monospace}::-webkit-scrollbar{height:16px;width:16px}::-webkit-scrollbar-thumb{border:5px solid;border-radius:8px}::-webkit-scrollbar-corner{display:none}.root{display:flex;justify-content:center;width:100%}.container{background:#121317;height:100vh;width:100%}.\!container{width:100%!important}@media (min-width:640px){.container{max-width:640px}.\!container{max-width:640px!important}}@media (min-width:768px){.container{max-width:768px}.\!container{max-width:768px!important}}@media (min-width:1024px){.container{max-width:1024px}.\!container{max-width:1024px!important}}@media (min-width:1280px){.container{max-width:1280px}.\!container{max-width:1280px!important}}@media (min-width:1536px){.container{max-width:1536px}.\!container{max-width:1536px!important}}.App{--theme-color:#4a00e0;--font-dark-color:#434343;--font-gray-color:#aaa;--font-light-color:#cfcfcf;--bg-light-color:#fff;--bg-gray0-color:#f8f8f8;--bg-gray1-color:#ececec;--bg-gray2-color:#7c7c7c;--bg-gray3-color:#373737;--bg-theme-color:#e7e3f1;--bg-dark-color:#121317;--side-gap:5rem;--radius:0.5rem;--shadow:-10px 0px 12px 1px hsla(0,0%,53%,.16);text-align:center}.App *{box-sizing:border-box;transition:all .3s}.App ::-webkit-scrollbar-thumb{background-color:rgba(0,0,0,.2)}textarea{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;border:1px solid transparent;color:var(--font-dark-color);font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;font-size:1rem;line-height:1.5rem;outline:none;padding:0;resize:none}textarea:focus{border-color:var(--theme-color)}img{-webkit-user-drag:none;-webkit-user-select:none;user-select:none}.gallery_con__Y2mej{align-items:flex-start;display:flex;justify-content:center;margin-top:var(--side-gap);padding:0 var(--side-gap);width:100%}.gallery_menuCon__fVdFJ{margin-right:2rem;width:-webkit-max-content;width:max-content}.gallery_menu__U2btD{align-items:center;background-color:initial;border:2px solid transparent;border-radius:1.5rem;cursor:pointer;display:flex;height:3rem;justify-content:center;line-height:1rem;margin-bottom:1rem;text-align:center;width:6rem}.gallery_menu__U2btD.gallery_selected__T2qcs,.gallery_menu__U2btD:hover{background-color:var(--bg-gray3-color);color:#fff}.gallery_menu__U2btD.gallery_selected__T2qcs{border-color:#fff}.gallery_cardsCon__wAfcp{align-items:flex-start;display:flex;flex-grow:1;flex-shrink:1;flex-wrap:wrap;justify-content:flex-start;max-height:100vh;max-width:calc(1600px + 9rem);overflow-y:auto}.gallery_cardsCon__wAfcp::-webkit-scrollbar-thumb{background-color:hsla(0,0%,100%,.2);border:5px solid #121317;border-radius:8px}.gallery_card__noUoL{background-color:var(--bg-gray3-color);border-radius:var(--radius);cursor:pointer;font-size:.75rem;height:260px;margin-bottom:1rem;margin-right:1rem;overflow:hidden;position:relative;width:200px}.gallery_coverImg__BYj-o,.gallery_coverImg__BYj-o img{height:100%;width:100%}.gallery_prompt__9PEmb{background-color:#f8f8f880;border-radius:var(--radius);bottom:1rem;color:var(--font-dark-color);height:0;left:1rem;overflow:hidden;padding:0 .5rem;position:absolute;right:1rem;text-align:left;white-space:pre-wrap;word-break:break-all}.gallery_prompt__9PEmb.gallery_show__c2k50{height:-webkit-fit-content;height:-moz-fit-content;height:fit-content;padding:.5rem}.gallery_infoCon__E8oLy{align-items:center;bottom:1rem;color:var(--font-dark-color);display:flex;justify-content:flex-start;left:1rem;position:absolute;right:1rem}.gallery_avatar__KWBmI,.gallery_avatar__KWBmI img{border-radius:12px;height:24px;overflow:hidden;width:24px}.gallery_avatar__KWBmI{margin-right:1rem}.gallery_spaceholder__xJwYU{flex-grow:1;flex-shrink:1}.header_con__M\+u1W{align-items:center;display:flex;justify-content:center;padding:0 var(--side-gap);width:100vw}.header_header__Y7CqP{align-items:center;border-bottom:1px solid hsla(0,0%,100%,.1);display:flex;justify-content:space-between;padding:1rem 0;width:100%}.header_logoCon__MIdGL{align-items:flex-start;display:flex;height:3rem;justify-content:center}.header_logo__90zuC{height:3rem;margin-right:1rem}.header_logoCon__MIdGL>div{font-size:2rem;font-weight:700;line-height:2rem;margin-top:5px}.header_avatar__B3zXB{background:var(--bg-gray2-color);border-radius:50%;overflow:hidden}.header_avatar__B3zXB,.header_avatar__B3zXB img{height:3rem;width:3rem}.login_con__\+RJgQ{background:#000;box-shadow:-5px 0 20px 0 hsla(0,0%,100%,.2);height:100vh;padding:var(--side-gap);position:fixed;right:0;top:0;z-index:9}.login_close__JulM-{cursor:pointer;-webkit-user-select:none;user-select:none}.result_con__gHOU1{align-items:center;color:var(--font-dark-color);display:flex;height:100vh;justify-content:center;position:fixed;top:0;width:0;width:100vw}.result_con__gHOU1 *{flex-shrink:0}.result_board__PCvVJ{align-items:center;background-color:var(--bg-light-color);border-radius:var(--radius);display:flex;height:80vh;justify-content:center;min-height:36rem;min-width:64rem;padding:1.5rem;width:100vh}.result_col__S-fRD{align-items:center;display:flex;flex-direction:column;flex-shrink:0;height:100%;justify-content:flex-start;position:relative;width:calc(50% - .5rem)}.result_col__S-fRD:first-child{margin-right:1rem}.result_colTitle__R8k\+A{align-items:flex-end;color:var(--font-gray-color);display:flex;font-size:1.2rem;font-weight:700;height:3rem;justify-content:space-between;line-height:1.2rem;margin-bottom:1rem;width:100%}.result_colTitle__R8k\+A>div{margin-bottom:.5rem}.result_colTitle__R8k\+A>div.result_restart__fLq8E{border-radius:5px;cursor:pointer;font-size:1rem;font-weight:400;margin-bottom:0;margin-left:1rem;padding:.5rem;-webkit-user-select:none;user-select:none}.result_restart__fLq8E:hover{background-color:var(--bg-gray0-color);color:var(--font-dark-color)}.result_spaceholder__GAxGZ{flex-grow:1;flex-shrink:1}.result_lang__85-De{cursor:pointer;font-weight:400;margin-right:1rem;-webkit-user-select:none;user-select:none}.result_lang__85-De.result_en__n-Jo7{margin-left:1rem;margin-right:0;width:4rem}.result_lang__85-De:hover{font-weight:700}.result_lang__85-De.result_selected__kDzD1{color:var(--font-dark-color);font-weight:700}.result_regene__yKazF{color:var(--theme-color);cursor:pointer;font-weight:400;-webkit-user-select:none;user-select:none}.result_chatCon__Hm\+zJ{background-color:var(--bg-gray0-color);border-radius:var(--radius);height:calc(100% - 4rem);padding:1rem}.result_chatCon__Hm\+zJ,.result_chatMsgCon__x8UTP{align-items:center;display:flex;flex-direction:column;flex-grow:1;flex-shrink:1;justify-content:flex-start;width:100%}.result_chatMsgCon__x8UTP{overflow-y:overlay;text-align:left}.result_chatMsgCon__x8UTP::-webkit-scrollbar-thumb{border:none;border-radius:3px}.result_chatMsgCon__x8UTP::-webkit-scrollbar{width:6px}.result_chatMsgRow__dr9Qg{align-items:flex-start;display:flex;flex-direction:row;justify-content:flex-start;margin-bottom:1rem;width:100%}.result_chatMsgRow__dr9Qg.result_user__bUuRg{flex-direction:row-reverse}.result_avatar__B2zOp{background:var(--bg-gray2-color);border-radius:1.5rem;margin-left:0;margin-right:1rem;overflow:hidden}.result_avatar__B2zOp,.result_avatar__B2zOp img{height:3rem;width:3rem}.result_user__bUuRg .result_avatar__B2zOp{margin-left:1rem;margin-right:0}.result_bubble__GexXm{background:var(--bg-theme-color);border-radius:var(--radius);flex-shrink:1;line-height:1.5rem;padding:.75rem 1rem;white-space:pre-wrap;word-break:break-all}.result_bubble__GexXm.result_unactive__zyVF2{background:var(--bg-gray1-color)}.result_user__bUuRg .result_bubble__GexXm{background:var(--bg-light-color)}.result_chatIptCon__LXDF-{align-items:center;display:flex;flex-direction:column;justify-content:flex-start;width:100%}.result_chatTipsCon__w4uUf{align-items:flex-end;display:flex;flex-direction:row;justify-content:flex-start;margin-top:1rem;max-width:100%;overflow-x:auto;overflow-y:hidden;width:100%}.result_chatTipsCon__w4uUf::-webkit-scrollbar-thumb{border-color:var(--bg-gray0-color)}.result_chatTips__6b9zJ{background:var(--bg-light-color);border-radius:var(--radius);cursor:pointer;margin-right:1rem;padding:1rem;text-align:left;white-space:pre-wrap;width:15.5rem;word-break:break-all}.result_chatTips__6b9zJ:last-child{margin-right:0}.result_chatRowCon__jLGk3{align-items:flex-start;display:flex;flex-direction:row;justify-content:space-between;margin-top:1rem;width:100%}.result_iptLineCon__nLuWa{flex-grow:1;flex-shrink:1;line-height:1.5rem;margin-right:1rem;position:relative;text-align:left}.result_iptSpaceholder__hAkD5{border:1px solid transparent;max-height:calc(9rem + 2px);visibility:hidden}.result_iptSpaceholder__hAkD5,.result_ipt__tA\+g4{padding:.75rem 1rem;white-space:pre-wrap;word-break:break-all}.result_ipt__tA\+g4{background:var(--bg-light-color);border-radius:var(--radius);bottom:0;left:0;overflow-y:auto;position:absolute;right:0;top:0}.result_ipt__tA\+g4::-webkit-scrollbar-thumb{border-color:var(--bg-light-color)}.result_btn__h5tQr{align-items:center;background-color:var(--theme-color);border:1px solid var(--theme-color);border-radius:1.5rem;color:#fff;cursor:pointer;display:flex;font-weight:700;height:calc(3rem - 2px);justify-content:center;line-height:1rem;padding:0 1.5rem;-webkit-user-select:none;user-select:none}.result_btn__h5tQr:hover{background:transparent;color:var(--theme-color)}.result_con__gHOU1 .result_btn__h5tQr.result_disabled__lB61-{background:var(--bg-gray2-color);border-color:var(--bg-gray2-color);color:var(--font-light-color);cursor:not-allowed}.result_iptArea__23TZc{background:var(--bg-gray0-color);border-radius:var(--radius);height:12rem;margin-bottom:1rem;padding:1rem;text-align:left;width:100%}.result_iptArea__23TZc::-webkit-scrollbar-thumb{border-color:var(--bg-gray0-color)}.result_generateBtn__UGmBG{margin-bottom:1rem;width:100%}.result_candidateCon__x9kyB{align-items:flex-start;background-color:var(--bg-gray0-color);border-radius:var(--radius);display:flex;flex-direction:row;flex-grow:1;flex-shrink:1;justify-content:space-between;overflow-y:overlay;padding:1rem;position:relative;width:100%}.result_candidateCon__x9kyB::-webkit-scrollbar-thumb{border-color:var(--bg-gray0-color)}.result_candidateCol__eoHna{margin-right:1rem;position:relative;width:calc(33.33333% - .66667rem)}.result_candidateCol__eoHna:last-child{margin-right:0}.result_candidateCol__eoHna img{border-radius:var(--radius);cursor:pointer;margin-bottom:1rem;width:100%}.result_creatorCon__tIm3e{align-items:flex-end;color:var(--font-gray-color);display:flex;font-size:1.2rem;font-weight:700;height:3rem;justify-content:flex-start;line-height:1.2rem;margin-bottom:1rem;width:100%}.result_creatorInfoCon__pET8h{text-align:left}.result_creatorName__VLTXL{color:var(--font-dark-color);font-size:1.2rem;font-weight:700;line-height:1.8rem}.result_creatorInfo__CkbWU{color:var(--font-gray-color);font-size:1rem;line-height:1.2rem}.result_modelView__Y25w5{background:var(--bg-gray0-color);border-radius:var(--radius);flex-grow:1;flex-shrink:1;overflow:hidden;width:100%}.result_modelInfoCon__bXw5O{align-items:center;bottom:1rem;display:flex;flex-direction:column;justify-content:flex-end;left:1rem;position:absolute;right:1rem;text-align:left}.result_progressInfo__g9iwR{margin-bottom:.5rem;width:100%}.result_progressTrack__I6zDn{background:var(--bg-light-color);border-radius:2px;height:4px;position:relative;width:100%}.result_progressThumb__mbBQj{background-color:var(--theme-color);border-radius:2px;height:4px;left:0;position:absolute;top:0}.result_modelPrompt__DzUbD{background:var(--bg-light-color);border-radius:var(--radius);margin-top:1rem;min-height:3rem;padding:1rem;width:100%}.welcome_con__o1kmf{align-items:center;display:flex;flex-direction:column;justify-content:flex-start;padding-top:calc(50vh - 18rem);position:relative;width:100%}.welcome_con__o1kmf>img{position:absolute;top:4rem;width:50vw}.welcome_mainCon__H1gv\+{z-index:999}.welcome_title__Gd8m4{color:#fff;font-family:Courier New;font-size:5rem;font-weight:700;line-height:5rem;margin-bottom:1.5rem}.welcome_ioCon__PQZXU{background-color:#fff;border-radius:1rem;border-style:solid;margin-left:8rem;margin-right:8rem;margin-top:24rem;padding:2rem;width:calc(100% - 16rem)}.welcome_iptCon__KpWEL{align-items:center;background:#ededf2;border-radius:1rem;display:flex;height:4rem;justify-content:space-between;margin-bottom:2rem;width:100%}.welcome_iptCon__KpWEL>img{height:2rem;margin-right:1rem;position:static;width:2rem}.welcome_ipt__ayi9Z{background:#ededf2;border:none;border-radius:1rem;color:var(--font-dark-color);flex-grow:1;font-size:1rem;height:100%;outline:none;padding:0 2rem}.welcome_ipt__ayi9Z::-webkit-input-placeholder{font-size:1rem}.welcome_ipt__ayi9Z::placeholder{font-size:1rem}.welcome_btnCon__Mx-ta,.welcome_btn__jCuoG{align-items:center;display:flex;justify-content:center}.welcome_btn__jCuoG{border:1px solid #8f8f8f;border-radius:1rem;cursor:pointer;height:3rem;line-height:1rem;-webkit-user-select:none;user-select:none;width:100%}.welcome_btn__jCuoG:last-child{background:#4a00e0;border:none;font-weight:700}.welcome_btn__jCuoG.welcome_disabled__pcSzv{cursor:not-allowed}.welcome_btn__jCuoG:hover{color:#fff}
- /*# sourceMappingURL=main.a47c5861.css.map*/
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/abc/_tasks.py DELETED
@@ -1,119 +0,0 @@
- from __future__ import annotations
-
- import sys
- from abc import ABCMeta, abstractmethod
- from types import TracebackType
- from typing import TYPE_CHECKING, Any, Awaitable, Callable, TypeVar, overload
- from warnings import warn
-
- if sys.version_info >= (3, 8):
-     from typing import Protocol
- else:
-     from typing_extensions import Protocol
-
- if TYPE_CHECKING:
-     from anyio._core._tasks import CancelScope
-
- T_Retval = TypeVar("T_Retval")
- T_contra = TypeVar("T_contra", contravariant=True)
-
-
- class TaskStatus(Protocol[T_contra]):
-     @overload
-     def started(self: TaskStatus[None]) -> None:
-         ...
-
-     @overload
-     def started(self, value: T_contra) -> None:
-         ...
-
-     def started(self, value: T_contra | None = None) -> None:
-         """
-         Signal that the task has started.
-
-         :param value: object passed back to the starter of the task
-         """
-
-
- class TaskGroup(metaclass=ABCMeta):
-     """
-     Groups several asynchronous tasks together.
-
-     :ivar cancel_scope: the cancel scope inherited by all child tasks
-     :vartype cancel_scope: CancelScope
-     """
-
-     cancel_scope: CancelScope
-
-     async def spawn(
-         self,
-         func: Callable[..., Awaitable[Any]],
-         *args: object,
-         name: object = None,
-     ) -> None:
-         """
-         Start a new task in this task group.
-
-         :param func: a coroutine function
-         :param args: positional arguments to call the function with
-         :param name: name of the task, for the purposes of introspection and debugging
-
-         .. deprecated:: 3.0
-            Use :meth:`start_soon` instead. If your code needs AnyIO 2 compatibility, you
-            can keep using this until AnyIO 4.
-
-         """
-         warn(
-             'spawn() is deprecated -- use start_soon() (without the "await") instead',
-             DeprecationWarning,
-         )
-         self.start_soon(func, *args, name=name)
-
-     @abstractmethod
-     def start_soon(
-         self,
-         func: Callable[..., Awaitable[Any]],
-         *args: object,
-         name: object = None,
-     ) -> None:
-         """
-         Start a new task in this task group.
-
-         :param func: a coroutine function
-         :param args: positional arguments to call the function with
-         :param name: name of the task, for the purposes of introspection and debugging
-
-         .. versionadded:: 3.0
-         """
-
-     @abstractmethod
-     async def start(
-         self,
-         func: Callable[..., Awaitable[Any]],
-         *args: object,
-         name: object = None,
-     ) -> Any:
-         """
-         Start a new task and wait until it signals for readiness.
-
-         :param func: a coroutine function
-         :param args: positional arguments to call the function with
-         :param name: name of the task, for the purposes of introspection and debugging
-         :return: the value passed to ``task_status.started()``
-         :raises RuntimeError: if the task finishes without calling ``task_status.started()``
-
-         .. versionadded:: 3.0
-         """
-
-     @abstractmethod
-     async def __aenter__(self) -> TaskGroup:
-         """Enter the task group context and allow starting new tasks."""
-
-     @abstractmethod
-     async def __aexit__(
-         self,
-         exc_type: type[BaseException] | None,
-         exc_val: BaseException | None,
-         exc_tb: TracebackType | None,
-     ) -> bool | None:
-         """Exit the task group context waiting for all tasks to finish."""
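
The start()/task_status handshake documented above lets a caller block until a child task reports readiness. A minimal sketch against anyio's public API (the "server" and its port are invented for illustration):

# Hypothetical usage sketch.
import anyio

async def serve(*, task_status=anyio.TASK_STATUS_IGNORED):
    port = 8080  # pretend a socket was bound here
    task_status.started(port)  # unblocks the caller of tg.start()
    await anyio.sleep(1)  # keep "serving"

async def main():
    async with anyio.create_task_group() as tg:
        port = await tg.start(serve)  # waits for task_status.started(port)
        print(f"server ready on port {port}")

anyio.run(main)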
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-dcd0cf9c.js DELETED
@@ -1,2 +0,0 @@
- import{S as Q,e as I,s as J,G as D,k as z,O as C,N as q,K as w,o as E,p as R,H as Z,ay as y,z as M,v as T,A as S,x as U,B as p,am as x,P as L,R as V,az as $,ap as F,U as j,M as B,Q as G,a1 as ee,E as le,ae,h as H,j as K,q as ie,r as te,t as N,F as A}from"./index-1d65707a.js";/* empty css */import{B as ne}from"./Button-f155035a.js";import{B as se}from"./BlockTitle-dee077e8.js";import"./Info-7c6961ef.js";function O(i,e,a){const l=i.slice();return l[13]=e[a],l[15]=a,l}function ue(i){let e;return{c(){e=L(i[3])},m(a,l){R(a,e,l)},p(a,l){l&8&&V(e,a[3])},d(a){a&&S(e)}}}function P(i,e){let a,l,s,o,m=!1,b,h,t=e[13]+"",_,f,n,d,v,r;function c(){return e[11](e[13],e[15])}return d=$(e[10][0]),{key:i,first:null,c(){a=q("label"),l=q("input"),b=C(),h=q("span"),_=L(t),f=C(),l.disabled=e[2],w(l,"type","radio"),w(l,"name",s="radio-"+e[6]),l.__value=o=e[13],F(l,l.__value),w(l,"class","svelte-1p9xokt"),w(h,"class","ml-2 svelte-1p9xokt"),w(a,"data-testid",n=`${e[13]}-radio-label`),w(a,"class","svelte-1p9xokt"),j(a,"disabled",e[2]),j(a,"selected",e[0]===e[13]),d.p(l),this.first=a},m(k,g){R(k,a,g),B(a,l),l.checked=l.__value===e[0],B(a,b),B(a,h),B(h,_),B(a,f),v||(r=[G(l,"change",e[9]),G(l,"input",c)],v=!0)},p(k,g){e=k,g&4&&(l.disabled=e[2]),g&64&&s!==(s="radio-"+e[6])&&w(l,"name",s),g&2&&o!==(o=e[13])&&(l.__value=o,F(l,l.__value),m=!0),(m||g&3)&&(l.checked=l.__value===e[0]),g&2&&t!==(t=e[13]+"")&&V(_,t),g&2&&n!==(n=`${e[13]}-radio-label`)&&w(a,"data-testid",n),g&4&&j(a,"disabled",e[2]),g&3&&j(a,"selected",e[0]===e[13])},d(k){k&&S(a),d.r(),v=!1,ee(r)}}}function _e(i){let e,a,l,s=[],o=new Map,m;e=new se({props:{show_label:i[5],info:i[4],$$slots:{default:[ue]},$$scope:{ctx:i}}});let b=D(i[1]);const h=t=>t[15];for(let t=0;t<b.length;t+=1){let _=O(i,b,t),f=h(_);o.set(f,s[t]=P(f,_))}return{c(){z(e.$$.fragment),a=C(),l=q("div");for(let t=0;t<s.length;t+=1)s[t].c();w(l,"class","wrap svelte-1p9xokt")},m(t,_){E(e,t,_),R(t,a,_),R(t,l,_);for(let f=0;f<s.length;f+=1)s[f]&&s[f].m(l,null);m=!0},p(t,[_]){const f={};_&32&&(f.show_label=t[5]),_&16&&(f.info=t[4]),_&65544&&(f.$$scope={dirty:_,ctx:t}),e.$set(f),_&199&&(b=D(t[1]),s=Z(s,_,h,1,t,b,o,l,y,P,null,O))},i(t){m||(M(e.$$.fragment,t),m=!0)},o(t){T(e.$$.fragment,t),m=!1},d(t){t&&(S(a),S(l)),U(e,t);for(let _=0;_<s.length;_+=1)s[_].d()}}}function fe(i,e,a){let{value:l}=e,{value_is_output:s=!1}=e,{choices:o}=e,{disabled:m=!1}=e,{label:b}=e,{info:h=void 0}=e,{show_label:t=!0}=e,{elem_id:_}=e;const f=p();function n(){f("change",l),s||f("input")}x(()=>{a(8,s=!1)});const d=[[]];function v(){l=this.__value,a(0,l)}const r=(c,k)=>f("select",{value:c,index:k});return i.$$set=c=>{"value"in c&&a(0,l=c.value),"value_is_output"in c&&a(8,s=c.value_is_output),"choices"in c&&a(1,o=c.choices),"disabled"in c&&a(2,m=c.disabled),"label"in c&&a(3,b=c.label),"info"in c&&a(4,h=c.info),"show_label"in c&&a(5,t=c.show_label),"elem_id"in c&&a(6,_=c.elem_id)},i.$$.update=()=>{i.$$.dirty&1&&n()},[l,o,m,b,h,t,_,f,s,v,d,r]}class oe extends Q{constructor(e){super(),I(this,e,fe,_e,J,{value:0,value_is_output:8,choices:1,disabled:2,label:3,info:4,show_label:5,elem_id:6})}}function ce(i){let e,a,l,s,o,m;const b=[i[13]];let h={};for(let n=0;n<b.length;n+=1)h=le(h,b[n]);e=new ae({props:h});function t(n){i[14](n)}function _(n){i[15](n)}let f={label:i[2],info:i[3],elem_id:i[4],show_label:i[9],choices:i[7],disabled:i[8]==="static"};return i[0]!==void 0&&(f.value=i[0]),i[1]!==void 0&&(f.value_is_output=i[1]),l=new oe({props:f}),H.push(()=>K(l,"value",t)),H.push(()=>K(l,"value_is_output",_)),l.$on("change",i[16]),l.$on("input",i[17]),l.$on("select",i[18]),{c(){z(e.$$.fragment),a=C(),z(l.$$.fragment)},m(n,d){E(e,n,d),R(n,a,d),E(l,n,d),m=!0},p(n,d){const v=d&8192?ie(b,[te(n[13])]):{};e.$set(v);const r={};d&4&&(r.label=n[2]),d&8&&(r.info=n[3]),d&16&&(r.elem_id=n[4]),d&512&&(r.show_label=n[9]),d&128&&(r.choices=n[7]),d&256&&(r.disabled=n[8]==="static"),!s&&d&1&&(s=!0,r.value=n[0],N(()=>s=!1)),!o&&d&2&&(o=!0,r.value_is_output=n[1],N(()=>o=!1)),l.$set(r)},i(n){m||(M(e.$$.fragment,n),M(l.$$.fragment,n),m=!0)},o(n){T(e.$$.fragment,n),T(l.$$.fragment,n),m=!1},d(n){n&&S(a),U(e,n),U(l,n)}}}function de(i){let e,a;return e=new ne({props:{visible:i[6],type:"fieldset",elem_id:i[4],elem_classes:i[5],container:i[10],scale:i[11],min_width:i[12],$$slots:{default:[ce]},$$scope:{ctx:i}}}),{c(){z(e.$$.fragment)},m(l,s){E(e,l,s),a=!0},p(l,[s]){const o={};s&64&&(o.visible=l[6]),s&16&&(o.elem_id=l[4]),s&32&&(o.elem_classes=l[5]),s&1024&&(o.container=l[10]),s&2048&&(o.scale=l[11]),s&4096&&(o.min_width=l[12]),s&533407&&(o.$$scope={dirty:s,ctx:l}),e.$set(o)},i(l){a||(M(e.$$.fragment,l),a=!0)},o(l){T(e.$$.fragment,l),a=!1},d(l){U(e,l)}}}function he(i,e,a){let{label:l="Radio"}=e,{info:s=void 0}=e,{elem_id:o=""}=e,{elem_classes:m=[]}=e,{visible:b=!0}=e,{value:h=null}=e,{value_is_output:t=!1}=e,{choices:_=[]}=e,{mode:f}=e,{show_label:n}=e,{container:d=!1}=e,{scale:v=null}=e,{min_width:r=void 0}=e,{loading_status:c}=e;function k(u){h=u,a(0,h)}function g(u){t=u,a(1,t)}function W(u){A.call(this,i,u)}function X(u){A.call(this,i,u)}function Y(u){A.call(this,i,u)}return i.$$set=u=>{"label"in u&&a(2,l=u.label),"info"in u&&a(3,s=u.info),"elem_id"in u&&a(4,o=u.elem_id),"elem_classes"in u&&a(5,m=u.elem_classes),"visible"in u&&a(6,b=u.visible),"value"in u&&a(0,h=u.value),"value_is_output"in u&&a(1,t=u.value_is_output),"choices"in u&&a(7,_=u.choices),"mode"in u&&a(8,f=u.mode),"show_label"in u&&a(9,n=u.show_label),"container"in u&&a(10,d=u.container),"scale"in u&&a(11,v=u.scale),"min_width"in u&&a(12,r=u.min_width),"loading_status"in u&&a(13,c=u.loading_status)},[h,t,l,s,o,m,b,_,f,n,d,v,r,c,k,g,W,X,Y]}class me extends Q{constructor(e){super(),I(this,e,he,de,J,{label:2,info:3,elem_id:4,elem_classes:5,visible:6,value:0,value_is_output:1,choices:7,mode:8,show_label:9,container:10,scale:11,min_width:12,loading_status:13})}}const we=me,Be=["static","dynamic"],Re=i=>({type:{payload:"string"},description:{payload:"selected choice"},example_data:i.choices.length>1?i.choices[0]:""});export{we as Component,Re as document,Be as modes};
- //# sourceMappingURL=index-dcd0cf9c.js.map
 
spaces/DaCuteRaccoon/dalle-mini/README.md DELETED
@@ -1,11 +0,0 @@
- ---
- title: DALL·E mini
- metaTitle: DALL·E mini by craiyon.com on Hugging Face
- emoji: 🥑
- colorFrom: yellow
- colorTo: green
- sdk: static
- pinned: true
- license: apache-2.0
- duplicated_from: dalle-mini/dalle-mini
- ---
 
spaces/Dineshkumars/Text-Summarization/app.py DELETED
@@ -1,68 +0,0 @@
- import nltk
- from nltk.corpus import stopwords
- from nltk.cluster.util import cosine_distance
- import numpy as np
- import networkx as nx
-
- def read_para(string):
-     article = string.split(". ")
-     sentences = []
-     for sentence in article:
-         sentences.append(sentence.replace("[^a-zA-Z]", " ").split(" "))
-     sentences.pop()
-     return sentences
-
- def similarity_in_sentences(sent1, sent2, stopwords=None):
-     if stopwords is None:
-         stopwords = []
-
-     sent1 = [w.lower() for w in sent1]
-     sent2 = [w.lower() for w in sent2]
-
-     all_words = list(set(sent1 + sent2))
-
-     vector1 = [0] * len(all_words)
-     vector2 = [0] * len(all_words)
-     for w in sent1:
-         if w in stopwords:
-             continue
-         vector1[all_words.index(w)] += 1
-     for w in sent2:
-         if w in stopwords:
-             continue
-         vector2[all_words.index(w)] += 1
-     return 1 - cosine_distance(vector1, vector2)
-
- def build_similarity_matrix(sentences, stop_words):
-     similarity_matrix = np.zeros((len(sentences), len(sentences)))
-     for w1 in range(len(sentences)):
-         for w2 in range(len(sentences)):
-             if w1 == w2:
-                 continue
-             similarity_matrix[w1][w2] = similarity_in_sentences(sentences[w1], sentences[w2], stop_words)
-     return similarity_matrix
-
- def summary(txt, top_n=5):
-     nltk.download("stopwords")
-     stop_words = stopwords.words('english')
-     summarized_text = []
-     sentences = read_para(txt)
-     sentence_similarity_matrix = build_similarity_matrix(sentences, stop_words)
-     sentence_similarity_graph = nx.from_numpy_array(sentence_similarity_matrix)
-     scores = nx.pagerank(sentence_similarity_graph)
-     ranked_sentence = sorted(((scores[i], s) for i, s in enumerate(sentences)), reverse=True)
-     for i in range(top_n):
-         summarized_text.append(" ".join(ranked_sentence[i][1]))
-     return ". ".join(summarized_text)
-
-
- import streamlit as st
-
- st.title("Text Summarizer")
- file = st.file_uploader("Upload file", type=["txt"])
- no_para = st.text_input("Enter the number of sentences for the summary:")
- if file is not None:
-     if no_para is not None and len(no_para) > 0:
-         content = file.read().decode("utf-8")
-         st.subheader("Summarized Text: ")
-         st.markdown(summary(content, int(no_para)))
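
summary() is plain TextRank: sentences become graph nodes, cosine similarity the edge weights, and PageRank the ranking. A hedged sketch of calling it directly, outside Streamlit (the sample text is invented; nltk must be installed and able to download the stopwords corpus):

# Hypothetical usage sketch.
text = ("Graphs are everywhere. PageRank ranks nodes by importance. "
        "TextRank applies the same idea to sentences. "
        "Sentences similar to many others score highly. "
        "The top-scoring sentences become the summary. ")
print(summary(text, top_n=2))  # prints the two highest-ranked sentences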
 
spaces/Dorado607/ChuanhuChatGPT/modules/models/base_model.py DELETED
@@ -1,783 +0,0 @@
1
- from __future__ import annotations
2
- from typing import TYPE_CHECKING, List
3
-
4
- import logging
5
- import json
6
- import commentjson as cjson
7
- import os
8
- import sys
9
- import requests
10
- import urllib3
11
- import traceback
12
- import pathlib
13
-
14
- from tqdm import tqdm
15
- import colorama
16
- from duckduckgo_search import DDGS
17
- from itertools import islice
18
- import asyncio
19
- import aiohttp
20
- from enum import Enum
21
-
22
- from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
23
- from langchain.callbacks.manager import BaseCallbackManager
24
-
25
- from typing import Any, Dict, List, Optional, Union
26
-
27
- from langchain.callbacks.base import BaseCallbackHandler
28
- from langchain.input import print_text
29
- from langchain.schema import AgentAction, AgentFinish, LLMResult
30
- from threading import Thread, Condition
31
- from collections import deque
32
- from langchain.chat_models.base import BaseChatModel
33
- from langchain.schema import HumanMessage, AIMessage, SystemMessage, BaseMessage
34
-
35
- from ..presets import *
36
- from ..index_func import *
37
- from ..utils import *
38
- from .. import shared
39
- from ..config import retrieve_proxy
40
-
41
-
42
- class CallbackToIterator:
43
- def __init__(self):
44
- self.queue = deque()
45
- self.cond = Condition()
46
- self.finished = False
47
-
48
- def callback(self, result):
49
- with self.cond:
50
- self.queue.append(result)
51
- self.cond.notify() # Wake up the generator.
52
-
53
- def __iter__(self):
54
- return self
55
-
56
- def __next__(self):
57
- with self.cond:
58
- # Wait for a value to be added to the queue.
59
- while not self.queue and not self.finished:
60
- self.cond.wait()
61
- if not self.queue:
62
- raise StopIteration()
63
- return self.queue.popleft()
64
-
65
- def finish(self):
66
- with self.cond:
67
- self.finished = True
68
- self.cond.notify() # Wake up the generator if it's waiting.
69
-
70
-
71
- def get_action_description(text):
72
- match = re.search('```(.*?)```', text, re.S)
73
- json_text = match.group(1)
74
- # 把json转化为python字典
75
- json_dict = json.loads(json_text)
76
- # 提取'action'和'action_input'的值
77
- action_name = json_dict['action']
78
- action_input = json_dict['action_input']
79
- if action_name != "Final Answer":
80
- return f'<p style="font-size: smaller; color: gray;">{action_name}: {action_input}</p>'
81
- else:
82
- return ""
83
-
84
-
85
- class ChuanhuCallbackHandler(BaseCallbackHandler):
86
-
87
- def __init__(self, callback) -> None:
88
- """Initialize callback handler."""
89
- self.callback = callback
90
-
91
- def on_agent_action(
92
- self, action: AgentAction, color: Optional[str] = None, **kwargs: Any
93
- ) -> Any:
94
- self.callback(get_action_description(action.log))
95
-
96
- def on_tool_end(
97
- self,
98
- output: str,
99
- color: Optional[str] = None,
100
- observation_prefix: Optional[str] = None,
101
- llm_prefix: Optional[str] = None,
102
- **kwargs: Any,
103
- ) -> None:
104
- """If not the final action, print out observation."""
105
- # if observation_prefix is not None:
106
- # self.callback(f"\n\n{observation_prefix}")
107
- # self.callback(output)
108
- # if llm_prefix is not None:
109
- # self.callback(f"\n\n{llm_prefix}")
110
- if observation_prefix is not None:
111
- logging.info(observation_prefix)
112
- self.callback(output)
113
- if llm_prefix is not None:
114
- logging.info(llm_prefix)
115
-
116
- def on_agent_finish(
117
- self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
118
- ) -> None:
119
- # self.callback(f"{finish.log}\n\n")
120
- logging.info(finish.log)
121
-
122
- def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
123
- """Run on new LLM token. Only available when streaming is enabled."""
124
- self.callback(token)
125
-
126
- def on_chat_model_start(self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any) -> Any:
127
- """Run when a chat model starts running."""
128
- pass
129
-
130
-
131
- class ModelType(Enum):
132
- Unknown = -1
133
- OpenAI = 0
134
- ChatGLM = 1
135
- LLaMA = 2
136
- XMChat = 3
137
- StableLM = 4
138
- MOSS = 5
139
- YuanAI = 6
140
- Minimax = 7
141
- ChuanhuAgent = 8
142
- GooglePaLM = 9
143
- LangchainChat = 10
144
-
145
- @classmethod
146
- def get_type(cls, model_name: str):
147
- model_type = None
148
- model_name_lower = model_name.lower()
149
- if "gpt" in model_name_lower:
150
- model_type = ModelType.OpenAI
151
- elif "chatglm" in model_name_lower:
152
- model_type = ModelType.ChatGLM
153
- elif "llama" in model_name_lower or "alpaca" in model_name_lower:
154
- model_type = ModelType.LLaMA
155
- elif "xmchat" in model_name_lower:
156
- model_type = ModelType.XMChat
157
- elif "stablelm" in model_name_lower:
158
- model_type = ModelType.StableLM
159
- elif "moss" in model_name_lower:
160
- model_type = ModelType.MOSS
161
- elif "yuanai" in model_name_lower:
162
- model_type = ModelType.YuanAI
163
- elif "minimax" in model_name_lower:
164
- model_type = ModelType.Minimax
165
- elif "川虎助理" in model_name_lower:
166
- model_type = ModelType.ChuanhuAgent
167
- elif "palm" in model_name_lower:
168
- model_type = ModelType.GooglePaLM
169
- elif "azure" or "api" in model_name_lower:
170
- model_type = ModelType.LangchainChat
171
- else:
172
- model_type = ModelType.Unknown
173
- return model_type
174
-
175
-
176
- class BaseLLMModel:
177
- def __init__(
178
- self,
179
- model_name,
180
- system_prompt=INITIAL_SYSTEM_PROMPT,
181
- temperature=1.0,
182
- top_p=1.0,
183
- n_choices=1,
184
- stop=None,
185
- max_generation_token=None,
186
- presence_penalty=0,
187
- frequency_penalty=0,
188
- logit_bias=None,
189
- user="",
190
- ) -> None:
191
- self.history = []
192
- self.all_token_counts = []
193
- self.model_name = model_name
194
- self.model_type = ModelType.get_type(model_name)
195
- try:
196
- self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name]
197
- except KeyError:
198
- self.token_upper_limit = DEFAULT_TOKEN_LIMIT
199
- self.interrupted = False
200
- self.system_prompt = system_prompt
201
- self.api_key = None
202
- self.need_api_key = False
203
- self.single_turn = False
204
-
205
- self.temperature = temperature
206
- self.top_p = top_p
207
- self.n_choices = n_choices
208
- self.stop_sequence = stop
209
- self.max_generation_token = None
210
- self.presence_penalty = presence_penalty
211
- self.frequency_penalty = frequency_penalty
212
- self.logit_bias = logit_bias
213
- self.user_identifier = user
214
-
215
- def get_answer_stream_iter(self):
216
- """stream predict, need to be implemented
217
- conversations are stored in self.history, with the most recent question, in OpenAI format
218
- should return a generator, each time give the next word (str) in the answer
219
- """
220
- logging.warning(
221
- "stream predict not implemented, using at once predict instead")
222
- response, _ = self.get_answer_at_once()
223
- yield response
224
-
225
- def get_answer_at_once(self):
226
- """predict at once, need to be implemented
227
- conversations are stored in self.history, with the most recent question, in OpenAI format
228
- Should return:
229
- the answer (str)
230
- total token count (int)
231
- """
232
- logging.warning(
233
- "at once predict not implemented, using stream predict instead")
234
- response_iter = self.get_answer_stream_iter()
235
- count = 0
236
- for response in response_iter:
237
- count += 1
238
- return response, sum(self.all_token_counts) + count
239
-
240
- def billing_info(self):
241
- """get billing infomation, inplement if needed"""
242
- logging.warning("billing info not implemented, using default")
243
- return BILLING_NOT_APPLICABLE_MSG
244
-
245
- def count_token(self, user_input):
246
- """get token count from input, implement if needed"""
247
- # logging.warning("token count not implemented, using default")
248
- return len(user_input)
249
-
250
- def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""):
251
- def get_return_value():
252
- return chatbot, status_text
253
-
254
- status_text = i18n("开始实时传输回答……")
255
- if fake_input:
256
- chatbot.append((fake_input, ""))
257
- else:
258
- chatbot.append((inputs, ""))
259
-
260
- user_token_count = self.count_token(inputs)
261
- self.all_token_counts.append(user_token_count)
262
- logging.debug(f"输入token计数: {user_token_count}")
263
-
264
- stream_iter = self.get_answer_stream_iter()
265
-
266
- if display_append:
267
- display_append = '\n\n<hr class="append-display no-in-raw" />' + display_append
268
- for partial_text in stream_iter:
269
- chatbot[-1] = (chatbot[-1][0], partial_text + display_append)
270
- self.all_token_counts[-1] += 1
271
- status_text = self.token_message()
272
- yield get_return_value()
273
- if self.interrupted:
274
- self.recover()
275
- break
276
- self.history.append(construct_assistant(partial_text))
277
-
278
- def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""):
279
- if fake_input:
280
- chatbot.append((fake_input, ""))
281
- else:
282
- chatbot.append((inputs, ""))
283
- if fake_input is not None:
284
- user_token_count = self.count_token(fake_input)
285
- else:
286
- user_token_count = self.count_token(inputs)
287
- self.all_token_counts.append(user_token_count)
288
- ai_reply, total_token_count = self.get_answer_at_once()
289
- self.history.append(construct_assistant(ai_reply))
290
- if fake_input is not None:
291
- self.history[-2] = construct_user(fake_input)
292
- chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)
293
- if fake_input is not None:
294
- self.all_token_counts[-1] += count_token(
295
- construct_assistant(ai_reply))
296
- else:
297
- self.all_token_counts[-1] = total_token_count - \
298
- sum(self.all_token_counts)
299
- status_text = self.token_message()
300
- return chatbot, status_text
301
-
302
- def handle_file_upload(self, files, chatbot, language):
303
- """if the model accepts multi modal input, implement this function"""
304
- status = gr.Markdown.update()
305
- if files:
306
- index = construct_index(self.api_key, file_src=files)
307
- status = i18n("索引构建完成")
308
- return gr.Files.update(), chatbot, status
309
-
310
- def summarize_index(self, files, chatbot, language):
311
- status = gr.Markdown.update()
312
- if files:
313
- index = construct_index(self.api_key, file_src=files)
314
- status = i18n("总结完成")
315
- logging.info(i18n("生成内容总结中……"))
316
- os.environ["OPENAI_API_KEY"] = self.api_key
317
- from langchain.chains.summarize import load_summarize_chain
318
- from langchain.prompts import PromptTemplate
319
- from langchain.chat_models import ChatOpenAI
320
- from langchain.callbacks import StdOutCallbackHandler
321
- prompt_template = "Write a concise summary of the following:\n\n{text}\n\nCONCISE SUMMARY IN " + language + ":"
322
- PROMPT = PromptTemplate(
323
- template=prompt_template, input_variables=["text"])
324
- llm = ChatOpenAI()
325
- chain = load_summarize_chain(
326
- llm, chain_type="map_reduce", return_intermediate_steps=True, map_prompt=PROMPT, combine_prompt=PROMPT)
327
- summary = chain({"input_documents": list(index.docstore.__dict__[
328
- "_dict"].values())}, return_only_outputs=True)["output_text"]
329
- print(i18n("总结") + f": {summary}")
330
- chatbot.append([i18n("上传了")+str(len(files))+"个文件", summary])
331
- return chatbot, status
332
-
333
- def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot):
334
- fake_inputs = None
335
- display_append = []
336
- limited_context = False
337
- fake_inputs = real_inputs
338
- if files:
339
- from langchain.embeddings.huggingface import HuggingFaceEmbeddings
340
- from langchain.vectorstores.base import VectorStoreRetriever
341
- limited_context = True
342
- msg = "加载索引中……"
343
- logging.info(msg)
344
- index = construct_index(self.api_key, file_src=files)
345
- assert index is not None, "获取索引失败"
346
- msg = "索引获取成功,生成回答中……"
347
- logging.info(msg)
348
- with retrieve_proxy():
349
- retriever = VectorStoreRetriever(vectorstore=index, search_type="similarity_score_threshold", search_kwargs={
350
- "k": 6, "score_threshold": 0.5})
351
- relevant_documents = retriever.get_relevant_documents(
352
- real_inputs)
353
- reference_results = [[d.page_content.strip("�"), os.path.basename(
354
- d.metadata["source"])] for d in relevant_documents]
355
- reference_results = add_source_numbers(reference_results)
356
- display_append = add_details(reference_results)
357
- display_append = "\n\n" + "".join(display_append)
358
- real_inputs = (
359
- replace_today(PROMPT_TEMPLATE)
360
- .replace("{query_str}", real_inputs)
361
- .replace("{context_str}", "\n\n".join(reference_results))
362
- .replace("{reply_language}", reply_language)
363
- )
364
- elif use_websearch:
365
- search_results = []
366
- with DDGS() as ddgs:
367
- ddgs_gen = ddgs.text(real_inputs, backend="lite")
368
- for r in islice(ddgs_gen, 10):
369
- search_results.append(r)
370
- reference_results = []
371
- for idx, result in enumerate(search_results):
372
- logging.debug(f"搜索结果{idx + 1}:{result}")
373
- domain_name = urllib3.util.parse_url(result['href']).host
374
- reference_results.append([result['body'], result['href']])
375
- display_append.append(
376
- # f"{idx+1}. [{domain_name}]({result['href']})\n"
377
- f"<a href=\"{result['href']}\" target=\"_blank\">{idx+1}.&nbsp;{result['title']}</a>"
378
- )
379
- reference_results = add_source_numbers(reference_results)
380
- # display_append = "<ol>\n\n" + "".join(display_append) + "</ol>"
381
- display_append = '<div class = "source-a">' + \
382
- "".join(display_append) + '</div>'
383
- real_inputs = (
384
- replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
385
- .replace("{query}", real_inputs)
386
- .replace("{web_results}", "\n\n".join(reference_results))
387
- .replace("{reply_language}", reply_language)
388
- )
389
- else:
390
- display_append = ""
391
- return limited_context, fake_inputs, display_append, real_inputs, chatbot
392
-
393
- def predict(
394
- self,
395
- inputs,
396
- chatbot,
397
- stream=False,
398
- use_websearch=False,
399
- files=None,
400
- reply_language="中文",
401
- should_check_token_count=True,
402
- ): # repetition_penalty, top_k
403
-
404
- status_text = "开始生成回答……"
405
- logging.info(
406
- "用户" + f"{self.user_identifier}" + "的输入为:" +
407
- colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL
408
- )
409
- if should_check_token_count:
410
- yield chatbot + [(inputs, "")], status_text
411
- if reply_language == "跟随问题语言(不稳定)":
412
- reply_language = "the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch."
413
-
414
- limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(
415
- real_inputs=inputs, use_websearch=use_websearch, files=files, reply_language=reply_language, chatbot=chatbot)
416
- yield chatbot + [(fake_inputs, "")], status_text
417
-
418
- if (
419
- self.need_api_key and
420
- self.api_key is None
421
- and not shared.state.multi_api_key
422
- ):
423
- status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG
424
- logging.info(status_text)
425
- chatbot.append((inputs, ""))
426
- if len(self.history) == 0:
427
- self.history.append(construct_user(inputs))
428
- self.history.append("")
429
- self.all_token_counts.append(0)
430
- else:
431
- self.history[-2] = construct_user(inputs)
432
- yield chatbot + [(inputs, "")], status_text
433
- return
434
- elif len(inputs.strip()) == 0:
435
- status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG
436
- logging.info(status_text)
437
- yield chatbot + [(inputs, "")], status_text
438
- return
439
-
440
- if self.single_turn:
441
- self.history = []
442
- self.all_token_counts = []
443
- self.history.append(construct_user(inputs))
444
-
445
- try:
446
- if stream:
447
- logging.debug("使用流式传输")
448
- iter = self.stream_next_chatbot(
449
- inputs,
450
- chatbot,
451
- fake_input=fake_inputs,
452
- display_append=display_append,
453
- )
454
- for chatbot, status_text in iter:
455
- yield chatbot, status_text
456
- else:
457
- logging.debug("不使用流式传输")
458
- chatbot, status_text = self.next_chatbot_at_once(
459
- inputs,
460
- chatbot,
461
- fake_input=fake_inputs,
462
- display_append=display_append,
463
- )
464
- yield chatbot, status_text
465
- except Exception as e:
466
- traceback.print_exc()
467
- status_text = STANDARD_ERROR_MSG + str(e)
468
- yield chatbot, status_text
469
-
470
- if len(self.history) > 1 and self.history[-1]["content"] != inputs:
471
- logging.info(
472
- "回答为:"
473
- + colorama.Fore.BLUE
474
- + f"{self.history[-1]['content']}"
475
- + colorama.Style.RESET_ALL
476
- )
477
-
478
- if limited_context:
479
- # self.history = self.history[-4:]
480
- # self.all_token_counts = self.all_token_counts[-2:]
481
- self.history = []
482
- self.all_token_counts = []
483
-
484
- max_token = self.token_upper_limit - TOKEN_OFFSET
485
-
486
- if sum(self.all_token_counts) > max_token and should_check_token_count:
487
- count = 0
488
- while (
489
- sum(self.all_token_counts)
490
- > self.token_upper_limit * REDUCE_TOKEN_FACTOR
491
- and sum(self.all_token_counts) > 0
492
- ):
493
- count += 1
494
- del self.all_token_counts[0]
495
- del self.history[:2]
496
- logging.info(status_text)
497
- status_text = f"为了防止token超限,模型忘记了早期的 {count} 轮对话"
498
- yield chatbot, status_text
499
-
500
- self.auto_save(chatbot)
501
-
502
- def retry(
503
- self,
504
- chatbot,
505
- stream=False,
506
- use_websearch=False,
507
- files=None,
508
- reply_language="中文",
509
- ):
510
- logging.debug("重试中……")
511
- if len(self.history) > 0:
512
- inputs = self.history[-2]["content"]
513
- del self.history[-2:]
514
- if len(self.all_token_counts) > 0:
515
- self.all_token_counts.pop()
516
- elif len(chatbot) > 0:
517
- inputs = chatbot[-1][0]
518
- else:
519
- yield chatbot, f"{STANDARD_ERROR_MSG}上下文是空的"
520
- return
521
-
522
- iter = self.predict(
523
- inputs,
524
- chatbot,
525
- stream=stream,
526
- use_websearch=use_websearch,
527
- files=files,
528
- reply_language=reply_language,
529
- )
530
- for x in iter:
531
- yield x
532
- logging.debug("重试完毕")
533
-
534
- # def reduce_token_size(self, chatbot):
535
- # logging.info("开始减少token数量……")
536
- # chatbot, status_text = self.next_chatbot_at_once(
537
- # summarize_prompt,
538
- # chatbot
539
- # )
540
- # max_token_count = self.token_upper_limit * REDUCE_TOKEN_FACTOR
541
- # num_chat = find_n(self.all_token_counts, max_token_count)
542
- # logging.info(f"previous_token_count: {self.all_token_counts}, keeping {num_chat} chats")
543
- # chatbot = chatbot[:-1]
544
- # self.history = self.history[-2*num_chat:] if num_chat > 0 else []
545
- # self.all_token_counts = self.all_token_counts[-num_chat:] if num_chat > 0 else []
546
- # msg = f"保留了最近{num_chat}轮对话"
547
- # logging.info(msg)
548
- # logging.info("减少token数量完毕")
549
- # return chatbot, msg + "," + self.token_message(self.all_token_counts if len(self.all_token_counts) > 0 else [0])
550
-
551
- def interrupt(self):
552
- self.interrupted = True
553
-
554
- def recover(self):
555
- self.interrupted = False
556
-
557
- def set_token_upper_limit(self, new_upper_limit):
558
- self.token_upper_limit = new_upper_limit
559
- print(f"token上限设置为{new_upper_limit}")
560
-
561
- def set_temperature(self, new_temperature):
562
- self.temperature = new_temperature
563
-
564
- def set_top_p(self, new_top_p):
565
- self.top_p = new_top_p
566
-
567
- def set_n_choices(self, new_n_choices):
568
- self.n_choices = new_n_choices
569
-
570
- def set_stop_sequence(self, new_stop_sequence: str):
571
- new_stop_sequence = new_stop_sequence.split(",")
572
- self.stop_sequence = new_stop_sequence
573
-
574
- def set_max_tokens(self, new_max_tokens):
575
- self.max_generation_token = new_max_tokens
576
-
577
- def set_presence_penalty(self, new_presence_penalty):
578
- self.presence_penalty = new_presence_penalty
579
-
580
- def set_frequency_penalty(self, new_frequency_penalty):
581
- self.frequency_penalty = new_frequency_penalty
582
-
583
- def set_logit_bias(self, logit_bias):
584
- logit_bias = logit_bias.split()
585
- bias_map = {}
586
- encoding = tiktoken.get_encoding("cl100k_base")
587
- for line in logit_bias:
588
- word, bias_amount = line.split(":")
589
- if word:
590
- for token in encoding.encode(word):
591
- bias_map[token] = float(bias_amount)
592
- self.logit_bias = bias_map
593
-
594
- def set_user_identifier(self, new_user_identifier):
595
- self.user_identifier = new_user_identifier
596
-
597
- def set_system_prompt(self, new_system_prompt):
598
- self.system_prompt = new_system_prompt
599
-
600
- def set_key(self, new_access_key):
601
- if "*" not in new_access_key:
602
- self.api_key = new_access_key.strip()
603
- msg = i18n("API密钥更改为了") + hide_middle_chars(self.api_key)
604
- logging.info(msg)
605
- return self.api_key, msg
606
- else:
607
- return gr.update(), gr.update()
608
-
609
- def set_single_turn(self, new_single_turn):
610
- self.single_turn = new_single_turn
611
-
612
- def reset(self):
613
- self.history = []
614
- self.all_token_counts = []
615
- self.interrupted = False
616
-         pathlib.Path(os.path.join(HISTORY_DIR, self.user_identifier, new_auto_history_filename(
-             os.path.join(HISTORY_DIR, self.user_identifier)))).touch()
-         return [], self.token_message([0])
-
-     def delete_first_conversation(self):
-         if self.history:
-             del self.history[:2]
-             del self.all_token_counts[0]
-         return self.token_message()
-
-     def delete_last_conversation(self, chatbot):
-         if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
-             msg = "The last reply contains an error message, so only the chatbot record is removed"
-             chatbot.pop()
-             return chatbot, self.history
-         if len(self.history) > 0:
-             self.history.pop()
-             self.history.pop()
-         if len(chatbot) > 0:
-             msg = "Removed one pair of chatbot messages"
-             chatbot.pop()
-         if len(self.all_token_counts) > 0:
-             msg = "Removed the token count record for one conversation pair"
-             self.all_token_counts.pop()
-         msg = "Removed one conversation pair"
-         return chatbot, msg
-
-     def token_message(self, token_lst=None):
-         if token_lst is None:
-             token_lst = self.all_token_counts
-         token_sum = 0
-         for i in range(len(token_lst)):
-             token_sum += sum(token_lst[: i + 1])
-         return i18n("Token 计数: ") + f"{sum(token_lst)}" + i18n(",本次对话累计消耗了 ") + f"{token_sum} tokens"
-
-     def save_chat_history(self, filename, chatbot, user_name):
-         if filename == "":
-             return
-         if not filename.endswith(".json"):
-             filename += ".json"
-         return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
-
-     def auto_save(self, chatbot):
-         history_file_path = get_history_filepath(self.user_identifier)
-         save_file(history_file_path, self.system_prompt,
-                   self.history, chatbot, self.user_identifier)
-
-     def export_markdown(self, filename, chatbot, user_name):
-         if filename == "":
-             return
-         if not filename.endswith(".md"):
-             filename += ".md"
-         return save_file(filename, self.system_prompt, self.history, chatbot, user_name)
-
-     def load_chat_history(self, filename, user_name):
-         logging.debug(f"{user_name} is loading chat history...")
-         logging.info(f"filename: {filename}")
-         if type(filename) != str and filename is not None:
-             filename = filename.name
-         try:
-             if "/" not in filename:
-                 history_file_path = os.path.join(
-                     HISTORY_DIR, user_name, filename)
-             else:
-                 history_file_path = filename
-             with open(history_file_path, "r", encoding="utf-8") as f:
-                 json_s = json.load(f)
-             try:
-                 if type(json_s["history"][0]) == str:
-                     logging.info("History file is in the legacy format, converting...")
-                     new_history = []
-                     for index, item in enumerate(json_s["history"]):
-                         if index % 2 == 0:
-                             new_history.append(construct_user(item))
-                         else:
-                             new_history.append(construct_assistant(item))
-                     json_s["history"] = new_history
-                     logging.info(new_history)
-             except:
-                 pass
-             logging.debug(f"{user_name} finished loading chat history")
-             self.history = json_s["history"]
-             return os.path.basename(filename), json_s["system"], json_s["chatbot"]
-         except:
-             # No chat history was found, or the history file failed to parse
-             logging.info(f"No chat history found for {filename}")
-             return gr.update(), self.system_prompt, gr.update()
-
-     def delete_chat_history(self, filename, user_name):
-         if filename == "CANCELED":
-             return gr.update(), gr.update(), gr.update()
-         if filename == "":
-             return i18n("你没有选择任何对话历史"), gr.update(), gr.update()
-         if not filename.endswith(".json"):
-             filename += ".json"
-         if "/" not in filename:
-             history_file_path = os.path.join(HISTORY_DIR, user_name, filename)
-         else:
-             history_file_path = filename
-         try:
-             os.remove(history_file_path)
-             return i18n("删除对话历史成功"), get_history_names(False, user_name), []
-         except:
-             logging.info(f"Failed to delete chat history {history_file_path}")
-             return i18n("对话历史") + filename + i18n("已经被删除啦"), gr.update(), gr.update()
-
-     def auto_load(self):
-         if self.user_identifier == "":
-             self.reset()
-             return self.system_prompt, gr.update()
-         history_file_path = get_history_filepath(self.user_identifier)
-         filename, system_prompt, chatbot = self.load_chat_history(
-             history_file_path, self.user_identifier)
-         return system_prompt, chatbot
-
-     def like(self):
-         """Like the last response; implement if needed."""
-         return gr.update()
-
-     def dislike(self):
-         """Dislike the last response; implement if needed."""
-         return gr.update()
-
-
- class Base_Chat_Langchain_Client(BaseLLMModel):
-     def __init__(self, model_name, user_name=""):
-         super().__init__(model_name, user=user_name)
-         self.need_api_key = False
-         self.model = self.setup_model()
-
-     def setup_model(self):
-         # implement this to set up the model, then return it
-         pass
-
-     def _get_langchain_style_history(self):
-         history = [SystemMessage(content=self.system_prompt)]
-         for i in self.history:
-             if i["role"] == "user":
-                 history.append(HumanMessage(content=i["content"]))
-             elif i["role"] == "assistant":
-                 history.append(AIMessage(content=i["content"]))
-         return history
-
-     def get_answer_at_once(self):
-         assert isinstance(
-             self.model, BaseChatModel), "model is not an instance of LangChain BaseChatModel"
-         history = self._get_langchain_style_history()
-         response = self.model.generate(history)
-         # len() is used as a rough character-count stand-in for the token count;
-         # the original sum(response.content) would raise TypeError on a str.
-         return response.content, len(response.content)
-
-     def get_answer_stream_iter(self):
-         it = CallbackToIterator()
-         assert isinstance(
-             self.model, BaseChatModel), "model is not an instance of LangChain BaseChatModel"
-         history = self._get_langchain_style_history()
-
-         def thread_func():
-             self.model(messages=history, callbacks=[
-                        ChuanhuCallbackHandler(it.callback)])
-             it.finish()
-         t = Thread(target=thread_func)
-         t.start()
-         partial_text = ""
-         for value in it:
-             partial_text += value
-             yield partial_text
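
A minimal sketch of the callback-to-iterator pattern that get_answer_stream_iter relies on, using only the standard library; the project's actual CallbackToIterator may differ in detail:

import queue
import threading

class CallbackToIterator:
    """Bridges a callback-based producer to a plain Python iterator."""
    _SENTINEL = object()

    def __init__(self):
        self._q = queue.Queue()

    def callback(self, token):
        self._q.put(token)           # called by the producer thread per chunk

    def finish(self):
        self._q.put(self._SENTINEL)  # signals end of stream

    def __iter__(self):
        while True:
            item = self._q.get()     # blocks until the producer supplies a chunk
            if item is self._SENTINEL:
                return
            yield item

# Usage: a worker thread feeds tokens, the caller iterates lazily.
it = CallbackToIterator()
threading.Thread(target=lambda: ([it.callback(t) for t in ["he", "llo"]], it.finish())).start()
print("".join(it))  # -> "hello"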
 
spaces/Dref360/spectral-metric/utils.py DELETED
@@ -1,7 +0,0 @@
- import numpy as np
-
- def show_most_confused(ds, source_intent, target_intent, estimator, class_names):
-     pair_name = f"{class_names[source_intent]} <> {class_names[target_intent]}"
-     closest_to_second = np.argsort([sample.sample_probability_norm[target_intent] for sample in estimator.similarity_arrays[source_intent].values()])[::-1][:10]
-     dataset_indices = estimator.class_indices[source_intent][closest_to_second]
-     return {pair_name: [ds[int(di)]["text"] for di in dataset_indices]}
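
Hypothetical usage of show_most_confused, assuming a fitted spectral-metric estimator and a Hugging Face dataset with a "text" column; the names dataset, estimator, and label_names here are illustrative, not from the source:

# estimator = CumulativeGradientEstimator(...); estimator.fit(features, labels)
confused = show_most_confused(ds=dataset, source_intent=3, target_intent=7,
                              estimator=estimator, class_names=label_names)
# -> {"refund <> cancel_order": [<texts of the 10 class-3 samples most similar to class 7>]}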
 
spaces/Eddycrack864/Applio-Inference/train/utils.py DELETED
@@ -1,500 +0,0 @@
- import os, traceback
- import glob
- import sys
- import argparse
- import logging
- import json
- import subprocess
- import numpy as np
- from scipy.io.wavfile import read
- import torch
-
- MATPLOTLIB_FLAG = False
-
- logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
- logger = logging
-
-
- def load_checkpoint_d(checkpoint_path, combd, sbd, optimizer=None, load_opt=1):
-     assert os.path.isfile(checkpoint_path)
-     checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
-
-     ##################
-     def go(model, bkey):
-         saved_state_dict = checkpoint_dict[bkey]
-         if hasattr(model, "module"):
-             state_dict = model.module.state_dict()
-         else:
-             state_dict = model.state_dict()
-         new_state_dict = {}
-         for k, v in state_dict.items():  # iterate over the shapes the model expects
-             try:
-                 new_state_dict[k] = saved_state_dict[k]
-                 if saved_state_dict[k].shape != state_dict[k].shape:
-                     print(
-                         "shape-%s-mismatch|need-%s|get-%s"
-                         % (k, state_dict[k].shape, saved_state_dict[k].shape)
-                     )
-                     raise KeyError
-             except:
-                 # logger.info(traceback.format_exc())
-                 logger.info("%s is not in the checkpoint" % k)  # missing from the pretrained checkpoint
-                 new_state_dict[k] = v  # fall back to the model's own randomly initialized value
-         if hasattr(model, "module"):
-             model.module.load_state_dict(new_state_dict, strict=False)
-         else:
-             model.load_state_dict(new_state_dict, strict=False)
-
-     go(combd, "combd")
-     go(sbd, "sbd")
-     #############
-     logger.info("Loaded model weights")
-
-     iteration = checkpoint_dict["iteration"]
-     learning_rate = checkpoint_dict["learning_rate"]
-     if (
-         optimizer is not None and load_opt == 1
-     ):  # If the optimizer state cannot be loaded (e.g. it is empty), it gets re-initialized,
-         # which may also break the LR schedule, so the caller catches this at the outermost
-         # level of the train script.
-         # try:
-         optimizer.load_state_dict(checkpoint_dict["optimizer"])
-         # except:
-         #     traceback.print_exc()
-     logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration))
-     return combd, sbd, optimizer, learning_rate, iteration  # return both loaded discriminators alongside the optimizer state
-
-
- # def load_checkpoint(checkpoint_path, model, optimizer=None):
- #     assert os.path.isfile(checkpoint_path)
- #     checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
- #     iteration = checkpoint_dict['iteration']
- #     learning_rate = checkpoint_dict['learning_rate']
- #     if optimizer is not None:
- #         optimizer.load_state_dict(checkpoint_dict['optimizer'])
- #     # print(1111)
- #     saved_state_dict = checkpoint_dict['model']
- #     # print(1111)
- #
- #     if hasattr(model, 'module'):
- #         state_dict = model.module.state_dict()
- #     else:
- #         state_dict = model.state_dict()
- #     new_state_dict = {}
- #     for k, v in state_dict.items():
- #         try:
- #             new_state_dict[k] = saved_state_dict[k]
- #         except:
- #             logger.info("%s is not in the checkpoint" % k)
- #             new_state_dict[k] = v
- #     if hasattr(model, 'module'):
- #         model.module.load_state_dict(new_state_dict)
- #     else:
- #         model.load_state_dict(new_state_dict)
- #     logger.info("Loaded checkpoint '{}' (epoch {})".format(
- #         checkpoint_path, iteration))
- #     return model, optimizer, learning_rate, iteration
- def load_checkpoint(checkpoint_path, model, optimizer=None, load_opt=1):
-     assert os.path.isfile(checkpoint_path)
-     checkpoint_dict = torch.load(checkpoint_path, map_location="cpu")
-
-     saved_state_dict = checkpoint_dict["model"]
-     if hasattr(model, "module"):
-         state_dict = model.module.state_dict()
-     else:
-         state_dict = model.state_dict()
-     new_state_dict = {}
-     for k, v in state_dict.items():  # iterate over the shapes the model expects
-         try:
-             new_state_dict[k] = saved_state_dict[k]
-             if saved_state_dict[k].shape != state_dict[k].shape:
-                 print(
-                     "shape-%s-mismatch|need-%s|get-%s"
-                     % (k, state_dict[k].shape, saved_state_dict[k].shape)
-                 )
-                 raise KeyError
-         except:
-             # logger.info(traceback.format_exc())
-             logger.info("%s is not in the checkpoint" % k)  # missing from the pretrained checkpoint
-             new_state_dict[k] = v  # fall back to the model's own randomly initialized value
-     if hasattr(model, "module"):
-         model.module.load_state_dict(new_state_dict, strict=False)
-     else:
-         model.load_state_dict(new_state_dict, strict=False)
-     logger.info("Loaded model weights")
-
-     iteration = checkpoint_dict["iteration"]
-     learning_rate = checkpoint_dict["learning_rate"]
-     if (
-         optimizer is not None and load_opt == 1
-     ):  # If the optimizer state cannot be loaded (e.g. it is empty), it gets re-initialized,
-         # which may also break the LR schedule, so the caller catches this at the outermost
-         # level of the train script.
-         # try:
-         optimizer.load_state_dict(checkpoint_dict["optimizer"])
-         # except:
-         #     traceback.print_exc()
-     logger.info("Loaded checkpoint '{}' (epoch {})".format(checkpoint_path, iteration))
-     return model, optimizer, learning_rate, iteration
-
-
- def save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path):
-     logger.info(
-         "Saving model and optimizer state at epoch {} to {}".format(
-             iteration, checkpoint_path
-         )
-     )
-     if hasattr(model, "module"):
-         state_dict = model.module.state_dict()
-     else:
-         state_dict = model.state_dict()
-     torch.save(
-         {
-             "model": state_dict,
-             "iteration": iteration,
-             "optimizer": optimizer.state_dict(),
-             "learning_rate": learning_rate,
-         },
-         checkpoint_path,
-     )
-
-
- def save_checkpoint_d(combd, sbd, optimizer, learning_rate, iteration, checkpoint_path):
-     logger.info(
-         "Saving model and optimizer state at epoch {} to {}".format(
-             iteration, checkpoint_path
-         )
-     )
-     if hasattr(combd, "module"):
-         state_dict_combd = combd.module.state_dict()
-     else:
-         state_dict_combd = combd.state_dict()
-     if hasattr(sbd, "module"):
-         state_dict_sbd = sbd.module.state_dict()
-     else:
-         state_dict_sbd = sbd.state_dict()
-     torch.save(
-         {
-             "combd": state_dict_combd,
-             "sbd": state_dict_sbd,
-             "iteration": iteration,
-             "optimizer": optimizer.state_dict(),
-             "learning_rate": learning_rate,
-         },
-         checkpoint_path,
-     )
-
-
- def summarize(
-     writer,
-     global_step,
-     scalars={},
-     histograms={},
-     images={},
-     audios={},
-     audio_sampling_rate=22050,
- ):
-     for k, v in scalars.items():
-         writer.add_scalar(k, v, global_step)
-     for k, v in histograms.items():
-         writer.add_histogram(k, v, global_step)
-     for k, v in images.items():
-         writer.add_image(k, v, global_step, dataformats="HWC")
-     for k, v in audios.items():
-         writer.add_audio(k, v, global_step, audio_sampling_rate)
-
-
- def latest_checkpoint_path(dir_path, regex="G_*.pth"):
-     f_list = glob.glob(os.path.join(dir_path, regex))
-     f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f))))
-     x = f_list[-1]
-     print(x)
-     return x
-
-
- def plot_spectrogram_to_numpy(spectrogram):
-     global MATPLOTLIB_FLAG
-     if not MATPLOTLIB_FLAG:
-         import matplotlib
-
-         matplotlib.use("Agg")
-         MATPLOTLIB_FLAG = True
-         mpl_logger = logging.getLogger("matplotlib")
-         mpl_logger.setLevel(logging.WARNING)
-     import matplotlib.pylab as plt
-     import numpy as np
-
-     fig, ax = plt.subplots(figsize=(10, 2))
-     im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none")
-     plt.colorbar(im, ax=ax)
-     plt.xlabel("Frames")
-     plt.ylabel("Channels")
-     plt.tight_layout()
-
-     fig.canvas.draw()
-     data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)  # np.fromstring is deprecated
-     data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-     plt.close()
-     return data
-
-
- def plot_alignment_to_numpy(alignment, info=None):
-     global MATPLOTLIB_FLAG
-     if not MATPLOTLIB_FLAG:
-         import matplotlib
-
-         matplotlib.use("Agg")
-         MATPLOTLIB_FLAG = True
-         mpl_logger = logging.getLogger("matplotlib")
-         mpl_logger.setLevel(logging.WARNING)
-     import matplotlib.pylab as plt
-     import numpy as np
-
-     fig, ax = plt.subplots(figsize=(6, 4))
-     im = ax.imshow(
-         alignment.transpose(), aspect="auto", origin="lower", interpolation="none"
-     )
-     fig.colorbar(im, ax=ax)
-     xlabel = "Decoder timestep"
-     if info is not None:
-         xlabel += "\n\n" + info
-     plt.xlabel(xlabel)
-     plt.ylabel("Encoder timestep")
-     plt.tight_layout()
-
-     fig.canvas.draw()
-     data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)  # np.fromstring is deprecated
-     data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
-     plt.close()
-     return data
-
-
- def load_wav_to_torch(full_path):
-     sampling_rate, data = read(full_path)
-     return torch.FloatTensor(data.astype(np.float32)), sampling_rate
-
-
- def load_filepaths_and_text(filename, split="|"):
-     with open(filename, encoding='utf-8') as f:
-         filepaths_and_text = [line.strip().split(split) for line in f]
-     filepaths_and_text = [item for item in filepaths_and_text if len(item) == 5]  # ensure there are 5 items.
-     return filepaths_and_text
-
-
- def get_hparams(init=True):
-     """
-     todo:
-         the trailing group of seven arguments:
-             save frequency, total epochs                              done
-             batch size                                                done
-             pretrainG, pretrainD                                      done
-             GPU ids: os.environ["CUDA_VISIBLE_DEVICES"]               done
-             if_latest                                                 done
-         model: if_f0                                                  done
-         sample rate: pick the config automatically                    done
-         whether to cache the dataset in GPU: if_cache_data_in_gpu     done
-
-     -m:
-         decide the training_files path automatically; replace
-         hps.data.training_files in train_nsf_load_pretrain.py         done
-     -c is no longer needed
-     """
-     parser = argparse.ArgumentParser()
-     # parser.add_argument('-c', '--config', type=str, default="configs/40k.json", help='JSON file for configuration')
-     parser.add_argument(
-         "-se",
-         "--save_every_epoch",
-         type=int,
-         required=True,
-         help="checkpoint save frequency (epoch)",
-     )
-     parser.add_argument(
-         "-te", "--total_epoch", type=int, required=True, help="total_epoch"
-     )
-     parser.add_argument(
-         "-pg", "--pretrainG", type=str, default="", help="Pretrained Generator path"
-     )
-     parser.add_argument(
-         "-pd", "--pretrainD", type=str, default="", help="Pretrained Discriminator path"
-     )
-     parser.add_argument("-g", "--gpus", type=str, default="0", help="split by -")
-     parser.add_argument(
-         "-bs", "--batch_size", type=int, required=True, help="batch size"
-     )
-     parser.add_argument(
-         "-e", "--experiment_dir", type=str, required=True, help="experiment dir"
-     )  # -m
-     parser.add_argument(
-         "-sr", "--sample_rate", type=str, required=True, help="sample rate, 32k/40k/48k"
-     )
-     parser.add_argument(
-         "-sw",
-         "--save_every_weights",
-         type=str,
-         default="0",
-         help="save the extracted model in weights directory when saving checkpoints",
-     )
-     parser.add_argument(
-         "-v", "--version", type=str, required=True, help="model version"
-     )
-     parser.add_argument(
-         "-f0",
-         "--if_f0",
-         type=int,
-         required=True,
-         help="use f0 as one of the inputs of the model, 1 or 0",
-     )
-     parser.add_argument(
-         "-l",
-         "--if_latest",
-         type=int,
-         required=True,
-         help="if only save the latest G/D pth file, 1 or 0",
-     )
-     parser.add_argument(
-         "-c",
-         "--if_cache_data_in_gpu",
-         type=int,
-         required=True,
-         help="if caching the dataset in GPU memory, 1 or 0",
-     )
-     parser.add_argument(
-         "-li", "--log_interval", type=int, required=True, help="log interval"
-     )
-
-     args = parser.parse_args()
-     name = args.experiment_dir
-     experiment_dir = os.path.join("./logs", args.experiment_dir)
-
-     if not os.path.exists(experiment_dir):
-         os.makedirs(experiment_dir)
-
-     if args.version == "v1" or args.sample_rate == "40k":
-         config_path = "configs/%s.json" % args.sample_rate
-     else:
-         config_path = "configs/%s_v2.json" % args.sample_rate
-     config_save_path = os.path.join(experiment_dir, "config.json")
-     if init:
-         with open(config_path, "r") as f:
-             data = f.read()
-         with open(config_save_path, "w") as f:
-             f.write(data)
-     else:
-         with open(config_save_path, "r") as f:
-             data = f.read()
-     config = json.loads(data)
-
-     hparams = HParams(**config)
-     hparams.model_dir = hparams.experiment_dir = experiment_dir
-     hparams.save_every_epoch = args.save_every_epoch
-     hparams.name = name
-     hparams.total_epoch = args.total_epoch
-     hparams.pretrainG = args.pretrainG
-     hparams.pretrainD = args.pretrainD
-     hparams.version = args.version
-     hparams.gpus = args.gpus
-     hparams.train.batch_size = args.batch_size
-     hparams.sample_rate = args.sample_rate
-     hparams.if_f0 = args.if_f0
-     hparams.if_latest = args.if_latest
-     hparams.save_every_weights = args.save_every_weights
-     hparams.if_cache_data_in_gpu = args.if_cache_data_in_gpu
-     hparams.data.training_files = "%s/filelist.txt" % experiment_dir
-
-     hparams.train.log_interval = args.log_interval
-
-     # Update log_interval in the 'train' section of the config dictionary
-     config["train"]["log_interval"] = args.log_interval
-
-     # Save the updated config back to config_save_path
-     with open(config_save_path, "w") as f:
-         json.dump(config, f, indent=4)
-
-     return hparams
-
-
- def get_hparams_from_dir(model_dir):
-     config_save_path = os.path.join(model_dir, "config.json")
-     with open(config_save_path, "r") as f:
-         data = f.read()
-     config = json.loads(data)
-
-     hparams = HParams(**config)
-     hparams.model_dir = model_dir
-     return hparams
-
-
- def get_hparams_from_file(config_path):
-     with open(config_path, "r") as f:
-         data = f.read()
-     config = json.loads(data)
-
-     hparams = HParams(**config)
-     return hparams
-
-
- def check_git_hash(model_dir):
-     source_dir = os.path.dirname(os.path.realpath(__file__))
-     if not os.path.exists(os.path.join(source_dir, ".git")):
-         logger.warning(
-             "{} is not a git repository, therefore hash value comparison will be ignored.".format(
-                 source_dir
-             )
-         )
-         return
-
-     cur_hash = subprocess.getoutput("git rev-parse HEAD")
-
-     path = os.path.join(model_dir, "githash")
-     if os.path.exists(path):
-         saved_hash = open(path).read()
-         if saved_hash != cur_hash:
-             logger.warning(
-                 "git hash values are different. {}(saved) != {}(current)".format(
-                     saved_hash[:8], cur_hash[:8]
-                 )
-             )
-     else:
-         open(path, "w").write(cur_hash)
-
-
- def get_logger(model_dir, filename="train.log"):
-     global logger
-     logger = logging.getLogger(os.path.basename(model_dir))
-     logger.setLevel(logging.DEBUG)
-
-     formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
-     if not os.path.exists(model_dir):
-         os.makedirs(model_dir)
-     h = logging.FileHandler(os.path.join(model_dir, filename))
-     h.setLevel(logging.DEBUG)
-     h.setFormatter(formatter)
-     logger.addHandler(h)
-     return logger
-
-
- class HParams:
-     def __init__(self, **kwargs):
-         for k, v in kwargs.items():
-             if type(v) == dict:
-                 v = HParams(**v)
-             self[k] = v
-
-     def keys(self):
-         return self.__dict__.keys()
-
-     def items(self):
-         return self.__dict__.items()
-
-     def values(self):
-         return self.__dict__.values()
-
-     def __len__(self):
-         return len(self.__dict__)
-
-     def __getitem__(self, key):
-         return getattr(self, key)
-
-     def __setitem__(self, key, value):
-         return setattr(self, key, value)
-
-     def __contains__(self, key):
-         return key in self.__dict__
-
-     def __repr__(self):
-         return self.__dict__.__repr__()
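
A short illustration of the HParams wrapper above, which exposes nested config dicts through both attribute and item access; the config values are made up:

config = {"train": {"batch_size": 8, "lr": 2e-4}, "model": {"hidden_channels": 192}}
hps = HParams(**config)
assert hps.train.batch_size == 8   # nested dicts become nested HParams with attribute access
hps["train"]["lr"] = 1e-4          # item access works too, via __getitem__/__setitem__
print(hps)                         # repr delegates to the underlying __dict__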
 
spaces/FER-Universe/Face-Benchmarking/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Face Benchmarking
- emoji: 🏃
- colorFrom: purple
- colorTo: green
- sdk: gradio
- sdk_version: 3.32.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/FlippFuzz/whisper-webui/src/hooks/subTaskProgressListener.py DELETED
@@ -1,37 +0,0 @@
- from src.hooks.progressListener import ProgressListener
-
- from typing import Union
-
- class SubTaskProgressListener(ProgressListener):
-     """
-     A sub-task listener that reports the progress of a sub task to a base task listener.
-
-     Parameters
-     ----------
-     base_task_listener : ProgressListener
-         The base progress listener to accumulate overall progress in.
-     base_task_total : float
-         The maximum total progress that will be reported to the base progress listener.
-     sub_task_start : float
-         The starting progress of a sub task, with respect to the base progress listener.
-     sub_task_total : float
-         The total amount of progress a sub task will report to the base progress listener.
-     """
-     def __init__(
-         self,
-         base_task_listener: ProgressListener,
-         base_task_total: float,
-         sub_task_start: float,
-         sub_task_total: float,
-     ):
-         self.base_task_listener = base_task_listener
-         self.base_task_total = base_task_total
-         self.sub_task_start = sub_task_start
-         self.sub_task_total = sub_task_total
-
-     def on_progress(self, current: Union[int, float], total: Union[int, float]):
-         sub_task_progress_frac = current / total
-         sub_task_progress = self.sub_task_start + self.sub_task_total * sub_task_progress_frac
-         self.base_task_listener.on_progress(sub_task_progress, self.base_task_total)
-
-     def on_finished(self):
-         self.base_task_listener.on_progress(self.sub_task_start + self.sub_task_total, self.base_task_total)
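
A hypothetical usage sketch of SubTaskProgressListener: splitting one overall progress range (0-100) between two sequential sub tasks, with a trivial stand-in base listener for illustration:

class PrintListener(ProgressListener):
    def on_progress(self, current, total):
        print(f"{current:.0f}/{total:.0f}")

base = PrintListener()
# The first sub task covers 0-60 of the base range, the second covers 60-100.
download = SubTaskProgressListener(base, base_task_total=100, sub_task_start=0, sub_task_total=60)
transcribe = SubTaskProgressListener(base, base_task_total=100, sub_task_start=60, sub_task_total=40)
download.on_progress(50, 100)   # prints 30/100 (halfway through the first 60%)
download.on_finished()          # prints 60/100
transcribe.on_progress(5, 10)   # prints 80/100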
 
spaces/FridaZuley/RVC_HFKawaii/julius/utils.py DELETED
@@ -1,101 +0,0 @@
- # File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
- # Author: adefossez, 2020
- """
- Non signal processing related utilities.
- """
-
- import inspect
- import typing as tp
- import sys
- import time
-
-
- def simple_repr(obj, attrs: tp.Optional[tp.Sequence[str]] = None,
-                 overrides: dict = {}):
-     """
-     Return a simple representation string for `obj`.
-     If `attrs` is not None, it should be a list of attributes to include.
-     """
-     params = inspect.signature(obj.__class__).parameters
-     attrs_repr = []
-     if attrs is None:
-         attrs = list(params.keys())
-     for attr in attrs:
-         display = False
-         if attr in overrides:
-             value = overrides[attr]
-         elif hasattr(obj, attr):
-             value = getattr(obj, attr)
-         else:
-             continue
-         if attr in params:
-             param = params[attr]
-             if param.default is inspect._empty or value != param.default:  # type: ignore
-                 display = True
-         else:
-             display = True
-
-         if display:
-             attrs_repr.append(f"{attr}={value}")
-     return f"{obj.__class__.__name__}({','.join(attrs_repr)})"
-
-
- class MarkdownTable:
-     """
-     Simple Markdown table generator. The column titles should be large enough
-     for the line contents. This will right-align everything.
-
-     >>> import io  # we use io purely for test purposes, default is sys.stdout.
-     >>> file = io.StringIO()
-     >>> table = MarkdownTable(["Item Name", "Price"], file=file)
-     >>> table.header(); table.line(["Honey", "5"]); table.line(["Car", "5,000"])
-     >>> print(file.getvalue().strip())  # Strip for test purposes
-     | Item Name | Price |
-     |-----------|-------|
-     |     Honey |     5 |
-     |       Car | 5,000 |
-     """
-     def __init__(self, columns, file=sys.stdout):
-         self.columns = columns
-         self.file = file
-
-     def _writeln(self, line):
-         self.file.write("|" + "|".join(line) + "|\n")
-
-     def header(self):
-         self._writeln(f" {col} " for col in self.columns)
-         self._writeln("-" * (len(col) + 2) for col in self.columns)
-
-     def line(self, line):
-         out = []
-         for val, col in zip(line, self.columns):
-             val = format(val, '>' + str(len(col)))
-             out.append(" " + val + " ")
-         self._writeln(out)
-
-
- class Chrono:
-     """
-     Measures elapsed time, calling `torch.cuda.synchronize` if necessary.
-     `Chrono` instances can be used as context managers (e.g. with `with`).
-     Upon exit of the block, you can access the duration of the block in seconds
-     with the `duration` attribute.
-
-     >>> with Chrono() as chrono:
-     ...     _ = sum(range(10_000))
-     ...
-     >>> print(chrono.duration < 10)  # Should be true unless on a really slow computer.
-     True
-     """
-     def __init__(self):
-         self.duration = None
-
-     def __enter__(self):
-         self._begin = time.time()
-         return self
-
-     def __exit__(self, exc_type, exc_value, exc_traceback):
-         import torch
-         if torch.cuda.is_available():
-             torch.cuda.synchronize()
-         self.duration = time.time() - self._begin
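
A short illustration of simple_repr, showing how attributes that still hold their signature defaults are omitted; the LowPass class here is a made-up toy, not from julius:

class LowPass:
    def __init__(self, cutoff, zeros=8):
        self.cutoff = cutoff
        self.zeros = zeros
    def __repr__(self):
        return simple_repr(self)

print(LowPass(0.25))      # LowPass(cutoff=0.25) -- zeros kept its default, so it is omitted
print(LowPass(0.25, 16))  # LowPass(cutoff=0.25,zeros=16)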
 
spaces/FridaZuley/RVC_HFKawaii/train/process_ckpt.py DELETED
@@ -1,259 +0,0 @@
- import torch, traceback, os, pdb, sys
-
- now_dir = os.getcwd()
- sys.path.append(now_dir)
- from collections import OrderedDict
- from i18n import I18nAuto
-
- i18n = I18nAuto()
-
-
- def savee(ckpt, sr, if_f0, name, epoch, version, hps):
-     try:
-         opt = OrderedDict()
-         opt["weight"] = {}
-         for key in ckpt.keys():
-             if "enc_q" in key:
-                 continue
-             opt["weight"][key] = ckpt[key].half()
-         opt["config"] = [
-             hps.data.filter_length // 2 + 1,
-             32,
-             hps.model.inter_channels,
-             hps.model.hidden_channels,
-             hps.model.filter_channels,
-             hps.model.n_heads,
-             hps.model.n_layers,
-             hps.model.kernel_size,
-             hps.model.p_dropout,
-             hps.model.resblock,
-             hps.model.resblock_kernel_sizes,
-             hps.model.resblock_dilation_sizes,
-             hps.model.upsample_rates,
-             hps.model.upsample_initial_channel,
-             hps.model.upsample_kernel_sizes,
-             hps.model.spk_embed_dim,
-             hps.model.gin_channels,
-             hps.data.sampling_rate,
-         ]
-         opt["info"] = "%sepoch" % epoch
-         opt["sr"] = sr
-         opt["f0"] = if_f0
-         opt["version"] = version
-         torch.save(opt, "weights/%s.pth" % name)
-         return "Success."
-     except:
-         return traceback.format_exc()
-
-
- def show_info(path):
-     try:
-         a = torch.load(path, map_location="cpu")
-         return "Epochs: %s\nSample rate: %s\nPitch guidance: %s\nRVC Version: %s" % (
-             a.get("info", "None"),
-             a.get("sr", "None"),
-             a.get("f0", "None"),
-             a.get("version", "None"),
-         )
-     except:
-         return traceback.format_exc()
-
-
- def extract_small_model(path, name, sr, if_f0, info, version):
-     try:
-         ckpt = torch.load(path, map_location="cpu")
-         if "model" in ckpt:
-             ckpt = ckpt["model"]
-         opt = OrderedDict()
-         opt["weight"] = {}
-         for key in ckpt.keys():
-             if "enc_q" in key:
-                 continue
-             opt["weight"][key] = ckpt[key].half()
-         if sr == "40k":
-             opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1",
-                              [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
-                              [10, 10, 2, 2], 512, [16, 16, 4, 4], 109, 256, 40000]
-         elif sr == "48k":
-             if version == "v1":
-                 opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1",
-                                  [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
-                                  [10, 6, 2, 2, 2], 512, [16, 16, 4, 4, 4], 109, 256, 48000]
-             else:
-                 opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1",
-                                  [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
-                                  [12, 10, 2, 2], 512, [24, 20, 4, 4], 109, 256, 48000]
-         elif sr == "32k":
-             if version == "v1":
-                 opt["config"] = [513, 32, 192, 192, 768, 2, 6, 3, 0, "1",
-                                  [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
-                                  [10, 4, 2, 2, 2], 512, [16, 16, 4, 4, 4], 109, 256, 32000]
-             else:
-                 opt["config"] = [513, 32, 192, 192, 768, 2, 6, 3, 0, "1",
-                                  [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
-                                  [10, 8, 2, 2], 512, [20, 16, 4, 4], 109, 256, 32000]
-         if info == "":
-             info = "Extracted model."
-         opt["info"] = info
-         opt["version"] = version
-         opt["sr"] = sr
-         opt["f0"] = int(if_f0)
-         torch.save(opt, "weights/%s.pth" % name)
-         return "Success."
-     except:
-         return traceback.format_exc()
-
-
- def change_info(path, info, name):
-     try:
-         ckpt = torch.load(path, map_location="cpu")
-         ckpt["info"] = info
-         if name == "":
-             name = os.path.basename(path)
-         torch.save(ckpt, "weights/%s" % name)
-         return "Success."
-     except:
-         return traceback.format_exc()
-
-
- def merge(path1, path2, alpha1, sr, f0, info, name, version):
-     try:
-
-         def extract(ckpt):
-             a = ckpt["model"]
-             opt = OrderedDict()
-             opt["weight"] = {}
-             for key in a.keys():
-                 if "enc_q" in key:
-                     continue
-                 opt["weight"][key] = a[key]
-             return opt
-
-         ckpt1 = torch.load(path1, map_location="cpu")
-         ckpt2 = torch.load(path2, map_location="cpu")
-         cfg = ckpt1["config"]
-         if "model" in ckpt1:
-             ckpt1 = extract(ckpt1)
-         else:
-             ckpt1 = ckpt1["weight"]
-         if "model" in ckpt2:
-             ckpt2 = extract(ckpt2)
-         else:
-             ckpt2 = ckpt2["weight"]
-         if sorted(list(ckpt1.keys())) != sorted(list(ckpt2.keys())):
-             return "Failed to merge the models. The model architectures are not the same."
-         opt = OrderedDict()
-         opt["weight"] = {}
-         for key in ckpt1.keys():
-             # try:
-             if key == "emb_g.weight" and ckpt1[key].shape != ckpt2[key].shape:
-                 min_shape0 = min(ckpt1[key].shape[0], ckpt2[key].shape[0])
-                 opt["weight"][key] = (
-                     alpha1 * (ckpt1[key][:min_shape0].float())
-                     + (1 - alpha1) * (ckpt2[key][:min_shape0].float())
-                 ).half()
-             else:
-                 opt["weight"][key] = (
-                     alpha1 * (ckpt1[key].float()) + (1 - alpha1) * (ckpt2[key].float())
-                 ).half()
-             # except:
-             #     pdb.set_trace()
-         opt["config"] = cfg
-         """
-         if(sr=="40k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 10, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 40000]
-         elif(sr=="48k"):opt["config"] = [1025, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10,6,2,2,2], 512, [16, 16, 4, 4], 109, 256, 48000]
-         elif(sr=="32k"):opt["config"] = [513, 32, 192, 192, 768, 2, 6, 3, 0, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10, 4, 2, 2, 2], 512, [16, 16, 4, 4,4], 109, 256, 32000]
-         """
-         opt["sr"] = sr
-         opt["f0"] = 1 if f0 else 0
-         opt["version"] = version
-         opt["info"] = info
-         torch.save(opt, "weights/%s.pth" % name)
-         return "Success."
-     except:
-         return traceback.format_exc()
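
The core of merge() above is a per-tensor linear interpolation between two state dicts. A minimal standalone sketch of that step, using toy tensors rather than real RVC checkpoints:

import torch

w1 = {"layer.weight": torch.ones(2, 2)}
w2 = {"layer.weight": torch.zeros(2, 2)}
alpha = 0.25
# 25% of model 1 plus 75% of model 2, cast back to half precision like merge() does
merged = {k: (alpha * w1[k].float() + (1 - alpha) * w2[k].float()).half() for k in w1}
print(merged["layer.weight"])  # every entry is 0.25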
 
spaces/GT4SD/PatentToolkit/app.py DELETED
@@ -1,576 +0,0 @@
- import os
- import gradio as gr
- import pandas as pd
- import torch
- import torch.nn as nn
- import transformers
- from transformers import AutoTokenizer, AutoConfig, LlamaForCausalLM, LlamaTokenizer, GenerationConfig, AutoModel, pipeline
- import tensorflow as tf
- import numpy as np
- import math
- import time
- import csv
- import nltk
- from nltk.tokenize import word_tokenize
- from nltk.corpus import stopwords
- nltk.download('stopwords')
- nltk.download('punkt')
- import string
- import huggingface_hub
- from huggingface_hub import Repository
- from datetime import datetime
-
- ########### Import Classifier Embeddings #########
- class_embeddings = pd.read_csv('Embeddings/MainClassEmbeddings.csv')
-
- ########### DATA CLEANER VARIABLES #############
- all_stopwords = stopwords.words('english')  # Making sure to only use English stopwords
- extra_stopwords = ['ii', 'iii']  # Extra stopwords to be removed from dataset/input abstracts
- all_stopwords.extend(extra_stopwords)
-
- modelpath = os.environ.get("MODEL_PATH")
-
- ########### GET CLAIMED TRAINED MODEL ###########
- tokenizer = LlamaTokenizer.from_pretrained(modelpath)
-
- model = LlamaForCausalLM.from_pretrained(
-     modelpath,
-     load_in_8bit=True,
-     device_map='auto',
- )
-
- HF_TOKEN = os.environ.get("HF_TOKEN")
-
- DATASET_REPO_URL = "https://huggingface.co/datasets/thepolymerguy/logger"
- DATA_FILENAME = "data.csv"
- DATA_FILE = os.path.join("data", DATA_FILENAME)
-
- repo = Repository(
-     local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
- )
-
- def store_log():
-     with open(DATA_FILE, "a") as csvfile:
-         writer = csv.DictWriter(csvfile, fieldnames=["count", "time"])
-         writer.writerow(
-             {"count": 1, "time": str(datetime.now())}
-         )
-     commit_url = repo.push_to_hub()
-     print(commit_url)
-     return
-
- ########## DEFINING FUNCTIONS ###################
-
- def mean_pooling(model_output, attention_mask):
-     token_embeddings = model_output[0]
-     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
-     return tf.reduce_sum(token_embeddings * input_mask_expanded, 1) / tf.clip_by_value(input_mask_expanded.sum(1), clip_value_min=1e-9, clip_value_max=math.inf)
-
- def broad_scope_class_predictor(class_embeddings, abstract_embedding, SearchType, N=5, Sensitivity='Medium'):
-     predictions = pd.DataFrame(columns=['Class Name', 'Score'])
-     for i in range(len(class_embeddings)):
-         class_name = class_embeddings.iloc[i, 0]
-         embedding = class_embeddings.iloc[i, 2]
-         embedding = convert_saved_embeddings(embedding)
-         abstract_embedding = abstract_embedding.numpy()
-         abstract_embedding = torch.from_numpy(abstract_embedding)
-         cos = torch.nn.CosineSimilarity(dim=1)
-         score = cos(abstract_embedding, embedding).numpy().tolist()
-         result = [class_name, score[0]]
-         predictions.loc[len(predictions)] = result
-     if Sensitivity == 'High':
-         Threshold = 0.5
-     elif Sensitivity == 'Medium':
-         Threshold = 0.40
-     elif Sensitivity == 'Low':
-         Threshold = 0.35
-     GreenLikelihood = 'False'
-     HighestSimilarity = predictions.nlargest(N, ['Score'])
-     HighestSimilarity = HighestSimilarity['Class Name'].tolist()
-     HighestSimilarityClass = [x.split('/')[0] for x in HighestSimilarity]
-     if SearchType == 'Google Patent Search':
-         Links = [f'https://patents.google.com/?q=({x}%2f00)&oq={x}%2f00' for x in HighestSimilarityClass]
-     elif SearchType == 'Espacenet Patent Search':
-         Links = [f'https://worldwide.espacenet.com/patent/search?q=cpc%3D{x}%2F00%2Flow' for x in HighestSimilarityClass]
-     HighestSimilarity = pd.DataFrame({'Class': HighestSimilarity, 'Links': Links})
-     return HighestSimilarity
-
-
- def sentence_embedder(sentences, model_path):
-     tokenizer = AutoTokenizer.from_pretrained(model_path)  # instantiating the sentence embedder using the HuggingFace library
-     model = AutoModel.from_pretrained(model_path, from_tf=True)  # making a model instance
-     encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
-     # Compute token embeddings
-     with torch.no_grad():
-         model_output = model(**encoded_input)
-     sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])  # outputs a (1, 384) tensor representation of the input text
-     return sentence_embeddings
-
-
- def add_text(history, text):
-     history = history + [(text, None)]
-     return history, ""
-
-
- def convert_saved_embeddings(embedding_string):
-     """
-     Prepares pre-computed embeddings for comparison with new abstract embeddings.
-     Pre-computed embeddings are saved as tensors in string format, so they need to be converted back to numpy arrays in order to calculate cosine similarity.
-     :param embedding_string:
-     :return: Should be a single tensor with dims (,384) in string format
-     """
-     embedding = embedding_string.replace('(', '')
-     embedding = embedding.replace(')', '')
-     embedding = embedding.replace('[', '')
-     embedding = embedding.replace(']', '')
-     embedding = embedding.replace('tensor', '')
-     embedding = embedding.replace(' ', '')
-     embedding = embedding.split(',')
-     embedding = [float(x) for x in embedding]
-     embedding = np.array(embedding)
-     embedding = np.expand_dims(embedding, axis=0)
-     embedding = torch.from_numpy(embedding)
-     return embedding
-
- ########## LOADING PRE-COMPUTED EMBEDDINGS ##########
-
- def clean_data(input, type='Dataframe'):
-     if type == 'Dataframe':
-         cleaneddf = pd.DataFrame(columns=['Class', 'Description'])
-         for i in range(0, len(input)):
-             row_list = input.loc[i, :].values.flatten().tolist()
-             noNaN_row = [x for x in row_list if str(x) != 'nan']
-             listrow = []
-             if len(noNaN_row) > 0:
-                 row = noNaN_row[:-1]
-                 row = [x.strip() for x in row]
-                 row = (" ").join(row)
-                 text_tokens = word_tokenize(row)  # splits abstracts into individual tokens to allow removal of stopwords by list comprehension
-                 Stopword_Filtered_List = [word for word in text_tokens if not word in all_stopwords]  # removes stopwords
-                 row = (" ").join(Stopword_Filtered_List)  # returns abstract to string form
-                 removechars = ['[', ']', '{', '}', ';', '(', ')', ',', '.', ':', '/', '-', '#', '?', '@', '£', '$']
-                 for char in removechars:
-                     row = list(map(lambda x: x.replace(char, ''), row))
-
-                 row = ''.join(row)
-                 wnum = row.split(' ')
-                 wnum = [x.lower() for x in wnum]
-                 # remove duplicate words
-                 wnum = list(dict.fromkeys(wnum))
-                 # remove numbers
-                 wonum = []
-                 for x in wnum:
-                     xv = list(x)
-                     xv = [i.isnumeric() for i in xv]
-                     if True in xv:
-                         continue
-                     else:
-                         wonum.append(x)
-                 row = ' '.join(wonum)
-                 l = [noNaN_row[-1], row]
-                 cleaneddf.loc[len(cleaneddf)] = l
-         cleaneddf = cleaneddf.drop_duplicates(subset=['Description'])
-         cleaneddf.to_csv('E:/Users/eeo21/Startup/CPC_Classifications_List/additionalcleanedclasses.csv', index=False)
-         return cleaneddf
-
-     elif type == 'String':
-         text_tokens = word_tokenize(input)  # splits abstracts into individual tokens to allow removal of stopwords by list comprehension
-         Stopword_Filtered_List = [word for word in text_tokens if not word in all_stopwords]  # removes stopwords
-         row = (" ").join(Stopword_Filtered_List)  # returns abstract to string form
-         removechars = ['[', ']', '{', '}', ';', '(', ')', ',', '.', ':', '/', '-', '#', '?', '@', '£', '$']
-         for char in removechars:
-             row = list(map(lambda x: x.replace(char, ''), row))
-         row = ''.join(row)
-         wnum = row.split(' ')
-         wnum = [x.lower() for x in wnum]
-         # remove duplicate words
-         wnum = list(dict.fromkeys(wnum))
-         # remove numbers
-         wonum = []
-         for x in wnum:
-             xv = list(x)
-             xv = [i.isnumeric() for i in xv]
-             if True in xv:
-                 continue
-             else:
-                 wonum.append(x)
-         row = ' '.join(wonum)
-         return row
-
- def classifier(userin, SearchType):
-     clean_in = clean_data(userin, type='String')
-     in_emb = sentence_embedder(clean_in, 'Model_bert')
-
-     Number = 10
-     broad_scope_predictions = broad_scope_class_predictor(class_embeddings, in_emb, SearchType, Number, Sensitivity='High')
-
-     class_links = []
-     for i in range(Number):
-         class_links.append("[[{}]]({})".format(broad_scope_predictions['Class'][i], broad_scope_predictions['Links'][i]))
-
-     md_class = '\n'.join(class_links)
-
-     store_log()
-
-     return md_class
-
- def generateresponse(history, temp, top_p, tokens):
-
-     global model
-     global tokenizer
-
-     user = history[-1][0]
-
-     PROMPT = f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
- ### Instruction:
- {user}
- ### Response:"""
-
-     pipe = pipeline(
-         "text-generation",
-         model=model,
-         tokenizer=tokenizer,
-         max_length=tokens,
-         temperature=temp,
-         top_p=top_p,
-         repetition_penalty=1.15
-     )
-
-     outputs = pipe(PROMPT)
-     outputs = outputs[0]['generated_text']
-     outputs = str(outputs).split('### Response')[1]
-
-     response = f"Response{outputs}"
-
-     store_log()
-
-     return response
-
- def run_model(userin, dropd, temp, top_p, tokens):
-
-     global model
-     global tokenizer
-
-     if dropd in ["An apparatus", "A method of use", "A method", "A method of manufacturing", "A system"]:
-         PROMPT = claim_selector(userin, dropd)
-     elif dropd in ["Generate a Detailed Description Paragraph", "Generate an Abstract", "What are the Benefits/Technical Effects"]:
-         PROMPT = desc_selector(userin, dropd)
-
-     pipe = pipeline(
-         "text-generation",
-         model=model,
-         tokenizer=tokenizer,
-         max_length=tokens,
-         temperature=temp,
-         top_p=top_p,
-         repetition_penalty=1.15
-     )
-
-     outputs = pipe(PROMPT)
-
-     outputs = outputs[0]['generated_text']
-     outputs = str(outputs).split('### Response')[1]
-     outputs = outputs.split('\n \n \n \n*')[0]
-
-     response = f"Response{outputs}"
-
-     store_log()
-
-     return response
-
- def prosecute(application, priorart, temp, top_p, tokens):
-
-     global model
-     global tokenizer
-
-     pipe = pipeline(
-         "text-generation",
-         model=model,
-         tokenizer=tokenizer,
-         max_length=tokens,
-         temperature=temp,
-         top_p=top_p,
-         repetition_penalty=1.15
-     )
-
-     PROMPT = f"""
- Draft an argument for the patentability in favour of the application using the European Patent Office Problem Solution approach by summarising the difference between the Application and the prior art. If there is no difference, say that the present invention is not novel/inventive.
-
- Application: {application}
-
- Prior Art: {priorart}
-
- ### Response: The objective technical problem solved by the present invention"""
-
-     outputs = pipe(PROMPT)
-
-     outputs = outputs[0]['generated_text']
-     outputs = str(outputs).split('### Response')[1]
-     outputs = outputs.split('\n \n \n \n*')[0]
-
-     response = f"Response{outputs}"
-
-     store_log()
-
-     return response
-
- def ideator(userin, temp, top_p, tokens):
-
-     global model
-     global tokenizer
-
-     pipe = pipeline(
-         "text-generation",
-         model=model,
-         tokenizer=tokenizer,
-         max_length=tokens,
-         temperature=temp,
-         top_p=top_p,
-         repetition_penalty=1.15
-     )
-
-     PROMPT = f"""
- How can I make {userin}
-
- ### Response: You could implement the invention as follows:"""
-
-     outputs = pipe(PROMPT)
-
-     outputs = outputs[0]['generated_text']
-     outputs = str(outputs).split('### Response')[1]
-     outputs = outputs.split('\n \n \n \n*')[0]
-
-     response = f"Response{outputs}"
-
-     store_log()
-
-     return response
-
- def Chat(userin, temp, top_p, tokens):
-
-     global model
-     global tokenizer
-
-     pipe = pipeline(
-         "text-generation",
-         model=model,
-         tokenizer=tokenizer,
-         max_length=tokens,
-         temperature=temp,
-         top_p=top_p,
-         repetition_penalty=1.15
-     )
-
-     PROMPT = f"""Below is a query from a user. Respond appropriately to the query.
- ### Query:
- {userin}
- ### Response:"""
-
-     outputs = pipe(PROMPT)
-
-     outputs = outputs[0]['generated_text']
-     outputs = str(outputs).split('### Response')[1]
-     outputs = outputs.split('\n \n \n \n*')[0]
-
-     response = f"Response{outputs}"
-
-     store_log()
-
-     return response
-
- def claim_selector(userin, dropd):
-
-     PROMPT = f"""
- Draft a patent claim 1 for {dropd} for the following invention: {userin}
- ### Response:{dropd} comprising:"""
-
-     return PROMPT
-
- def desc_selector(userin, dropd):
-
-     PROMPT = f"""
- {dropd} for a patent application for the following invention: {userin}
- ### Response:"""
-
-     return PROMPT
-
- ############# GRADIO APP ###############
-
- theme = gr.themes.Base(
-     primary_hue="indigo",
- ).set(
-     prose_text_size='*text_sm'
- )
-
- with gr.Blocks(title='Patent Toolkit', theme=theme) as demo:
-
-     gr.Markdown("""
-     # GENERATIVE TOOLKIT FOR PATENT ATTORNEYS AND INVENTORS
-     The patenting process can be complex, time-consuming and expensive. We believe that AI will one day alleviate these problems.
-
-     As a proof of concept, we've trained Meta's Llama on over 200k entries, with a focus on tasks related to the intellectual property domain.
-
-     We are currently running this demo on a less powerful version of our model due to computational limitations. If you would like to see our most powerful model in action, please contact us at this email: [email protected]
-
-     We know that confidentiality is probably the number one concern for attorneys when considering using such tools. We don't store any of your inputs to use for further training, and we don't use the OpenAI API (ChatGPT) as our backend, meaning that confidentiality is not compromised!
-
-     Please note that this is for research purposes and shouldn't be used commercially.
-
-     None of the outputs of this model, taken in part or in its entirety, constitutes legal advice. If you are seeking protection for your intellectual property, consult a registered patent/trademark attorney.
-     """)
-
-     # with gr.Tab("Ideator"):
-     #     gr.Markdown("""
-     #     Use this tool to generate ideas for how to implement an invention/creation.
-     #     """)
-     #     with gr.Row(scale=1, min_width=600):
-     #         with gr.Column():
-     #             userin = gr.Text(label="Input", lines=5)
-     #         with gr.Column():
-     #             text2 = gr.Textbox(label="Output", lines=5)
-     #     with gr.Row():
-     #         btn = gr.Button("Submit")
-     #     with gr.Row():
-     #         with gr.Accordion("Parameters"):
-     #             temp = gr.Slider(minimum=0, maximum=1, value=0.6, label="Temperature", step=0.1)
-     #             top_p = gr.Slider(minimum=0.5, maximum=1, value=0.95, label="Top P", step=0.1)
-     #             tokens = gr.Slider(minimum=5, maximum=2058, value=512, label="Max Tokens", step=1)
-
-     #     btn.click(fn=ideator, inputs=[userin, temp, top_p, tokens], outputs=text2)
-
-     with gr.Tab("Claim Drafter"):
-         gr.Markdown("""
-         Use this tool to expand your idea into the technical language of a patent claim. You can specify the type of claim you want using the dropdown menu.
-         """)
-         Claimchoices = gr.Dropdown(["An apparatus", "A method of use", "A method", "A method of manufacturing", "A system"], label='Choose Claim Type Here')
-
-         with gr.Row(scale=1, min_width=600):
-             text1 = gr.Textbox(label="Input",
-                                placeholder='Type in your idea here!', lines=5)
-             text2 = gr.Textbox(label="Output", lines=5)
-         with gr.Row():
-             btn = gr.Button("Submit")
-         with gr.Row():
-             with gr.Accordion("Parameters"):
-                 temp = gr.Slider(minimum=0, maximum=1, value=0.6, label="Temperature", step=0.1)
-                 top_p = gr.Slider(minimum=0.5, maximum=1, value=0.95, label="Top P", step=0.1)
-                 tokens = gr.Slider(minimum=5, maximum=2058, value=512, label="Max Tokens", step=1)
-
-         btn.click(fn=claim_selector, inputs=[text1, Claimchoices]).then(run_model, inputs=[text1, Claimchoices, temp, top_p, tokens], outputs=text2)
-
-     with gr.Tab("Description Generator"):
-         gr.Markdown("""
-         Use this tool to expand your patent claim into a description. You can also use this tool to generate abstracts and give you ideas about the benefit of an invention by changing the settings in the dropdown menu.
-         """)
-         Descriptionchoices = gr.Dropdown(["Generate a Detailed Description Paragraph", "Generate an Abstract", "What are the Benefits/Technical Effects"], label='Choose Generation Type Here')
-         with gr.Row(scale=1, min_width=600):
-
-             text1 = gr.Textbox(label="Input",
-                                placeholder='Type in your idea here!', lines=5)
-             text2 = gr.Textbox(label="Output", lines=5)
-         with gr.Row():
-             btn = gr.Button("Submit")
-         with gr.Row():
-             with gr.Accordion("Parameters"):
-                 temp = gr.Slider(minimum=0, maximum=1, value=0.6, label="Temperature", step=0.1)
-                 top_p = gr.Slider(minimum=0.5, maximum=1, value=0.95, label="Top P", step=0.1)
-                 tokens = gr.Slider(minimum=5, maximum=2058, value=512, label="Max Tokens", step=1)
-
-         btn.click(fn=desc_selector, inputs=[text1, Descriptionchoices]).then(run_model, inputs=[text1, Descriptionchoices, temp, top_p, tokens], outputs=text2)
-
-     # with gr.Tab("Prosecution Beta"):
-     #     gr.Markdown("""
-     #     Use this tool to generate ideas for how to overcome objections to novelty and inventive step. For now, this tool only works on relatively short inputs, so maybe try very simple inventions or short paragraphs.
-     #     """)
-     #     with gr.Row(scale=1, min_width=600):
-     #         with gr.Column():
-     #             application = gr.Text(label="Present Invention", lines=5)
-     #             priorart = gr.Text(label="Prior Art Document", lines=5)
-     #         with gr.Column():
-     #             text2 = gr.Textbox(label="Output", lines=5)
-     #     with gr.Row():
-     #         btn = gr.Button("Submit")
-
-     #     with gr.Row():
-     #         with gr.Accordion("Parameters"):
-     #             temp = gr.Slider(minimum=0, maximum=1, value=0.6, label="Temperature", step=0.1)
-     #             top_p = gr.Slider(minimum=0.5, maximum=1, value=0.95, label="Top P", step=0.1)
-     #             tokens = gr.Slider(minimum=5, maximum=2058, value=512, label="Max Tokens", step=1)
-
-     #     btn.click(fn=prosecute, inputs=[application, priorart, temp, top_p, tokens], outputs=text2)
-
-     with gr.Tab("CPC Search Tool"):
-         gr.Markdown("""
-         Use this tool to classify your invention according to the Cooperative Patent Classification system.
-         Click on the link to initiate either an Espacenet or Google Patents classification search using the generated classifications. You can specify which you would like using the dropdown menu.
-         """)
-
-         ClassifyChoices = gr.Dropdown(["Google Patent Search", "Espacenet Patent Search"], label='Choose Search Type Here')
-         with gr.Row(scale=1, min_width=600):
-             with gr.Column(scale=5):
-                 userin = gr.Textbox(label="Input", placeholder='Type in your Claim/Description/Abstract Here', lines=5)
-             with gr.Column(scale=1):
-                 with gr.Accordion("CPC classes"):
-                     output = gr.Markdown()  # gr.Textbox(label="Output", lines=5)
-         with gr.Row():
-             classify_btn = gr.Button("Classify")
-             classify_btn.click(fn=classifier, inputs=[userin, ClassifyChoices], outputs=output)
-
-     with gr.Tab("Chat"):
-         gr.Markdown("""
-         Do you want a bit more freedom over the outputs you generate? No problem! You can use a chatbot version of our model below. You can ask it anything.
-         We haven't done any filtering, so that we can understand exactly which biases/inappropriate responses exist in our model.
-         If you're concerned about any outputs, please get in contact with us to let us know what you saw. We will use this to inform the development of later versions of this model.
-         """)
-         with gr.Row(scale=1, min_width=600):
-             with gr.Column():
-                 userin = gr.Text(label="Question", lines=5)
-             with gr.Column():
-                 text2 = gr.Textbox(label="Answer", lines=5)
-         with gr.Row():
-             btn = gr.Button("Submit")
-
-         with gr.Row():
-             with gr.Accordion("Parameters"):
-                 temp = gr.Slider(minimum=0, maximum=1, value=0.6, label="Temperature", step=0.1)
-                 top_p = gr.Slider(minimum=0.5, maximum=1, value=0.95, label="Top P", step=0.1)
-                 tokens = gr.Slider(minimum=5, maximum=2058, value=512, label="Max Tokens", step=1)
-         btn.click(fn=Chat, inputs=[userin, temp, top_p, tokens], outputs=text2)
-
-     # gr.Markdown("""
-     # # THE CHATBOT
-     # Do you want a bit more freedom over the outputs you generate? No problem! You can use a chatbot version of our model below. You can ask it anything.
-     # If you're concerned about a particular output, please
-     # """)
-
-     # chatbot = gr.Chatbot([], elem_id="Claimed Assistant").style(height=500)
-     # with gr.Row():
-     #     with gr.Column(scale=1):
-     #         txt = gr.Textbox(
-     #             show_label=False,
-     #             placeholder="Enter text and submit",
-     #         ).style(container=False)
-     #
-     # with gr.Row():
-     #     with gr.Accordion("Parameters"):
-     #         temp = gr.Slider(minimum=0, maximum=1, value=0.6, label="Temperature", step=0.1)
-     #         top_p = gr.Slider(minimum=0.5, maximum=1, value=0.95, label="Top P", step=0.1)
-     #         tokens = gr.Slider(minimum=5, maximum=1024, value=256, label="Max Tokens", step=1)
-     #
-     #     txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
-     #         generateresponse, [chatbot, temp, top_p, tokens], chatbot)
-
-     gr.Markdown("""
-     # HAVE AN IDEA? GET IT CLAIMED
-
-     In the future, we are looking to expand our model's capabilities further to assist in a range of IP related tasks.
-
-     If you are interested in using a more powerful model that we have trained, or if you have any suggestions of features you would like to see us add, please get in touch!
-     """)
- demo.queue(max_size=20)
- demo.launch(show_api=False)
-
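
The sentence_embedder above mean-pools token embeddings with the attention mask. A minimal sketch of that pooling step in plain PyTorch (the file's own mean_pooling mixes TensorFlow and PyTorch ops; this version assumes torch only and is illustrative):

import torch

def mean_pool(token_embeddings, attention_mask):
    # token_embeddings: (batch, seq_len, dim); attention_mask: (batch, seq_len)
    mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    summed = (token_embeddings * mask).sum(dim=1)
    counts = mask.sum(dim=1).clamp(min=1e-9)  # avoid division by zero on fully masked rows
    return summed / counts

emb = mean_pool(torch.randn(1, 4, 384), torch.tensor([[1, 1, 1, 0]]))
print(emb.shape)  # torch.Size([1, 384]) -- one pooled vector per input sentence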
 
spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/align_rope_cross_zone.py DELETED
@@ -1,36 +0,0 @@
- import numpy as np
- from cliport.tasks.task import Task
- from cliport.utils import utils
- from cliport.tasks import primitives
- from cliport.tasks.grippers import Spatula
-
- class AlignRopeCrossZone(Task):
-     """Align a deformable rope across the diagonal of a zone marked on the tabletop."""
-
-     def __init__(self):
-         super().__init__()
-         self.max_steps = 20
-         self.lang_template = "align the rope across the diagonal of a zone"
-         self.task_completed_desc = "done aligning."
-         self.additional_reset()
-
-     def reset(self, env):
-         super().reset(env)
-
-         # Add zone.
-         length = 0.12
-         zone_size = (length, length, 0.01)
-         zone_pose = self.get_random_pose(env, zone_size)
-         zone_urdf = 'zone/zone.urdf'
-         env.add_object(zone_urdf, zone_pose, 'fixed')
-
-         # Add rope.
-         rope_size = (length, 0.01, 0.01)
-         rope_pose = self.get_random_pose(env, rope_size)
-         corner1_pose = utils.apply(zone_pose, (length / 2, length / 2, 0.01))
-         corner2_pose = utils.apply(zone_pose, (-length / 2, -length / 2, 0.01))
-         rope_id, targets, matches = self.make_rope(env, (corner1_pose, corner2_pose), n_parts=10)
-
-         # Goal: rope is aligned with the diagonal of the zone.
-         self.add_goal(objs=rope_id, matches=matches, targ_poses=targets, replace=False,
-                       rotations=False, metric='pose', params=None, step_max_reward=1, language_goal=self.lang_template)
 
spaces/GiorgiSekhniashvili/geo-whisper/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Geo Whisper
- emoji: 🏢
- colorFrom: red
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.32.0
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference