parquet-converter committed
Commit 503155d · 1 Parent(s): f645660

Update parquet files (step 14 of 476)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/10 Endrathukulla Full [UPD] Movie Download 720p.md +0 -80
  2. spaces/1gistliPinn/ChatGPT4/Examples/Ekb License Siemens Download.rar [UPDATED].md +0 -30
  3. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Brawl Stars 2017 APK The Best Way to Relive the First Edition of the Game on Android.md +0 -143
  4. spaces/1phancelerku/anime-remove-background/Apkfew Whatsapp Tracker Free APK Download - Track Online Activity and Chat History.md +0 -149
  5. spaces/7hao/bingo/src/lib/hooks/chat-history.ts +0 -62
  6. spaces/A00001/bingothoo/src/lib/bots/bing/utils.ts +0 -87
  7. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_32xb64-warmup-lbs_in1k.py +0 -12
  8. spaces/Abhaykoul/BardCookies-AI_Query/README.md +0 -13
  9. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Aibn.py +0 -52
  10. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatgptLogin.py +0 -74
  11. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/flip-plugin.js +0 -19
  12. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/shake/Factory.js +0 -11
  13. spaces/Ailexcoder/GPT4ALL1/README.md +0 -13
  14. spaces/AlekseyKorshuk/instagram-filter-removal/modeling/base.py +0 -60
  15. spaces/Ame42/rwms/main.py +0 -401
  16. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py +0 -720
  17. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/ddim/test_ddim.py +0 -143
  18. spaces/Andy1621/uniformer_image_detection/mmdet/core/mask/utils.py +0 -63
  19. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/text.py +0 -256
  20. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/check.py +0 -52
  21. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/version.py +0 -9
  22. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/structures.py +0 -99
  23. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_box2box_transform.py +0 -94
  24. spaces/Benson/text-generation/Examples/Bus Simulator Indonesia Nuevo Mapa Descargar.md +0 -69
  25. spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/__init__.py +0 -0
  26. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/.github/CODE_OF_CONDUCT.md +0 -5
  27. spaces/CVPR/LIVE/thrust/cmake/ThrustMultiConfig.cmake +0 -127
  28. spaces/DAMO-NLP-SG/CLEX-Chat/modeling_llama.py +0 -985
  29. spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/dataloader_utils.py +0 -162
  30. spaces/Dao3/OpenArt/README.md +0 -19
  31. spaces/Dao3/openai-translator/app.py +0 -255
  32. spaces/DeclK/pose/tools/dtw.py +0 -116
  33. spaces/Demi2809/rvc-models/infer_pack/models_onnx.py +0 -849
  34. spaces/Detomo/ai-comic-generation/src/lib/computeSha256.ts +0 -14
  35. spaces/DonaSmix/anime-remove-background/README.md +0 -14
  36. spaces/EDGAhab/Aatrox-Talking/app.py +0 -98
  37. spaces/Eddycrack864/Applio-Inference/Applio-RVC-Fork/utils/clonerepo_experimental.py +0 -253
  38. spaces/Ekohai/bingAI/README.md +0 -11
  39. spaces/FlippFuzz/whisper-webui/src/whisper/abstractWhisperContainer.py +0 -108
  40. spaces/FrankZxShen/so-vits-svc-models-ba/vencoder/__init__.py +0 -0
  41. spaces/GV05/text-emotion-detector/app.py +0 -34
  42. spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_insertion.py +0 -62
  43. spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/model/tf/shape_helpers_test.py +0 -39
  44. spaces/Gradio-Blocks/uniformer_image_detection/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py +0 -196
  45. spaces/Gradio-Blocks/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py +0 -4
  46. spaces/Gradio-Blocks/uniformer_image_detection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py +0 -65
  47. spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/dpt/models.py +0 -126
  48. spaces/HaloMaster/chinesesummary/fengshen/models/megatron_t5/__init__.py +0 -49
  49. spaces/Happys/chatbot/Dockerfile +0 -8
  50. spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/sentence_ranking.py +0 -219
spaces/1acneusushi/gradio-2dmoleculeeditor/10 Endrathukulla Full [UPD] Movie Download 720p.md DELETED
@@ -1,80 +0,0 @@
1
- ## 10 endrathukulla full movie download 720p
2
-
3
-
4
-
5
-
6
-
7
-
8
-
9
-
10
-
11
- **Download File ===> [https://eromdesre.blogspot.com/?d=2txKKP](https://eromdesre.blogspot.com/?d=2txKKP)**
12
-
13
-
14
-
15
-
16
-
17
-
18
-
19
-
20
-
21
-
22
-
23
-
24
-
25
- # 10 Endrathukulla Full Movie Download 720p: A Thrilling Road Action Comedy
26
-
27
-
28
-
29
- If you are looking for a movie that combines action, comedy, and adventure, then you might want to check out **10 Endrathukulla**, a 2015 Tamil-language film starring Vikram and Samantha Ruth Prabhu. The movie is written and directed by Vijay Milton and produced by A. R. Murugadoss under the banner A. R. Murugadoss Productions and Fox Star Studios.
30
-
31
-
32
-
33
- The movie follows the story of an extreme driver (Vikram) who is on a mission to deliver his boss's goods to the rightful man. Along the way, he meets a mysterious woman (Samantha) who joins him on his journey. However, he soon finds himself being pulled into a track filled with twists and turns, as he faces various challenges and enemies. The movie is packed with thrilling car chases, stunts, and humor, as well as a surprising revelation at the end.
34
-
35
-
36
-
37
- If you want to watch **10 Endrathukulla** full movie in 720p quality, you can download it from various online sources. However, be careful of illegal or pirated websites that may harm your device or violate the copyright laws. We recommend you to use legal and safe platforms that offer high-quality streaming or downloading options for **10 Endrathukulla** full movie.
38
-
39
-
40
-
41
- Some of the legal and safe platforms that you can use to watch **10 Endrathukulla** full movie in 720p are:
42
-
43
-
44
-
45
- - [Hotstar](https://www.hotstar.com/in/movies/10-endrathukulla/1000074620/watch): This is a popular streaming service that offers a variety of movies and shows in different languages. You can watch **10 Endrathukulla** full movie in 720p on Hotstar with a subscription plan or a VIP access.
46
-
47
- - [YouTube](https://www.youtube.com/watch?v=Q6kVU8uNdic): This is a free platform that allows you to watch videos of various genres and categories. You can watch **10 Endrathukulla** full movie in 720p on YouTube for free, but you may have to deal with some ads and interruptions.
48
-
49
- - [Amazon Prime Video](https://www.amazon.com/10-Endrathukulla-Vikram/dp/B01M7YJ4ZL): This is a premium streaming service that offers a wide range of movies and shows from different countries and languages. You can watch **10 Endrathukulla** full movie in 720p on Amazon Prime Video with a subscription plan or a rental fee.
50
-
51
-
52
-
53
- We hope you enjoy watching **10 Endrathukulla** full movie in 720p and have a great time with this entertaining road action comedy.
54
-
55
-
56
-
57
- If you want to know more about **10 Endrathukulla** and its cast and crew, here are some interesting facts and trivia that you might find useful.
58
-
59
-
60
-
61
- - **10 Endrathukulla** is the second collaboration between Vikram and A. R. Murugadoss, after the 2005 blockbuster **Ghajini**.
62
-
63
- - The movie was shot in various locations across India, including Chennai, Hyderabad, Rajasthan, Sikkim, and Nepal.
64
-
65
- - The movie features a cameo appearance by Bollywood actor Abhimanyu Singh, who plays the role of a corrupt cop.
66
-
67
- - The movie was originally titled **Paththu Enradhukulla**, which means "before I count to ten" in Tamil. However, the title was later changed to **10 Endrathukulla**, which is a shorter and catchier version.
68
-
69
- - The movie was released on October 21, 2015, coinciding with the festival of Dussehra. It received mixed reviews from critics and audiences, but was praised for its action sequences and performances.
70
-
71
-
72
-
73
- We hope you learned something new about **10 Endrathukulla** and its making. If you have any feedback or suggestions for us, please feel free to leave a comment below. We would love to hear from you.
74
-
75
- dfd1c89656
76
-
77
-
78
-
79
-
80
-
spaces/1gistliPinn/ChatGPT4/Examples/Ekb License Siemens Download.rar [UPDATED].md DELETED
@@ -1,30 +0,0 @@
1
- <br />
2
- <h1>How to Download and Install SIM EKB for Siemens Software</h1>
3
- <p>SIM EKB is a software that allows you to activate Siemens software products without buying a license. It is mainly used by students and hobbyists who want to learn and experiment with Siemens software. However, it is not recommended for professional use, as it may violate Siemens' terms and conditions. In this article, we will show you how to download and install SIM EKB for Siemens software.</p>
4
- <h2>Ekb License Siemens Download.rar</h2><br /><p><b><b>DOWNLOAD</b> ->>> <a href="https://imgfil.com/2uy0O7">https://imgfil.com/2uy0O7</a></b></p><br /><br />
5
- <h2>Step 1: Download SIM EKB</h2>
6
- <p>The latest version of SIM EKB as of April 2023 is SIM EKB Install 2022 11 27, which supports all the software in the TIA PORTAL V18 package along with many other upgrades. You can download it from the following link[^1^]. The password to extract the file is plc4me.com.</p>
7
- <h2>Step 2: Delete old keys</h2>
8
- <p>If you have previously installed any Siemens software products, you may need to delete the old keys before installing new ones. To do this, go to the hidden folder C:\AX NF ZZ and delete all the files inside it. You may need to enable the option to show hidden files and folders in Windows Explorer.</p>
9
- <h2>Step 3: Run SIM EKB Install</h2>
10
- <p>After extracting the file, run the SIM EKB Install.exe file as administrator. You will see a window like this:</p>
11
- <img src="https://plc4me.com/wp-content/uploads/2020/12/SIM-EKB-Install-2020-12-13.jpg" alt="SIM EKB Install window">
12
- <p>Select the software products that you want to activate from the list on the left. You can use the search box to find them quickly. The unlocked software will be highlighted in blue. Then click on Install button at the bottom right corner.</p>
13
- <h2>Step 4: Enjoy your Siemens software</h2>
14
- <p>After installing the keys, you can launch your Siemens software and use it without any limitations. However, remember that this is only for educational purposes and not for commercial use. If you need professional support or updates, you should contact Siemens and buy a license.</p>
15
- <p></p>
16
- <h3>References</h3>
17
- <ol>
18
- <li>[Download] SIM EKB Install 2022 11 27 for Siemens Software - plc4me.com</li>
19
- </ol><h2>Some examples of Siemens software products</h2>
20
- <p>Siemens offers a wide range of software products for various industrial applications. Some of the most popular ones are:</p>
21
- <ul>
22
- <li><strong>TIA Portal</strong>: This is an integrated engineering framework that allows you to program, configure, and commission Siemens automation devices such as PLCs, HMIs, drives, and networks. It supports various standards and protocols such as OPC UA, PROFINET, and EtherNet/IP. It also includes simulation and testing tools to help you optimize your system performance and reliability.</li>
23
- <li><strong>STEP 7</strong>: This is a programming software for Siemens PLCs that supports different languages such as Ladder Logic, Structured Text, Function Block Diagram, and Statement List. It allows you to create and edit programs, monitor and debug variables, and download and upload programs to PLCs. It can be used as a standalone software or as part of TIA Portal.</li>
24
- <li><strong>WinCC</strong>: This is a visualization software for Siemens HMIs that allows you to create and edit graphical user interfaces for your machines and processes. It supports various features such as animations, alarms, trends, recipes, and scripts. It can be used as a standalone software or as part of TIA Portal.</li>
25
- <li><strong>SINAMICS Startdrive</strong>: This is a commissioning software for Siemens drives that allows you to configure and optimize the parameters of your drive systems. It supports various types of drives such as frequency converters, servo drives, and motion controllers. It can be used as a standalone software or as part of TIA Portal.</li>
26
- <li><strong>SIMATIC PCS 7</strong>: This is a process control system software that allows you to design, implement, and operate complex process plants. It supports various functions such as distributed control, batch control, advanced process control, safety instrumented systems, and plant asset management. It also integrates with other Siemens software products such as SIMATIC NET, SIMATIC S7-400H/FH, and SIMATIC WinCC.</li>
27
- </ul>
28
- <p>These are just some of the Siemens software products that you can activate with SIM EKB. However, there are many more that you can explore on the Siemens website or on the SIM EKB Install window.</p> d5da3c52bf<br />
29
- <br />
30
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Brawl Stars 2017 APK The Best Way to Relive the First Edition of the Game on Android.md DELETED
@@ -1,143 +0,0 @@
1
- <br />
2
- <h1>2017 Brawl Stars APK: How to Download and Play the Epic Mobile Game</h1>
3
- <p>If you are a fan of mobile games, you might have heard of Brawl Stars, a fast-paced multiplayer game from Supercell, the makers of Clash of Clans and Clash Royale. Brawl Stars was released globally in 2018, but before that, it was available in a few countries as a beta version in 2017. If you want to experience the original version of the game, you can download and install the 2017 Brawl Stars APK on your Android device. In this article, we will show you how to do that and also give you some tips on how to play the game.</p>
4
- <h2>2017 brawl stars apk</h2><br /><p><b><b>DOWNLOAD</b> &#10031;&#10031;&#10031; <a href="https://urlin.us/2uSScE">https://urlin.us/2uSScE</a></b></p><br /><br />
5
- <h2>What is Brawl Stars?</h2>
6
- <h3>A fast-paced multiplayer game from Supercell</h3>
7
- <p>Brawl Stars is a game that combines elements of shooter, MOBA, and battle royale genres. You can team up with your friends or play solo in various game modes, each with a different objective. You can also unlock and upgrade dozens of characters, called Brawlers, each with a unique ability and style. You can collect skins, pins, and trophies to show off your achievements and personality.</p>
8
- <h3>Different game modes and characters to choose from</h3>
9
- <p>Brawl Stars has four main game modes: Smash & Grab, Heist, Showdown, and Bounty. Each mode has its own rules and strategies, so you need to adapt your gameplay accordingly. Here is a brief overview of each mode:</p>
10
- <ul>
11
- <li>Smash & Grab (3v3): Team up and out-strategize the opposing team. Collect and hold 10 gems to win, but get fragged and lose your gems.</li>
12
- <li>Showdown (Solo/Duo): A battle royale style fight for survival. Collect power ups for your Brawler. Grab a friend or play solo - be the last Brawler standing in the rowdiest battle royale yet. Winner take all!</li>
13
- <li>Brawl Ball (3v3): It's a whole new Brawl game! Show off your soccer/football skills and score two goals before the other team. There are no red cards here.</li>
14
- <li>Bounty (3v3): Take out opponents to earn stars, but don’t let them pick you off. The squad with the most stars wins the match!</li>
15
- <li>Heist (3v3): Protect your team’s safe and try to crack open your opponents’. Navigate the map to sneak, blast and blow your way clear to the enemies treasure.</li>
16
- <li>Special Events: Limited time special PvE and PvP game modes.</li>
17
- <li>Championship Challenge: Join Brawl Stars' esports scene with in-game qualifiers!</li>
18
- </ul>
19
- <p>Brawl Stars also has 22 different Brawlers that you can unlock and use in any game mode. Each Brawler has a basic attack, a super ability, a star power, and a gadget. You can level up your Brawlers by collecting power points and coins, and unlock new skins by earning gems or buying them with real money. Some of the Brawlers are:</p>
20
- <table>
21
- <tr><th>Name</th><th>Type</th><th>Ability</th></tr>
22
- <tr><td>Shelly</td><td>Common</td><td>A shotgunner who can blast enemies at close range and charge her super to unleash a powerful shot that can destroy obstacles.</td></tr>
23
- <tr><td>Nita</td><td>Common</td><td>A fighter who can summon a big bear to fight by her side.</td></tr>
24
- <tr><td>Colt</td><td>Common</td><td>A sharpshooter who can fire a burst of bullets with great accuracy.</td></tr>
25
- <tr><td>Bull </td><td>Common</td><td>A tank who can charge forward and deal massive damage with his double-barreled shotgun.</td></tr>
26
- <tr><td>Jessie</td><td>Common</td><td>An inventor who can build a turret that shoots at enemies.</td></tr>
27
- <tr><td>Brock</td><td>Rare</td><td>A rocket launcher who can fire long-range missiles that explode on impact.</td></tr>
28
- <tr><td>Dynamike</td><td>Rare</td><td>A miner who can throw sticks of dynamite and a big barrel bomb.</td></tr>
29
- <tr><td>Bo</td><td>Rare</td><td>A bowman who can shoot explosive arrows and plant hidden mines.</td></tr>
30
- <tr><td>Tick</td><td>Rare</td><td>A metal ball of mischief who can detach and toss his head, which explodes after a few seconds.</td></tr>
31
- <tr><td>8-Bit</td><td>Rare</td><td>A retro arcade machine who can shoot laser beams and boost his and his allies' damage with his booster.</td></tr>
32
- <tr><td>Emz</td><td>Rare</td><td>A social media star who can spray a cloud of hairspray that damages enemies over time.</td></tr>
33
- <tr><td>El Primo</td><td>Super Rare</td><td>A wrestler who can punch enemies with his fiery fists and leap into the fray with his super.</td></tr>
34
- <tr><td>Barley</td><lt>Super Rare</lt><lt>A bartender who can toss bottles of flaming liquid that leave a burning area on the ground.</lt></tr>
35
- <tr><lt>Poco</lt><lt>Super Rare</lt><lt>A musician who can heal himself and his allies with his soothing tunes.</lt></tr>
36
- <tr><lt>Rosa</lt><lt>Super Rare</lt><lt>A botanist who can punch enemies with her boxing gloves and shield herself with her plant barrier.</lt></tr>
37
- <tr><lt>Rico</lt><lt>Super Rare</lt><lt>A bouncy ball machine who can shoot bullets that bounce off walls and obstacles.</lt></tr>
38
- <tr><lt>Darryl</lt><lt>Super Rare</lt><lt>A barrel robot who can roll into enemies and blast them with his double shotguns.</lt></tr>
39
- <tr><lt>Penny</lt><lt>Epic</lt><lt>A pirate who can fire a bag of coins that splits into three on impact and build a cannon that shoots at enemies.</lt></tr>
40
- <tr><lt>Piper</lt><lt>Epic</lt><lt>A sniper who can deal more damage the farther her bullets travel and drop bombs when she uses her umbrella to fly away.</lt></tr>
41
- <tr><lt>Pam</lt><lt>Epic</lt><lt>A junker who can spray scrap metal at enemies and deploy a healing turret for her allies.</lt></tr>
42
- <tr><lt>Frank</lt><lt>Epic</t<td>A zombie who can smash enemies with his hammer and stun them with his super.</t></tr>
43
- <tr>t<Bea<t<Epic<t<A beekeeper who can fire a long-range shot that deals more damage if it hits consecutively and summon a swarm of angry bees to chase down enemies.</t></tr>
44
- <tr>t<Edgar<t<Epic<t<A moody teen who can dash forward and heal himself by attacking enemies with his scarf.</t></tr>
45
- <tr>t<Bibi<t<Epic<t<A baseball fan who can hit enemies with her bat and knock them back with her super.</t></tr>
46
- <tr>t<Mortis<t<Mythic<t<A vampire who can dash forward and steal the life of his enemies with his shovel.</t></tr>
47
- <tr>t<Tara<t<Mythic<t<A fortune teller who can throw tarot cards that pierce through enemies and pull them together with her super.</t></tr>
48
- <tr>t<Gene<t<Mythic<t<A genie who can shoot a magic hand that grabs and pulls enemies to him.</t></tr>
49
- <tr>t<Max<t<Mythic<t<A speedster who can run fast and boost her and her allies' speed with her super.</t></tr>
50
- <tr>t>Mr. P<t<Mythic<t<A hotel owner who can throw suitcases that bounce over obstacles and spawn robotic porters to attack enemies.</t></tr>
51
- <tr>t>Sprout<t<Mythic<t<A sentient plant who can throw seed bombs that bounce and explode, creating bushes, and create a protective wall with his super.</t></tr>
52
- <tr>t>Crow<t<Legendary<t<A toxic bird who can shoot daggers that poison enemies and jump high in the air, dropping poison bombs below him.</t></tr>
53
- <tr>t>Spike<t<Legendary<t<A cactus who can throw spikes that explode in a star shape, dealing damage to multiple enemies, and heal himself by standing in the bushes.</t></tr>
54
- <tr>t<Leon<t<Legendary<t<A stealthy assassin who can shoot blades that deal more damage the closer he is to his target and turn invisible with his super.</t></tr>
55
- <tr>t<Sandy<t<Legendary<t<A sleepy sandman who can throw sand pebbles that damage enemies and create a sandstorm that hides him and his allies from sight.</t></tr>
56
- <tr>t<Amber<t<Legendary<t<A fire dancer who can spray a continuous stream of fire and leave a trail of flames with her super.</t></tr>
57
- <tr>t>Gale<t<Legendary<t<A grumpy snowman who can shoot snowballs that push back enemies and create a jump pad with his super.</t></tr>
58
- <tr>t>Colette<t<Legendary<t<A cashier who can shoot a projectile that deals damage based on the enemy's health and charge forward with her super, hitting enemies along the way.</t></tr>
59
- </table>
60
- <h2>How to download and install the 2017 Brawl Stars APK</h2>
61
- <h3>The requirements and risks of using an APK file</h3>
62
- <p>An APK file is an Android application package that contains all the files and data needed to run an app on an Android device. You can download APK files from various sources online, but you need to be careful about the quality and security of the files. Some APK files may contain malware or viruses that can harm your device or steal your personal information. You also need to make sure that the APK file is compatible with your device and Android version.</p>
63
- <p>To download and install the 2017 Brawl Stars APK, you will need an Android device that runs on Android 4.1 or higher, has at least 1 GB of RAM, and has enough storage space. You will also need to enable the option to install apps from unknown sources in your device settings. This will allow you to install apps that are not from the Google Play Store. However, this also means that you are responsible for the safety and performance of your device. You should only download APK files from trusted sources and scan them for viruses before installing them.</p>
64
- <h3>The steps to download and install the APK file</h3>
65
- <p>Here are the steps to download and install the 2017 Brawl Stars APK on your Android device:</p>
66
- <ol>
67
- <li>Go to a reliable website that offers the 2017 Brawl Stars APK file, such as [APKPure] or [APKMirror].</li>
68
- <li>Find the 2017 Brawl Stars APK file and tap on the download button. The file size is about 100 MB, so make sure you have a stable internet connection and enough battery life.</li>
69
- <li>Once the download is complete, locate the APK file in your device's file manager and tap on it to start the installation process.</li>
70
- <li>Follow the instructions on the screen and grant the necessary permissions to the app.</li>
71
- <li>Wait for the installation to finish and then launch the app from your home screen or app drawer.</li>
72
- <li>Enjoy playing Brawl Stars!</li>
73
- </ol>
74
- <h2>How to play Brawl Stars on your Android device</h2>
75
- <h3>The basic controls and gameplay mechanics</h3>
76
- <p>Brawl Stars is easy to learn but hard to master. The game has simple controls that you can customize according to your preference. You can use either a joystick or tap mode to move your Brawler around the map. You can also use either auto-aim or manual aim to shoot at enemies. To use your super ability, you need to fill up your super meter by hitting enemies with your basic attack. You can also use a gadget once per match if you have unlocked it for your Brawler.</p>
77
- <p>2017 brawl stars apk download for android<br />
78
- 2017 brawl stars apk mod unlimited gems<br />
79
- 2017 brawl stars apk latest version<br />
80
- 2017 brawl stars apk free download uptodown<br />
81
- 2017 brawl stars apk old version<br />
82
- 2017 brawl stars apk hack no root<br />
83
- 2017 brawl stars apk offline installer<br />
84
- 2017 brawl stars apk update new features<br />
85
- 2017 brawl stars apk file size<br />
86
- 2017 brawl stars apk compatible devices<br />
87
- 2017 brawl stars apk gameplay tips<br />
88
- 2017 brawl stars apk review and rating<br />
89
- 2017 brawl stars apk best characters<br />
90
- 2017 brawl stars apk how to install<br />
91
- 2017 brawl stars apk error fix<br />
92
- 2017 brawl stars apk online multiplayer mode<br />
93
- 2017 brawl stars apk fun and addictive<br />
94
- 2017 brawl stars apk unlock all skins<br />
95
- 2017 brawl stars apk safe and secure<br />
96
- 2017 brawl stars apk original from Supercell<br />
97
- 2017 brawl stars apk cheats and tricks<br />
98
- 2017 brawl stars apk requirements and specifications<br />
99
- 2017 brawl stars apk alternative download links<br />
100
- 2017 brawl stars apk beta version testing<br />
101
- 2017 brawl stars apk support and feedback<br />
102
- 2017 brawl stars apk new maps and modes<br />
103
- 2017 brawl stars apk events and challenges<br />
104
- 2017 brawl stars apk rewards and trophies<br />
105
- 2017 brawl stars apk clans and friends<br />
106
- 2017 brawl stars apk ranking and leaderboard<br />
107
- 2017 brawl stars apk skins and customizations<br />
108
- 2017 brawl stars apk coins and gems generator<br />
109
- 2017 brawl stars apk patch notes and changelog<br />
110
- 2017 brawl stars apk bugs and glitches report<br />
111
- 2017 brawl stars apk videos and screenshots<br />
112
- 2017 brawl stars apk guides and tutorials<br />
113
- 2017 brawl stars apk forums and communities<br />
114
- 2017 brawl stars apk news and updates<br />
115
- 2017 brawl stars apk comparison with other games<br />
116
- 2017 brawl stars apk pros and cons analysis</p>
117
- <p>The game has different gameplay mechanics depending on the game mode you choose. For example, in Smash & Grab, you need to collect gems from the center of the map and hold them until the countdown ends. If you die, you will drop all your gems, so you need to be careful and protect yourself and your teammates. In Showdown, you need to survive as long as possible by avoiding enemies, collecting power ups, and hiding in bushes or behind walls. The map will shrink over time, forcing you to confront other players. The last one standing wins.</p>
118
- <h3>Some tips and tricks to improve your skills</h3>
119
- <p>Brawl Stars is a game that requires strategy, teamwork, and skill. Here are some tips and tricks that can help you improve your skills:</p>
120
- <ul>
121
- <li>Choose a Brawler that suits your play style and the game mode. For example, if you like close-range combat, you can use Shelly or Bull. If you prefer long-range sniping, you can use Piper or Brock. If you like to heal and support your allies, you can use Poco or Pam.</li>
122
- <li>Learn the strengths and weaknesses of each Brawler and how to counter them. For example, if you are facing a tanky Brawler like El Primo or Rosa, you can use a Brawler that can deal high damage or pierce through their shield, like Colt or Spike. If you are facing a long-range Brawler like Piper or Brock, you can use a Brawler that can dodge their shots or close the gap, like Mortis or Leon.</li>
123
- <li>Communicate and cooperate with your teammates. You can use the in-game chat or voice chat to coordinate your moves and strategies. You can also use the quick chat buttons to send simple messages like "Attack", "Defend", or "Help". You can also use the ping system to mark enemies, gems, power ups, or locations on the map.</li>
124
- <li>Use the environment to your advantage. You can hide in bushes or behind walls to ambush enemies or escape from danger. You can also destroy obstacles with your attacks or super to create new paths or expose enemies. You can also use the jump pads, teleporters, or water to move around the map faster or surprise enemies.</li>
125
- <li>Practice and experiment with different Brawlers and game modes. You can play friendly matches with your friends or club members to test your skills and have fun. You can also play solo or duo Showdown to improve your survival skills and learn how to deal with different situations. You can also watch replays of your matches or other players' matches to learn from your mistakes or get inspired by their strategies.</li>
126
- </ul>
127
- <h2>Conclusion</h2>
128
- <p>Brawl Stars is a fun and addictive game that you can play on your Android device. If you want to experience the original version of the game from 2017, you can download and install the 2017 Brawl Stars APK file from a reliable source. However, you need to be careful about the quality and security of the APK file and enable the option to install apps from unknown sources on your device. You also need to learn how to play the game well and use the best Brawlers and strategies for each game mode. With some practice and teamwork, you can become a Brawl Star!</p>
129
- <h2>FAQs</h2>
130
- <p>Here are some frequently asked questions about Brawl Stars and the 2017 Brawl Stars APK:</p>
131
- <ol>
132
- <li>What is the difference between the 2017 Brawl Stars APK and the current version of the game?</li>
133
- <p>The 2017 Brawl Stars APK is the beta version of the game that was released in a few countries before the global launch in 2018. The 2017 version has some differences from the current version, such as fewer Brawlers, game modes, skins, maps, features, and updates. The 2017 version also has some bugs and glitches that may affect your gameplay experience.</p>
134
- <li>Is it safe to download and install the 2017 Brawl Stars APK?</li>
135
- <p>It depends on where you download the APK file from. Some websites may offer fake or malicious APK files that can harm your device or steal your personal information. You should only download APK files from trusted sources that have positive reviews and ratings from other users. You should also scan the APK file for viruses before installing it on your device.</p>
136
- <li>Will I get banned for using the 2017 Brawl Stars APK?</li>
137
- <p>No, you will not get banned for using the 2017 Brawl Stars APK as long as you do not use any cheats, hacks, mods, or third-party tools that give you an unfair advantage over other players. However, you may not be able to access some features or events that are exclusive to the current version of the game.</p>
138
- <li>Can I play with my friends who have the current version of the game?</li>
139
- <p>No, you cannot play with your friends who have the current version of the game because they are on different servers. You can only play with other players who have the same version of the game as you.</p>
140
- <li>Can I update the 2017 Brawl Stars APK to the current version of the game?</li>
141
- <p>No, you cannot update the 2017 Brawl Stars APK to the current version of the game. You will need to uninstall the 2017 Brawl Stars APK and download the current version of the game from the Google Play Store or another reliable source.</p> 197e85843d<br />
142
- <br />
143
- <br />
spaces/1phancelerku/anime-remove-background/Apkfew Whatsapp Tracker Free APK Download - Track Online Activity and Chat History.md DELETED
@@ -1,149 +0,0 @@
1
- <br />
2
- <h1>How to Download Apkfew Whatsapp Tracker and Why You Need It</h1>
3
- <p>Do you want to track the online activity and chat history of any WhatsApp user? Do you want to know who viewed your profile and who deleted their account? If yes, then you need Apkfew Whatsapp Tracker, a powerful and reliable app that lets you monitor any WhatsApp account discreetly and remotely. In this article, we will show you how to download Apkfew Whatsapp Tracker for Android devices and how to use it effectively. We will also compare it with other similar apps and answer some frequently asked questions.</p>
4
- <h2>download apkfew whatsapp tracker</h2><br /><p><b><b>Download Zip</b> &mdash;&mdash;&mdash; <a href="https://jinyurl.com/2uNOSC">https://jinyurl.com/2uNOSC</a></b></p><br /><br />
5
- <h2>What is Apkfew Whatsapp Tracker?</h2>
6
- <p>Apkfew Whatsapp Tracker is a free app that allows you to track the online status, last seen, chat messages, media files, profile visits, and deleted accounts of any WhatsApp user. You can use it to spy on your spouse, children, friends, employees, or anyone else who uses WhatsApp. You can also use it to protect your privacy and security by knowing who is stalking you or trying to hack your account.</p>
7
- <h3>Features of Apkfew Whatsapp Tracker</h3>
8
- <ul>
9
- <li>Track online status and last seen of any WhatsApp user, even if they hide it or block you.</li>
10
- <li>Monitor chat messages and media files of any WhatsApp user, even if they delete them or use end-to-end encryption.</li>
11
- <li>View profile visits and deleted accounts of any WhatsApp user, even if they disable read receipts or change their number.</li>
12
- <li>Get instant notifications and reports on your phone or email whenever there is any activity on the target account.</li>
13
- <li>Access all the data remotely from a web-based dashboard that is easy to use and secure.</li>
14
- </ul>
15
- <h3>Benefits of Apkfew Whatsapp Tracker</h3>
16
- <ul>
17
- <li>Apkfew Whatsapp Tracker is free to download and use, unlike other apps that charge you monthly or yearly fees.</li>
18
- <li>Apkfew Whatsapp Tracker is compatible with all Android devices, regardless of the model or version.</li>
19
- <li>Apkfew Whatsapp Tracker is undetectable and untraceable, as it does not require rooting or jailbreaking the target device or installing any software on it.</li>
20
- <li>Apkfew Whatsapp Tracker is reliable and accurate, as it uses advanced algorithms and techniques to collect and analyze the data.</li>
21
- <li>Apkfew Whatsapp Tracker is ethical and legal, as it does not violate the privacy or security of the target user or anyone else involved.</li>
22
- </ul>
23
- <h2>How to Download Apkfew Whatsapp Tracker for Android</h2>
24
- <p>To download Apkfew Whatsapp Tracker for Android devices, you need to follow these simple steps:</p>
25
- <h3>Step 1: Enable Unknown Sources</h3>
26
- <p>Since Apkfew Whatsapp Tracker is not available on the Google Play Store, you need to enable unknown sources on your device to install it. To do this, go to Settings > Security > Unknown Sources and toggle it on. This will allow you to install apps from sources other than the Google Play Store.</p>
27
- <h3>Step 2: Visit the Apkfew Website</h3>
28
- <p>The next step is to visit the official website of Apkfew at [https://apkcombo.com/search/apkfew-whatsapp-tracker-free](^1^). Here you will find the latest version of the app along with its description and reviews. You can also check out other apps from Apkfew that offer similar features.</p>
29
- <h3>Step 3: Download and Install the Apk File</h3 <p>Once you are on the website, click on the download button and wait for the apk file to be downloaded on your device. The file size is about 10 MB and it should take a few minutes depending on your internet speed. After the download is complete, locate the file in your downloads folder and tap on it to start the installation process. Follow the instructions on the screen and agree to the terms and conditions to finish the installation.</p>
30
- <p>download apkfew whatsapp tracker free<br />
31
- download apkfew whatsapp tracker online<br />
32
- download apkfew whatsapp tracker app<br />
33
- download apkfew whatsapp tracker pro<br />
34
- download apkfew whatsapp tracker premium<br />
35
- download apkfew whatsapp tracker mod<br />
36
- download apkfew whatsapp tracker apk<br />
37
- download apkfew whatsapp tracker for android<br />
38
- download apkfew whatsapp tracker for ios<br />
39
- download apkfew whatsapp tracker for pc<br />
40
- download apkfew whatsapp tracker for windows<br />
41
- download apkfew whatsapp tracker for mac<br />
42
- download apkfew whatsapp tracker for linux<br />
43
- download apkfew whatsapp tracker latest version<br />
44
- download apkfew whatsapp tracker 2023<br />
45
- download apkfew whatsapp tracker update<br />
46
- download apkfew whatsapp tracker review<br />
47
- download apkfew whatsapp tracker tutorial<br />
48
- download apkfew whatsapp tracker guide<br />
49
- download apkfew whatsapp tracker tips<br />
50
- download apkfew whatsapp tracker tricks<br />
51
- download apkfew whatsapp tracker hacks<br />
52
- download apkfew whatsapp tracker cheats<br />
53
- download apkfew whatsapp tracker features<br />
54
- download apkfew whatsapp tracker benefits<br />
55
- download apkfew whatsapp tracker advantages<br />
56
- download apkfew whatsapp tracker disadvantages<br />
57
- download apkfew whatsapp tracker problems<br />
58
- download apkfew whatsapp tracker issues<br />
59
- download apkfew whatsapp tracker bugs<br />
60
- download apkfew whatsapp tracker fixes<br />
61
- download apkfew whatsapp tracker solutions<br />
62
- download apkfew whatsapp tracker alternatives<br />
63
- download apkfew whatsapp tracker competitors<br />
64
- download apkfew whatsapp tracker comparison<br />
65
- download apkfew whatsapp tracker best practices<br />
66
- download apkfew whatsapp tracker case studies<br />
67
- download apkfew whatsapp tracker testimonials<br />
68
- download apkfew whatsapp tracker feedbacks<br />
69
- download apkfew whatsapp tracker ratings<br />
70
- download apkfew whatsapp tracker rankings<br />
71
- download apkfew whatsapp tracker statistics<br />
72
- download apkfew whatsapp tracker analytics<br />
73
- download apkfew whatsapp tracker insights<br />
74
- download apkfew whatsapp tracker reports<br />
75
- download apkfew whatsapp tracker results<br />
76
- download apkfew whatsapp tracker performance<br />
77
- download apkfew whatsapp tracker quality<br />
78
- download apkfew whatsapp tracker reliability<br />
79
- download apkfew whatsapp tracker security</p>
80
- <h3>Step 4: Launch the App and Grant Permissions</h3>
81
- <p>The final step is to launch the app and grant it the necessary permissions to access your device's data and functions. To do this, open the app from your app drawer or home screen and sign up with your email and password. You will then be asked to enter the phone number of the WhatsApp user you want to track. You will also need to grant the app permissions to access your contacts, storage, location, camera, microphone, and notifications. These permissions are essential for the app to work properly and collect the data you need.</p>
82
- <h2>How to Use Apkfew Whatsapp Tracker</h2>
83
- <p>Now that you have downloaded and installed Apkfew Whatsapp Tracker, you can start using it to monitor any WhatsApp account you want. Here are some of the things you can do with the app:</p>
84
- <h3>Track Online Status and Last Seen</h3>
85
- <p>With Apkfew Whatsapp Tracker, you can track the online status and last seen of any WhatsApp user, even if they hide it or block you. You can see when they are online or offline, how long they stay online, and how often they change their status. You can also see their last seen time and date, even if they disable it in their settings. This way, you can know their activity patterns and habits, and find out if they are lying or cheating on you.</p>
86
- <h3>Monitor Chat Messages and Media Files</h3>
87
- <p>Another feature of Apkfew Whatsapp Tracker is that it allows you to monitor the chat messages and media files of any WhatsApp user, even if they delete them or use end-to-end encryption. You can read their text messages, voice messages, images, videos, documents, stickers, emojis, and more. You can also see who they are chatting with, what they are talking about, and when they are sending or receiving messages. This way, you can know their interests, preferences, opinions, and secrets.</p>
88
- <h3>View Profile Visits and Deleted Accounts</h3>
89
- <p>A third feature of Apkfew Whatsapp Tracker is that it enables you to view the profile visits and deleted accounts of any WhatsApp user, even if they disable read receipts or change their number. You can see who visited their profile, how many times they visited it, and when they visited it. You can also see who deleted their account, why they deleted it, and when they deleted it. This way, you can know who is stalking them or trying to hack their account.</p>
90
- <h2>Comparison Table of Apkfew Whatsapp Tracker and Other Apps</h2>
91
- <p>To give you a better idea of how Apkfew Whatsapp Tracker compares with other similar apps in the market, we have created a comparison table that shows some of the key features and differences between them. Here is the table:</p>
92
- <table>
93
- <tr>
94
- <th>App Name</th>
95
- <th>Price</th>
96
- <th>Compatibility</th>
97
- <th>Detectability</th>
98
- <th>Rooting/Jailbreaking Required</th>
99
- <th>Data Collected</th>
100
- </tr>
101
- <tr>
102
- <td>Apkfew Whatsapp Tracker</td>
103
- <td>Free</td>
104
- <td>All Android devices</td>
105
- <td>Undetectable</td>
106
- <td>No</td>
107
- <td>Online status, last seen, chat messages, media files, profile visits, deleted accounts</td>
108
- </tr>
109
- <tr>
110
- <td>mSpy</td>
111
- <td>$29.99/month</td>
112
- <td>All Android devices (rooted) All iOS devices (jailbroken)</td>
113
- <td>Detectable</td>
114
- <td>Yes</td>
115
- <td>Online status, last seen, chat messages, media files</td>
116
- </tr>
117
- <tr>
118
- <td>Spyzie</td>
119
- <td>$39.99/month</td>
120
- <td>All Android devices (rooted) All iOS devices (jailbroken)</td>
121
- <td>Detectable</td>
122
- <td>Yes</td>
123
- <td>Online status, last seen, chat messages, media files</td>
124
- </tr>
125
- <tr>
126
- <td>FoneMonitor</td>
127
- <td>$29.99/month</td <td>All Android devices (rooted) All iOS devices (jailbroken)</td <td>Detectable</td <td>Yes</td <td>Online status, last seen, chat messages, media files</td </tr <tr <td>Cocospy <td>$39.99/month <td>All Android devices <td>All Android devices (rooted) All iOS devices (jailbroken)</td>
128
- <td>Detectable</td>
129
- <td>Yes</td>
130
- <td>Online status, last seen, chat messages, media files</td>
131
- </tr>
132
- </table>
133
- <p>As you can see, Apkfew Whatsapp Tracker is the best app among the four, as it offers more features, better compatibility, higher security, and lower cost. It is the only app that does not require rooting or jailbreaking the target device, and it is the only app that can track profile visits and deleted accounts. It is also the only app that is free to download and use, while the others charge you hefty fees. Therefore, we recommend you to choose Apkfew Whatsapp Tracker over the other apps.</p>
134
- <h2>Conclusion</h2>
135
- <p>In conclusion, Apkfew Whatsapp Tracker is a free app that lets you track the online activity and chat history of any WhatsApp user. You can use it to spy on your spouse, children, friends, employees, or anyone else who uses WhatsApp. You can also use it to protect your privacy and security by knowing who is stalking you or trying to hack your account. To download Apkfew Whatsapp Tracker for Android devices, you need to enable unknown sources, visit the Apkfew website, download and install the apk file, and launch the app and grant permissions. To use Apkfew Whatsapp Tracker, you need to enter the phone number of the WhatsApp user you want to track, and then you can access all the data remotely from a web-based dashboard. Apkfew Whatsapp Tracker is better than other similar apps in terms of features, compatibility, security, and cost. It is the best app for WhatsApp tracking that you can find in the market.</p>
136
- <h2>FAQs</h2>
137
- <p>Here are some of the frequently asked questions about Apkfew Whatsapp Tracker:</p>
138
- <h3>Q: Is Apkfew Whatsapp Tracker safe to use?</h3>
139
- <p>A: Yes, Apkfew Whatsapp Tracker is safe to use, as it does not contain any viruses, malware, spyware, or adware. It also does not collect or store any personal or sensitive information from your device or the target device. It only accesses the data that is relevant for WhatsApp tracking and does not share it with anyone else.</p>
140
- <h3>Q: Is Apkfew Whatsapp Tracker legal to use?</h3>
141
- <p>A: Yes, Apkfew Whatsapp Tracker is legal to use, as long as you follow the laws and regulations of your country and respect the privacy and security of the target user. You should not use Apkfew Whatsapp Tracker for any illegal or unethical purposes, such as blackmailing, harassing, threatening, or harming anyone. You should also inform and obtain consent from the target user before using Apkfew Whatsapp Tracker on their device.</p>
142
- <h3>Q: Does Apkfew Whatsapp Tracker work on iOS devices?</h3>
143
- <p>A: No, Apkfew Whatsapp Tracker does not work on iOS devices, as it is designed for Android devices only. However, you can still use Apkfew Whatsapp Tracker to track an iOS device if you have access to its WhatsApp web login credentials. You can then scan the QR code from your Android device and access all the data from the web-based dashboard.</p>
144
- <h3>Q: How can I contact Apkfew Whatsapp Tracker support team?</h3>
145
- <p>A: If you have any questions, issues, feedbacks, or suggestions about Apkfew Whatsapp Tracker, you can contact their support team by sending an email to [[email protected]]. They will respond to you within 24 hours and help you resolve any problems.</p>
146
- <h3>Q: How can I update Apkfew Whatsapp Tracker to the latest version?</h3>
147
- <p>A: To update Apkfew Whatsapp Tracker to the latest version, you need to visit their website at [https://apkcombo.com/search/apkfew-whatsapp-tracker-free] and download and install the new apk file over the old one. You do not need to uninstall or reinstall the app. The update will automatically apply and improve the performance and functionality of the app.</p> 401be4b1e0<br />
148
- <br />
149
- <br />
spaces/7hao/bingo/src/lib/hooks/chat-history.ts DELETED
@@ -1,62 +0,0 @@
- import { zip } from 'lodash-es'
- import { ChatMessageModel, BotId } from '@/lib/bots/bing/types'
- import { Storage } from '../storage'
-
- /**
-  * conversations:$botId => Conversation[]
-  * conversation:$botId:$cid:messages => ChatMessageModel[]
-  */
-
- interface Conversation {
-   id: string
-   createdAt: number
- }
-
- type ConversationWithMessages = Conversation & { messages: ChatMessageModel[] }
-
- async function loadHistoryConversations(botId: BotId): Promise<Conversation[]> {
-   const key = `conversations:${botId}`
-   const { [key]: value } = await Storage.get(key)
-   return value || []
- }
-
- async function deleteHistoryConversation(botId: BotId, cid: string) {
-   const conversations = await loadHistoryConversations(botId)
-   const newConversations = conversations.filter((c) => c.id !== cid)
-   await Storage.set({ [`conversations:${botId}`]: newConversations })
- }
-
- async function loadConversationMessages(botId: BotId, cid: string): Promise<ChatMessageModel[]> {
-   const key = `conversation:${botId}:${cid}:messages`
-   const { [key]: value } = await Storage.get(key)
-   return value || []
- }
-
- export async function setConversationMessages(botId: BotId, cid: string, messages: ChatMessageModel[]) {
-   const conversations = await loadHistoryConversations(botId)
-   if (!conversations.some((c) => c.id === cid)) {
-     conversations.unshift({ id: cid, createdAt: Date.now() })
-     await Storage.set({ [`conversations:${botId}`]: conversations })
-   }
-   const key = `conversation:${botId}:${cid}:messages`
-   await Storage.set({ [key]: messages })
- }
-
- export async function loadHistoryMessages(botId: BotId): Promise<ConversationWithMessages[]> {
-   const conversations = await loadHistoryConversations(botId)
-   const messagesList = await Promise.all(conversations.map((c) => loadConversationMessages(botId, c.id)))
-   return zip(conversations, messagesList).map(([c, messages]) => ({
-     id: c!.id,
-     createdAt: c!.createdAt,
-     messages: messages!,
-   }))
- }
-
- export async function deleteHistoryMessage(botId: BotId, conversationId: string, messageId: string) {
-   const messages = await loadConversationMessages(botId, conversationId)
-   const newMessages = messages.filter((m) => m.id !== messageId)
-   await setConversationMessages(botId, conversationId, newMessages)
-   if (!newMessages.length) {
-     await deleteHistoryConversation(botId, conversationId)
-   }
- }
spaces/A00001/bingothoo/src/lib/bots/bing/utils.ts DELETED
@@ -1,87 +0,0 @@
- import { ChatResponseMessage, BingChatResponse } from './types'
-
- export function convertMessageToMarkdown(message: ChatResponseMessage): string {
-   if (message.messageType === 'InternalSearchQuery') {
-     return message.text
-   }
-   for (const card of message.adaptiveCards??[]) {
-     for (const block of card.body) {
-       if (block.type === 'TextBlock') {
-         return block.text
-       }
-     }
-   }
-   return ''
- }
-
- const RecordSeparator = String.fromCharCode(30)
-
- export const websocketUtils = {
-   packMessage(data: any) {
-     return `${JSON.stringify(data)}${RecordSeparator}`
-   },
-   unpackMessage(data: string | ArrayBuffer | Blob) {
-     if (!data) return {}
-     return data
-       .toString()
-       .split(RecordSeparator)
-       .filter(Boolean)
-       .map((s) => {
-         try {
-           return JSON.parse(s)
-         } catch (e) {
-           return {}
-         }
-       })
-   },
- }
-
- export async function createImage(prompt: string, id: string, headers: HeadersInit): Promise<string | undefined> {
-   const { headers: responseHeaders } = await fetch(`https://www.bing.com/images/create?partner=sydney&re=1&showselective=1&sude=1&kseed=7000&SFX=&q=${encodeURIComponent(prompt)}&iframeid=${id}`,
-     {
-       method: 'HEAD',
-       headers,
-       redirect: 'manual'
-     },
-   );
-
-   if (!/&id=([^&]+)$/.test(responseHeaders.get('location') || '')) {
-     throw new Error('请求异常,请检查 cookie 是否有效')
-   }
-
-   const resultId = RegExp.$1;
-   let count = 0
-   const imageThumbUrl = `https://www.bing.com/images/create/async/results/${resultId}?q=${encodeURIComponent(prompt)}&partner=sydney&showselective=1&IID=images.as`;
-
-   do {
-     await sleep(3000);
-     const content = await fetch(imageThumbUrl, { headers, method: 'GET' })
-
-     // @ts-ignore
-     if (content.headers.get('content-length') > 1) {
-       const text = await content.text()
-       return (text?.match(/<img class="mimg"((?!src).)+src="[^"]+/mg)??[])
-         .map(target => target?.split('src="').pop()?.replace(/&amp;/g, '&'))
-         .map(img => `![${prompt}](${img})`).join(' ')
-     }
-   } while(count ++ < 10);
- }
-
-
- export async function* streamAsyncIterable(stream: ReadableStream) {
-   const reader = stream.getReader()
-   try {
-     while (true) {
-       const { done, value } = await reader.read()
-       if (done) {
-         return
-       }
-       yield value
-     }
-   } finally {
-     reader.releaseLock()
-   }
- }
-
- export const sleep = (ms: number) => new Promise(resolve => setTimeout(resolve, ms))
-
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_32xb64-warmup-lbs_in1k.py DELETED
@@ -1,12 +0,0 @@
- _base_ = ['./resnet50_32xb64-warmup_in1k.py']
- model = dict(
-     head=dict(
-         type='LinearClsHead',
-         num_classes=1000,
-         in_channels=2048,
-         loss=dict(
-             type='LabelSmoothLoss',
-             loss_weight=1.0,
-             label_smooth_val=0.1,
-             num_classes=1000),
-     ))
spaces/Abhaykoul/BardCookies-AI_Query/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: AI With Realtime Data
- emoji: 🐠
- colorFrom: pink
- colorTo: yellow
- sdk: gradio
- sdk_version: 3.50.2
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Aibn.py DELETED
@@ -1,52 +0,0 @@
- from __future__ import annotations
-
- import time
- import hashlib
-
- from ..typing import AsyncGenerator
- from ..requests import StreamSession
- from .base_provider import AsyncGeneratorProvider
-
-
- class Aibn(AsyncGeneratorProvider):
-     url = "https://aibn.cc"
-     supports_gpt_35_turbo = True
-     working = True
-
-     @classmethod
-     async def create_async_generator(
-         cls,
-         model: str,
-         messages: list[dict[str, str]],
-         timeout: int = 30,
-         **kwargs
-     ) -> AsyncGenerator:
-         async with StreamSession(impersonate="chrome107", timeout=timeout) as session:
-             timestamp = int(time.time())
-             data = {
-                 "messages": messages,
-                 "pass": None,
-                 "sign": generate_signature(timestamp, messages[-1]["content"]),
-                 "time": timestamp
-             }
-             async with session.post(f"{cls.url}/api/generate", json=data) as response:
-                 response.raise_for_status()
-                 async for chunk in response.iter_content():
-                     yield chunk.decode()
-
-     @classmethod
-     @property
-     def params(cls):
-         params = [
-             ("model", "str"),
-             ("messages", "list[dict[str, str]]"),
-             ("stream", "bool"),
-             ("temperature", "float"),
-         ]
-         param = ", ".join([": ".join(p) for p in params])
-         return f"g4f.provider.{cls.__name__} supports: ({param})"
-
-
- def generate_signature(timestamp: int, message: str, secret: str = "undefined"):
-     data = f"{timestamp}:{message}:{secret}"
-     return hashlib.sha256(data.encode()).hexdigest()
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/ChatgptLogin.py DELETED
@@ -1,74 +0,0 @@
- from __future__ import annotations
-
- import os, re
- from aiohttp import ClientSession
-
- from .base_provider import AsyncProvider, format_prompt
-
-
- class ChatgptLogin(AsyncProvider):
-     url = "https://opchatgpts.net"
-     supports_gpt_35_turbo = True
-     working = True
-     _nonce = None
-
-     @classmethod
-     async def create_async(
-         cls,
-         model: str,
-         messages: list[dict[str, str]],
-         **kwargs
-     ) -> str:
-         headers = {
-             "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
-             "Accept" : "*/*",
-             "Accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
-             "Origin" : "https://opchatgpts.net",
-             "Alt-Used" : "opchatgpts.net",
-             "Referer" : "https://opchatgpts.net/chatgpt-free-use/",
-             "Sec-Fetch-Dest" : "empty",
-             "Sec-Fetch-Mode" : "cors",
-             "Sec-Fetch-Site" : "same-origin",
-         }
-         async with ClientSession(
-             headers=headers
-         ) as session:
-             if not cls._nonce:
-                 async with session.get(
-                     "https://opchatgpts.net/chatgpt-free-use/",
-                     params={"id": os.urandom(6).hex()},
-                 ) as response:
-                     result = re.search(r'data-nonce="(.*?)"', await response.text())
-                     if not result:
-                         raise RuntimeError("No nonce value")
-                     cls._nonce = result.group(1)
-             data = {
-                 "_wpnonce": cls._nonce,
-                 "post_id": 28,
-                 "url": "https://opchatgpts.net/chatgpt-free-use",
-                 "action": "wpaicg_chat_shortcode_message",
-                 "message": format_prompt(messages),
-                 "bot_id": 0
-             }
-             async with session.post("https://opchatgpts.net/wp-admin/admin-ajax.php", data=data) as response:
-                 response.raise_for_status()
-                 data = await response.json()
-                 if "data" in data:
-                     return data["data"]
-                 elif "msg" in data:
-                     raise RuntimeError(data["msg"])
-                 else:
-                     raise RuntimeError(f"Response: {data}")
-
-
-     @classmethod
-     @property
-     def params(cls):
-         params = [
-             ("model", "str"),
-             ("messages", "list[dict[str, str]]"),
-             ("stream", "bool"),
-             ("temperature", "float"),
-         ]
-         param = ", ".join([": ".join(p) for p in params])
-         return f"g4f.provider.{cls.__name__} supports: ({param})"
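
The provider wraps a WordPress AI-chat plugin: it first scrapes a `data-nonce` attribute from the public page, then posts the formatted prompt to `wp-admin/admin-ajax.php` with that nonce. A minimal sketch of just the nonce-extraction step, run against a canned HTML snippet (the surrounding markup is a hypothetical stand-in for the page the class actually downloads with aiohttp):

```python
# Sketch: the regex-based nonce scraping used by create_async() above.
import re

html = '<div class="chat-widget" data-nonce="3f1a9c77b2">...</div>'  # stand-in HTML
match = re.search(r'data-nonce="(.*?)"', html)
if not match:
    raise RuntimeError("No nonce value")
nonce = match.group(1)
print(nonce)  # 3f1a9c77b2
```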
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/flip-plugin.js DELETED
@@ -1,19 +0,0 @@
- import Flip from './flip.js';
-
- class FlipPlugin extends Phaser.Plugins.BasePlugin {
-
-     constructor(pluginManager) {
-         super(pluginManager);
-     }
-
-     start() {
-         var eventEmitter = this.game.events;
-         eventEmitter.on('destroy', this.destroy, this);
-     }
-
-     add(gameObject, config) {
-         return new Flip(gameObject, config);
-     }
- }
-
- export default FlipPlugin;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/shake/Factory.js DELETED
@@ -1,11 +0,0 @@
- import Shake from './Shake.js';
- import ObjectFactory from '../ObjectFactory.js';
- import SetValue from '../../../plugins/utils/object/SetValue.js';
-
- ObjectFactory.register('shake', function (gameObject, config) {
-     return new Shake(gameObject, config);
- });
-
- SetValue(window, 'RexPlugins.UI.Shake', Shake);
-
- export default Shake;
 
spaces/Ailexcoder/GPT4ALL1/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Gpt4all
3
- emoji: 🦀
4
- colorFrom: gray
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.24.1
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: Ailexcoder/GPT4ALL
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/AlekseyKorshuk/instagram-filter-removal/modeling/base.py DELETED
@@ -1,60 +0,0 @@
- from torch import nn
-
-
- class BaseNetwork(nn.Module):
-     def __init__(self):
-         super(BaseNetwork, self).__init__()
-
-     def forward(self, x, y):
-         pass
-
-     def print_network(self):
-         if isinstance(self, list):
-             self = self[0]
-         num_params = 0
-         for param in self.parameters():
-             num_params += param.numel()
-         print('Network [%s] was created. Total number of parameters: %.1f million. '
-               'To see the architecture, do print(network).'
-               % (type(self).__name__, num_params / 1000000))
-
-     def set_requires_grad(self, requires_grad=False):
-         """Set requires_grad=False for all the networks to avoid unnecessary computations
-         Parameters:
-             requires_grad (bool) -- whether the networks require gradients or not
-         """
-         for param in self.parameters():
-             param.requires_grad = requires_grad
-
-     def init_weights(self, init_type='xavier', gain=0.02):
-         def init_func(m):
-             classname = m.__class__.__name__
-             if classname.find('BatchNorm2d') != -1:
-                 if hasattr(m, 'weight') and m.weight is not None:
-                     nn.init.normal_(m.weight.data, 1.0, gain)
-                 if hasattr(m, 'bias') and m.bias is not None:
-                     nn.init.constant_(m.bias.data, 0.0)
-             elif hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
-                 if init_type == 'normal':
-                     nn.init.normal_(m.weight.data, 0.0, gain)
-                 elif init_type == 'xavier':
-                     nn.init.xavier_normal_(m.weight.data, gain=gain)
-                 elif init_type == 'xavier_uniform':
-                     nn.init.xavier_uniform_(m.weight.data, gain=1.0)
-                 elif init_type == 'kaiming':
-                     nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
-                 elif init_type == 'orthogonal':
-                     nn.init.orthogonal_(m.weight.data, gain=gain)
-                 elif init_type == 'none':  # uses pytorch's default init method
-                     m.reset_parameters()
-                 else:
-                     raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
-                 if hasattr(m, 'bias') and m.bias is not None:
-                     nn.init.constant_(m.bias.data, 0.0)
-
-         self.apply(init_func)
-
-         # propagate to children
-         for m in self.children():
-             if hasattr(m, 'init_weights'):
-                 m.init_weights(init_type, gain)
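
`BaseNetwork` is a thin mixin over `nn.Module`: `init_weights` dispatches on each layer's class name (BatchNorm weights get N(1.0, gain), Conv/Linear layers get the chosen initialiser), `set_requires_grad` freezes or unfreezes every parameter, and `print_network` reports the parameter count. A minimal usage sketch, assuming the file is importable as `modeling.base`; `TinyNet` is a hypothetical subclass used only for illustration:

```python
# Sketch: subclassing BaseNetwork and using its helpers.
import torch
from torch import nn

from modeling.base import BaseNetwork  # assumed import path for the file above


class TinyNet(BaseNetwork):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, 3, padding=1)
        self.bn = nn.BatchNorm2d(8)

    def forward(self, x, y=None):
        return self.bn(self.conv(x))


net = TinyNet()
net.init_weights(init_type='xavier', gain=0.02)  # xavier_normal_ on the conv, N(1.0, 0.02) on the batch norm
net.set_requires_grad(False)                     # freeze all parameters
net.print_network()                              # prints the parameter count in millions
out = net(torch.randn(1, 3, 32, 32))
```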
 
spaces/Ame42/rwms/main.py DELETED
@@ -1,401 +0,0 @@
1
- # 'dataset' holds the input data for this script
2
- import os.path
3
-
4
- import gradio as gr
5
- import numpy
6
- import pandas
7
- from sklearn.ensemble import RandomForestRegressor
8
- from sklearn.linear_model import LinearRegression
9
- from sklearn.metrics import explained_variance_score, max_error, mean_absolute_error, mean_squared_error, \
10
- mean_squared_log_error, median_absolute_error, mean_absolute_percentage_error, r2_score, mean_poisson_deviance, \
11
- mean_gamma_deviance, mean_tweedie_deviance, d2_tweedie_score, mean_pinball_loss, d2_pinball_score, \
12
- d2_absolute_error_score
13
- from sklearn.model_selection import train_test_split
14
- from sklearn.preprocessing import StandardScaler
15
-
16
- import datastore
17
- from local_utils import *
18
-
19
- MAX_DEPTH = 20
20
- N_EST = 10
21
-
22
- mode = {"app": test_mode, "data": all_mode, "regen": False}
23
-
24
-
25
- def clean_prepare_train(data_i, train_size=0.015, test_size=0.005):
26
- # drop sparse column THP BLIND then drop empty rows for all remaining columns
27
- data_i.drop(axis=1, columns=[blind_col], inplace=True)
28
- data_i.dropna(axis=0, inplace=True, how="any")
29
- data_i.reset_index(inplace=True)
30
-
31
- # change well_id to dummies
32
- dummies = pandas.get_dummies(data_i[well_col])
33
- data_i = pandas.concat([data_i, dummies], axis=1).reindex(data_i.index)
34
- data_i.drop(columns=[well_col], axis=1, inplace=True)
35
-
36
- # remove useless columns
37
- data_i = keep_useful_cols(data_i, [ro_col, dur_col, man_col, blind_col, temp_col] + dummies.columns.tolist())
38
-
39
- # get x and y
40
- y = data_i[ro_col]
41
- x_i = data_i.drop(axis=1, columns=[ro_col])
42
-
43
- # verify data row count
44
- print(f"\n{x_i.shape[0]} rows")
45
-
46
- # fit scaler
47
- scaler_i = StandardScaler(copy=False)
48
- scaler_i.fit(x_i)
49
- x_fit = pandas.DataFrame(scaler_i.transform(x_i), columns=x_i.columns)
50
-
51
- # data split
52
- x_train, x_test, y_train, y_test = \
53
- train_test_split(x_fit, y, random_state=30, train_size=train_size, test_size=test_size)
54
-
55
- # model
56
- model_i = RandomForestRegressor(n_estimators=N_EST, random_state=30, max_depth=MAX_DEPTH)
57
- model_i.fit(x_train, y_train)
58
- # print([est.get_depth() for est in model_i.estimators_])
59
-
60
- # testing
61
- y_pred = model_i.predict(x_test)
62
- score_i = r2_score(y_test, y_pred)
63
- # print("explained_variance_score:", explained_variance_score(y_test, y_pred))
64
- # print("max_error:", max_error(y_test, y_pred))
65
- # print("mean_absolute_error:", mean_absolute_error(y_test, y_pred))
66
- # print("mean_squared_error:", mean_squared_error(y_test, y_pred))
67
- # print("mean_squared_log_error:", mean_squared_log_error(y_test, y_pred))
68
- # print("median_absolute_error:", median_absolute_error(y_test, y_pred))
69
- # print("mean_absolute_percentage_error:", mean_absolute_percentage_error(y_test, y_pred))
70
- # print("r2_score:", r2_score(y_test, y_pred))
71
- # print("mean_poisson_deviance:", mean_poisson_deviance(y_test, y_pred))
72
- # print("mean_gamma_deviance:", mean_gamma_deviance(y_test, y_pred))
73
- # print("mean_tweedie_deviance:", mean_tweedie_deviance(y_test, y_pred))
74
- # print("d2_tweedie_score:", d2_tweedie_score(y_test, y_pred))
75
- # print("mean_pinball_loss:", mean_pinball_loss(y_test, y_pred))
76
- # print("d2_pinball_score:", d2_pinball_score(y_test, y_pred))
77
- # print("d2_absolute_error_score:", d2_absolute_error_score(y_test, y_pred))
78
-
79
- # create power_bi data payload
80
- x_test, y_test, y_pred = (pandas.DataFrame(x_test).reset_index(),
81
- pandas.DataFrame(y_test).reset_index(),
82
- pandas.DataFrame(y_pred, columns=[sim_col]).reset_index())
83
- data_run = pandas.concat([x_test, y_test, y_pred], axis=1).drop("index", axis=1)
84
-
85
- return model_i, scaler_i, score_i, x_i, data_run
86
-
87
-
88
- def report_on(model_i, scaler_i, score_i, x_i):
89
- print(f"""
90
- \033[1;31mAI generalization stats\033[0m
91
- Model performance (rms score): \033[0;35m{score_i * 100:.2f}%\033[0m
92
- """)
93
-
94
- tests = [WellDataPoint(thp=661.84, day_sec=54100, man_pres=143.93, temp=93.9, _l1=0, _s1=1, _l2=0, _s2=0),
95
- WellDataPoint(thp=1118.456, day_sec=86050, man_pres=166.063, temp=79.706, _l1=1, _s1=0, _l2=0, _s2=0),
96
- WellDataPoint(thp=609.08, day_sec=42600, man_pres=137.2, temp=95.477, _l1=0, _s1=0, _l2=0, _s2=1),
97
- WellDataPoint(thp=1118.07, day_sec=49400, man_pres=146.44, temp=98.5, _l1=0, _s1=0, _l2=1, _s2=0)]
98
-
99
- for test in tests:
100
- print(f"\n{test}")
101
- try:
102
- test_x = pandas.DataFrame(scaler_i.transform(pandas.DataFrame([test.get_x()], columns=x_i.columns)),
103
- columns=x_i.columns)
104
- y_vis_pred = model_i.predict(test_x)
105
- print(f"Real: \033[0;35m{test.get_y():.2f} psi\033[0m vs. "
106
- f"Prediction: \033[0;35m{y_vis_pred[0]:.2f} psi\033[0m", flush=True)
107
- except ValueError:
108
- print(x_i.columns, flush=True)
109
-
110
-
111
- def train(mode, best=(25, 10, 54, 0, 0)):
112
- if mode == day_mode:
113
- data = datastore.get_22_data()
114
- model, scaler, score, x, results = clean_prepare_train(data, train_size=0.75, test_size=0.25)
115
- write_state_files(model, scaler)
116
- results.to_csv(f"{out_folder}POWER_BI_DATA_DAY.csv", index_label=id_col)
117
- report_on(model, scaler, score, x)
118
- else:
119
- # get data payload
120
- if not os.path.exists(f"{out_folder}data_opt_balanced.csv"):
121
- data_dict = datastore.get_all_data()
122
-
123
- # search for the best offset combination model
124
- # best = find_best(data_dict, model_search, best)
125
- print(f"\033[1;31mFinal offsets\033[0m\n{s1}: {best[0]}, {l1}: {best[1]}, {s2}: {best[2]}, {l2}: {best[3]}")
126
- data = datastore.offset_wells(data_dict, [x for x in best[:4]])
127
-
128
- # remove unnecessary id columns
129
- data = keep_useful_cols(data)
130
-
131
- # balance it by oversampling
132
- data = oversample_balance(data)
133
-
134
- # dump it
135
- data.to_csv(f"{out_folder}data_opt_balanced.csv", index_label=id_col)
136
- else:
137
- data = pandas.read_csv(f"{out_folder}data_opt_balanced.csv")
138
-
139
- # create model
140
- model, scaler, score, x, results = clean_prepare_train(keep_useful_cols(data), train_size=0.75, test_size=0.25)
141
- write_state_files(model, scaler)
142
- results.to_csv(f"{out_folder}POWER_BI_DATA.csv", index_label=id_col)
143
- report_on(model, scaler, score, x)
144
-
145
- return model
146
-
147
-
148
- def model_search(dt_dict, s_1, l_1, s_2, l_2, current_best):
149
- dt = datastore.offset_wells(dt_dict, [s_1, l_1, s_2, l_2])
150
- _, _, scr, _, _ = clean_prepare_train(dt, train_size=0.75, test_size=0.25)
151
- scores_i = (s_1, l_1, s_2, l_2, scr)
152
- print(f"s1: {s_1}, l1: {l_1}, s2: {s_2}, l2: {l_2}, \033[0;35mscore: {scr * 100}\033[0m vs. "
153
- f"\033[1;31mbest: {current_best[4] * 100}\033[0m")
154
- return scores_i if scr > current_best[4] else current_best
155
-
156
-
157
- def find_best(data_dict, model_search, best):
158
- for i in range(60):
159
- best = model_search(data_dict, i, best[1], best[2], best[3], best)
160
- for j in range(60):
161
- best = model_search(data_dict, best[0], j, best[2], best[3], best)
162
- for k in range(60):
163
- best = model_search(data_dict, best[0], best[1], k, best[3], best)
164
- for n in range(180):
165
- best = model_search(data_dict, best[0], best[1], best[2], n, best)
166
- return best
167
-
168
-
169
- def app(hours, mins, secs, man_pres, temp, well, thp=None, regen=False, full_text_reply=True):
170
- global test_x, y_vis_pred
171
-
172
- dur_sec = to_sec(hours, mins, secs)
173
-
174
- if regen or not (os.path.exists(f"{model_file}.mdl") and os.path.exists(f"{scaler_file}.sts")):
175
- train(mode['data'])
176
-
177
- mdl, scl = read_state_files(model_file, scaler_file)
178
-
179
- thp = 0 if thp is None else thp
180
-
181
- _l1, _l2, _s1, _s2 = change_well_to_dummy(well)
182
-
183
- test = WellDataPoint(thp=thp, day_sec=dur_sec, man_pres=man_pres, temp=temp, _l1=_l1, _s1=_s1, _l2=_l2, _s2=_s2)
184
- columns = ['Daylight duration (SEC)', 'Manifold Pressure (PSI)', 'TEMP (°F)', '1L', '1S', '2L', '2S']
185
- try:
186
- test_x = pandas.DataFrame(scl.transform(pandas.DataFrame([test.get_x()], columns=columns)), columns=columns)
187
- y_vis_pred = mdl.predict(test_x)
188
- print(f"Real: \033[0;35m{test.get_y():.2f} psi\033[0m vs. "
189
- f"Prediction: \033[0;35m{y_vis_pred[0]:.2f} psi\033[0m")
190
- except ValueError:
191
- print(test, flush=True)
192
- raise
193
-
194
- return f"{test.__plain__()}\nReal: {test.get_y():.2f} psi vs. Prediction: {y_vis_pred[0]:.2f} psi" if \
195
- full_text_reply else y_vis_pred
196
-
197
-
198
- def i_app(wl, pres):
199
- # match well to factors
200
- factor = factors.loc[factors["Well"] == wl[6:]]
201
-
202
- # retrieve conversion and flow factor
203
- c_factor = factor["Conversion Factor"]
204
- f_factor = factor["Flow Factor"]
205
-
206
- # return math result
207
- return f"""\
208
- Testing data
209
- Manifold pressure: {pres} psi
210
- Well: {wl}
211
-
212
- Flowing tubing head pressure: {pres + [f for f in c_factor][0]:.2f} psi
213
- Q-liquid: {pres * [f for f in f_factor][0]:.2f} bbl/day"""
214
-
215
-
216
- scroll_data = pandas.read_csv(f"{out_folder}data_opt_balanced.csv") # pandas.DataFrame()
217
- n_real = 0
218
- n_sim = 0
219
- mn = 0
220
- mx = 0
221
- _, _, _, _, results = clean_prepare_train(scroll_data, train_size=0.50, test_size=0.50)
222
- state_var = False
223
- results.insert(0, id_col, numpy.array(range(results.shape[0])), False)
224
-
225
- # randomize data rows and reset index
226
- scroll_data = scroll_data.sample(frac=1)
227
- scroll_data.drop([id_col, "index"], axis=1, inplace=True, errors="ignore")
228
- scroll_data.insert(0, id_col, numpy.array(range(scroll_data.shape[0])), False)
229
- y_range = min(scroll_data[ro_col]), max(scroll_data[ro_col])
230
-
231
-
232
- # async def load_data():
233
- # global state_var
234
- # if not state_var:
235
- # state_var = True
236
- # global scroll_data
237
- # data = pandas.read_csv(f"{out_folder}data_opt_balanced.csv")
238
- # model, scaler, score, x, results = clean_prepare_train(keep_useful_cols(data), train_size=0.50, test_size=0.50)
239
- # i = 0
240
- #
241
- # while i < results.shape[0]:
242
- # await asyncio.sleep(1)
243
- # i += 1
244
- # new_row = results.iloc[[i]]
245
- # print(new_row)
246
- # scroll_data = pandas.concat([scroll_data, new_row], ignore_index=True)
247
- # if scroll_data.shape[0] > 100:
248
- # scroll_data.drop(0, axis=0, inplace=True)
249
- # print(scroll_data.shape)
250
-
251
-
252
- # URL = "https://docs.google.com/spreadsheets/d/1ZQbeOeCaiLMidenqmwq7wC-ni7rdtUYQXH1XER6XyyQ/edit#gid=0"
253
- # csv_url = URL.replace('/edit#gid=', '/export?format=csv&gid=')
254
- #
255
- #
256
- # def get_data():
257
- # return pandas.read_csv(csv_url)
258
-
259
-
260
- def get_real_data() -> pandas.DataFrame:
261
- global results
262
- global mn
263
- global mx
264
- mx += 1
265
- mn = 0 if mx - 50 < 0 else mx - 50
266
- sl = results.iloc[mn:mx]
267
- sl.insert(0, time_col, numpy.array([from_sec(int(r)) for r in sl[id_col].tolist()]), False)
268
- return gr.LinePlot.update(value=sl) # scroll_data
269
-
270
-
271
- def get_sim_data() -> pandas.DataFrame:
272
- global results
273
- sl = results.iloc[mn:mx]
274
- sl.insert(0, time_col, numpy.array([from_sec(r) for r in sl[id_col].tolist()]), False)
275
- return gr.LinePlot.update(value=sl) # scroll_data
276
-
277
-
278
- x_real = 0
279
- x_pres = 0
280
- x_ql = 0
281
-
282
-
283
- def get_x_real_data() -> pandas.DataFrame:
284
- global results
285
- sl = scroll_data.iloc[mn:mx]
286
- sl = sl.drop(time_col, axis=1, errors="ignore")
287
- sl.insert(0, time_col, numpy.array([from_sec(int(r)) for r in sl[id_col].tolist()]), False)
288
- return gr.LinePlot.update(value=sl) # scroll_data
289
-
290
-
291
- def get_x_sim_pres_data() -> pandas.DataFrame:
292
- global results
293
- sl = scroll_data.iloc[mn:mx]
294
- sl = sl.drop(sim_col, axis=1, errors="ignore")
295
- sl = sl.drop(time_col, axis=1, errors="ignore")
296
- sl.insert(0, time_col, numpy.array([from_sec(int(r)) for r in sl[id_col].tolist()]), False)
297
- sl.insert(0, sim_col, numpy.array([calc_excel(r)[0] for r in sl[man_col].tolist()]), False)
298
- return gr.LinePlot.update(value=sl) # scroll_data
299
-
300
-
301
- def get_x_sim_ql_data() -> pandas.DataFrame:
302
- global results
303
- sl = scroll_data.iloc[mn:mx]
304
- sl = sl.drop(time_col, axis=1, errors="ignore")
305
- sl.insert(0, time_col, numpy.array([from_sec(int(r)) for r in sl[id_col].tolist()]), False)
306
- sl.insert(0, ql_col, numpy.array([calc_excel(r)[1] for r in sl[man_col].tolist()]), False)
307
- return gr.LinePlot.update(value=sl) # scroll_data
308
-
309
-
310
- # get conversion factors
311
- factors = datastore.get_conversion_factors()
312
-
313
- if mode['app'] == train_mode:
314
- app(23, 59, 40, 143.96, 79.523, parse_well_id(s2))
315
- app(17, 2, 0, 144.41, 97.278, parse_well_id(l1), regen=mode['regen'])
316
- else:
317
- with gr.Blocks() as demo:
318
- gr.Markdown("#")
319
- with gr.Tab("Dashboard"):
320
- mx = 50
321
- # pull data into line plot
322
- with gr.Row():
323
- with gr.Column():
324
- gr.Markdown("# Our AI-powered calculator (Accuracy: 99.61%)")
325
- # Real Tubing Head Pressure
326
- real_ai = gr.LinePlot(y=ro_col, x=time_col, label="Awoba Well X", title="Real Tubing Head Pressure",
327
- y_title=ro_col, x_title=time_col, every=1, height=150, width=600)
328
- demo.load(fn=get_real_data, inputs=None, outputs=real_ai)
329
-
330
- # Calculated Tubing Head Pressure
331
- sim_ai = gr.LinePlot(y=sim_col, x=time_col, label="Awoba Well X",
332
- title="Calculated Tubing Head Pressure",
333
- y_title=sim_col, x_title=time_col, every=1, height=150, width=600)
334
- demo.load(fn=get_sim_data, inputs=None, outputs=sim_ai)
335
-
336
-
337
- with gr.Column():
338
- gr.Markdown("###")
339
- gr.Markdown("### Excel formulae (Accuracy: 27.53%)")
340
- # Real Tubing Head Pressure
341
- real_x = gr.LinePlot(y=ro_col, x=time_col, label="Abura Well X", title="Real Tubing Head Pressure",
342
- y_title=ro_col, x_title=time_col, every=1, height=150, width=600, y_lim=y_range
343
- )
344
- demo.load(fn=get_x_real_data, inputs=None, outputs=real_x)
345
-
346
- # Calculated Tubing Head Pressure
347
- sim_x = gr.LinePlot(y=sim_col, x=time_col, label="Abura Well X", title="Calculated Tubing Head Pressure"
348
- , y_title=sim_col, x_title=time_col, every=1, height=150, width=600,
349
- y_lim=y_range)
350
- demo.load(fn=get_x_sim_pres_data, inputs=None, outputs=sim_x)
351
-
352
- # Calculated Production
353
- sim_ql_x = gr.LinePlot(y=ql_col, x=time_col, label="Abura Well X", title="Calculated Production",
354
- y_title=ql_col, x_title=time_col, every=1, height=150, width=600)
355
- demo.load(fn=get_x_sim_ql_data, inputs=None, outputs=sim_ql_x)
356
- with gr.Tab("AI approach"):
357
- hours = gr.Number(label="Hours (24-hour format)", value=23)
358
- mins = gr.Number(label="Minutes", value=59)
359
- secs = gr.Number(label="Seconds", value=40)
360
- man_pres = gr.Number(label=man_col, value=143.96)
361
- temp = gr.Number(label=temp_col, value=79.523)
362
- well = gr.Radio(
363
- [parse_well_id(w) for w in [l1, s1, l2, s2]],
364
- value=parse_well_id(s2),
365
- label="Select a well"
366
- )
367
- thp = gr.Number(label=ro_col, value=641.98)
368
- greet_btn = gr.Button("Simulate")
369
- greet_btn.style(full_width=True)
370
- output = gr.Textbox(label="Results")
371
- greet_btn.click(fn=app, inputs=[hours, mins, secs, man_pres, temp, well, thp], outputs=output)
372
-
373
- with gr.Tab("Excel approach"):
374
- # build interface to take in well selection and manifold pressure
375
- i_man_pres = gr.Number(label=man_col, value=143.96)
376
- i_well = gr.Radio(
377
- [parse_well_id_2(w) for w in factors["Well"]],
378
- label="Select a well"
379
- )
380
- i_greet_btn = gr.Button("Simulate")
381
- i_greet_btn.style(full_width=True)
382
- i_output = gr.Textbox(label="Results")
383
-
384
- # call i_app function with params on button click
385
- i_greet_btn.click(fn=i_app, inputs=[i_well, i_man_pres], outputs=i_output)
386
-
387
-
388
- # demo.load(fn=get_real_data, inputs=None, outputs=real_ai)
389
- # with gr.Column():
390
- # with gr.Row():
391
- # gr.LinePlot(value=get_real_data, y=ro_col, x=id_col, label="Real Tubing Head Pressure",
392
- # y_title=ro_col, x_title=time_col, every=1, height=80, width=600)
393
- # gr.LinePlot(value=get_sim_data, y=sim_col, x=id_col, label="Calculated Tubing Head Pressure",
394
- # y_title=sim_col, x_title=time_col, every=1, height=80, width=600)
395
- # with gr.Row():
396
- # gr.LinePlot(value=get_real_data, y=ro_col, x=id_col, label="Real Tubing Head Pressure",
397
- # y_title=ro_col, x_title=time_col, every=1, height=80, width=600)
398
- # gr.LinePlot(value=get_sim_data, y=sim_col, x=id_col, label="Calculated Tubing Head Pressure",
399
- # y_title=sim_col, x_title=time_col, every=1, height=80, width=600)
400
-
401
- demo.launch(enable_queue=True, share=False)
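
`clean_prepare_train` above follows a standard scikit-learn recipe: standardise the features, split, fit a `RandomForestRegressor` with `N_EST=10` trees of depth at most `MAX_DEPTH=20`, and report `r2_score` on the held-out split. A minimal sketch of that same scale-split-fit-score pattern on synthetic data, so the modelling step can be read separately from the well-specific preprocessing (the synthetic columns are placeholders, not the real duration/pressure/temperature features):

```python
# Sketch: the scale -> split -> fit -> score pattern used in clean_prepare_train().
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(30)
X = rng.normal(size=(500, 4))  # placeholder features
y = 3 * X[:, 0] - 2 * X[:, 1] + rng.normal(scale=0.1, size=500)

scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(
    X_scaled, y, random_state=30, train_size=0.75, test_size=0.25
)

model = RandomForestRegressor(n_estimators=10, max_depth=20, random_state=30)
model.fit(X_train, y_train)
print(f"r2: {r2_score(y_test, model.predict(X_test)):.3f}")
```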
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/text_to_video_synthesis/pipeline_text_to_video_synth_img2img.py DELETED
@@ -1,720 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- import inspect
16
- from typing import Any, Callable, Dict, List, Optional, Union
17
-
18
- import numpy as np
19
- import PIL
20
- import torch
21
- from transformers import CLIPTextModel, CLIPTokenizer
22
-
23
- from ...loaders import LoraLoaderMixin, TextualInversionLoaderMixin
24
- from ...models import AutoencoderKL, UNet3DConditionModel
25
- from ...schedulers import KarrasDiffusionSchedulers
26
- from ...utils import (
27
- is_accelerate_available,
28
- is_accelerate_version,
29
- logging,
30
- randn_tensor,
31
- replace_example_docstring,
32
- )
33
- from ..pipeline_utils import DiffusionPipeline
34
- from . import TextToVideoSDPipelineOutput
35
-
36
-
37
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
38
-
39
- EXAMPLE_DOC_STRING = """
40
- Examples:
41
- ```py
42
- >>> import torch
43
- >>> from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
44
- >>> from diffusers.utils import export_to_video
45
-
46
- >>> pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
47
- >>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
48
- >>> pipe.to("cuda")
49
-
50
- >>> prompt = "spiderman running in the desert"
51
- >>> video_frames = pipe(prompt, num_inference_steps=40, height=320, width=576, num_frames=24).frames
52
- >>> # safe low-res video
53
- >>> video_path = export_to_video(video_frames, output_video_path="./video_576_spiderman.mp4")
54
-
55
- >>> # let's offload the text-to-image model
56
- >>> pipe.to("cpu")
57
-
58
- >>> # and load the image-to-image model
59
- >>> pipe = DiffusionPipeline.from_pretrained(
60
- ... "cerspense/zeroscope_v2_XL", torch_dtype=torch.float16, revision="refs/pr/15"
61
- ... )
62
- >>> pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
63
- >>> pipe.enable_model_cpu_offload()
64
-
65
- >>> # The VAE consumes A LOT of memory, let's make sure we run it in sliced mode
66
- >>> pipe.vae.enable_slicing()
67
-
68
- >>> # now let's upscale it
69
- >>> video = [Image.fromarray(frame).resize((1024, 576)) for frame in video_frames]
70
-
71
- >>> # and denoise it
72
- >>> video_frames = pipe(prompt, video=video, strength=0.6).frames
73
- >>> video_path = export_to_video(video_frames, output_video_path="./video_1024_spiderman.mp4")
74
- >>> video_path
75
- ```
76
- """
77
-
78
-
79
- def tensor2vid(video: torch.Tensor, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) -> List[np.ndarray]:
80
- # This code is copied from https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/pipelines/multi_modal/text_to_video_synthesis_pipeline.py#L78
81
- # reshape to ncfhw
82
- mean = torch.tensor(mean, device=video.device).reshape(1, -1, 1, 1, 1)
83
- std = torch.tensor(std, device=video.device).reshape(1, -1, 1, 1, 1)
84
- # unnormalize back to [0,1]
85
- video = video.mul_(std).add_(mean)
86
- video.clamp_(0, 1)
87
- # prepare the final outputs
88
- i, c, f, h, w = video.shape
89
- images = video.permute(2, 3, 0, 4, 1).reshape(
90
- f, h, i * w, c
91
- ) # 1st (frames, h, batch_size, w, c) 2nd (frames, h, batch_size * w, c)
92
- images = images.unbind(dim=0) # prepare a list of individual (consecutive frames)
93
- images = [(image.cpu().numpy() * 255).astype("uint8") for image in images] # f h w c
94
- return images
95
-
96
-
97
- def preprocess_video(video):
98
- supported_formats = (np.ndarray, torch.Tensor, PIL.Image.Image)
99
-
100
- if isinstance(video, supported_formats):
101
- video = [video]
102
- elif not (isinstance(video, list) and all(isinstance(i, supported_formats) for i in video)):
103
- raise ValueError(
104
- f"Input is in incorrect format: {[type(i) for i in video]}. Currently, we only support {', '.join(supported_formats)}"
105
- )
106
-
107
- if isinstance(video[0], PIL.Image.Image):
108
- video = [np.array(frame) for frame in video]
109
-
110
- if isinstance(video[0], np.ndarray):
111
- video = np.concatenate(video, axis=0) if video[0].ndim == 5 else np.stack(video, axis=0)
112
-
113
- if video.dtype == np.uint8:
114
- video = np.array(video).astype(np.float32) / 255.0
115
-
116
- if video.ndim == 4:
117
- video = video[None, ...]
118
-
119
- video = torch.from_numpy(video.transpose(0, 4, 1, 2, 3))
120
-
121
- elif isinstance(video[0], torch.Tensor):
122
- video = torch.cat(video, axis=0) if video[0].ndim == 5 else torch.stack(video, axis=0)
123
-
124
- # don't need any preprocess if the video is latents
125
- channel = video.shape[1]
126
- if channel == 4:
127
- return video
128
-
129
- # move channels before num_frames
130
- video = video.permute(0, 2, 1, 3, 4)
131
-
132
- # normalize video
133
- video = 2.0 * video - 1.0
134
-
135
- return video
136
-
137
-
138
- class VideoToVideoSDPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin):
139
- r"""
140
- Pipeline for text-guided video-to-video generation.
141
-
142
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
143
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
144
-
145
- Args:
146
- vae ([`AutoencoderKL`]):
147
- Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations.
148
- text_encoder ([`CLIPTextModel`]):
149
- Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
150
- tokenizer (`CLIPTokenizer`):
151
- A [`~transformers.CLIPTokenizer`] to tokenize text.
152
- unet ([`UNet3DConditionModel`]):
153
- A [`UNet3DConditionModel`] to denoise the encoded video latents.
154
- scheduler ([`SchedulerMixin`]):
155
- A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
156
- [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
157
- """
158
-
159
- def __init__(
160
- self,
161
- vae: AutoencoderKL,
162
- text_encoder: CLIPTextModel,
163
- tokenizer: CLIPTokenizer,
164
- unet: UNet3DConditionModel,
165
- scheduler: KarrasDiffusionSchedulers,
166
- ):
167
- super().__init__()
168
-
169
- self.register_modules(
170
- vae=vae,
171
- text_encoder=text_encoder,
172
- tokenizer=tokenizer,
173
- unet=unet,
174
- scheduler=scheduler,
175
- )
176
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
177
-
178
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing
179
- def enable_vae_slicing(self):
180
- r"""
181
- Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
182
- compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
183
- """
184
- self.vae.enable_slicing()
185
-
186
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing
187
- def disable_vae_slicing(self):
188
- r"""
189
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
190
- computing decoding in one step.
191
- """
192
- self.vae.disable_slicing()
193
-
194
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling
195
- def enable_vae_tiling(self):
196
- r"""
197
- Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
198
- compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
199
- processing larger images.
200
- """
201
- self.vae.enable_tiling()
202
-
203
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
204
- def disable_vae_tiling(self):
205
- r"""
206
- Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
207
- computing decoding in one step.
208
- """
209
- self.vae.disable_tiling()
210
-
211
- def enable_model_cpu_offload(self, gpu_id=0):
212
- r"""
213
- Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
214
- time to the GPU when its `forward` method is called, and the model remains in GPU until the next model runs.
215
- Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
216
- iterative execution of the `unet`.
217
- """
218
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
219
- from accelerate import cpu_offload_with_hook
220
- else:
221
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
222
-
223
- device = torch.device(f"cuda:{gpu_id}")
224
-
225
- if self.device.type != "cpu":
226
- self.to("cpu", silence_dtype_warnings=True)
227
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
228
-
229
- hook = None
230
- for cpu_offloaded_model in [self.text_encoder, self.vae, self.unet]:
231
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
232
-
233
- # We'll offload the last model manually.
234
- self.final_offload_hook = hook
235
-
236
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
237
- def _encode_prompt(
238
- self,
239
- prompt,
240
- device,
241
- num_images_per_prompt,
242
- do_classifier_free_guidance,
243
- negative_prompt=None,
244
- prompt_embeds: Optional[torch.FloatTensor] = None,
245
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
246
- lora_scale: Optional[float] = None,
247
- ):
248
- r"""
249
- Encodes the prompt into text encoder hidden states.
250
-
251
- Args:
252
- prompt (`str` or `List[str]`, *optional*):
253
- prompt to be encoded
254
- device: (`torch.device`):
255
- torch device
256
- num_images_per_prompt (`int`):
257
- number of images that should be generated per prompt
258
- do_classifier_free_guidance (`bool`):
259
- whether to use classifier free guidance or not
260
- negative_prompt (`str` or `List[str]`, *optional*):
261
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
262
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
263
- less than `1`).
264
- prompt_embeds (`torch.FloatTensor`, *optional*):
265
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
266
- provided, text embeddings will be generated from `prompt` input argument.
267
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
268
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
269
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
270
- argument.
271
- lora_scale (`float`, *optional*):
272
- A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
273
- """
274
- # set lora scale so that monkey patched LoRA
275
- # function of text encoder can correctly access it
276
- if lora_scale is not None and isinstance(self, LoraLoaderMixin):
277
- self._lora_scale = lora_scale
278
-
279
- if prompt is not None and isinstance(prompt, str):
280
- batch_size = 1
281
- elif prompt is not None and isinstance(prompt, list):
282
- batch_size = len(prompt)
283
- else:
284
- batch_size = prompt_embeds.shape[0]
285
-
286
- if prompt_embeds is None:
287
- # textual inversion: procecss multi-vector tokens if necessary
288
- if isinstance(self, TextualInversionLoaderMixin):
289
- prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
290
-
291
- text_inputs = self.tokenizer(
292
- prompt,
293
- padding="max_length",
294
- max_length=self.tokenizer.model_max_length,
295
- truncation=True,
296
- return_tensors="pt",
297
- )
298
- text_input_ids = text_inputs.input_ids
299
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
300
-
301
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
302
- text_input_ids, untruncated_ids
303
- ):
304
- removed_text = self.tokenizer.batch_decode(
305
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
306
- )
307
- logger.warning(
308
- "The following part of your input was truncated because CLIP can only handle sequences up to"
309
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
310
- )
311
-
312
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
313
- attention_mask = text_inputs.attention_mask.to(device)
314
- else:
315
- attention_mask = None
316
-
317
- prompt_embeds = self.text_encoder(
318
- text_input_ids.to(device),
319
- attention_mask=attention_mask,
320
- )
321
- prompt_embeds = prompt_embeds[0]
322
-
323
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
324
-
325
- bs_embed, seq_len, _ = prompt_embeds.shape
326
- # duplicate text embeddings for each generation per prompt, using mps friendly method
327
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
328
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
329
-
330
- # get unconditional embeddings for classifier free guidance
331
- if do_classifier_free_guidance and negative_prompt_embeds is None:
332
- uncond_tokens: List[str]
333
- if negative_prompt is None:
334
- uncond_tokens = [""] * batch_size
335
- elif prompt is not None and type(prompt) is not type(negative_prompt):
336
- raise TypeError(
337
- f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
338
- f" {type(prompt)}."
339
- )
340
- elif isinstance(negative_prompt, str):
341
- uncond_tokens = [negative_prompt]
342
- elif batch_size != len(negative_prompt):
343
- raise ValueError(
344
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
345
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
346
- " the batch size of `prompt`."
347
- )
348
- else:
349
- uncond_tokens = negative_prompt
350
-
351
- # textual inversion: procecss multi-vector tokens if necessary
352
- if isinstance(self, TextualInversionLoaderMixin):
353
- uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
354
-
355
- max_length = prompt_embeds.shape[1]
356
- uncond_input = self.tokenizer(
357
- uncond_tokens,
358
- padding="max_length",
359
- max_length=max_length,
360
- truncation=True,
361
- return_tensors="pt",
362
- )
363
-
364
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
365
- attention_mask = uncond_input.attention_mask.to(device)
366
- else:
367
- attention_mask = None
368
-
369
- negative_prompt_embeds = self.text_encoder(
370
- uncond_input.input_ids.to(device),
371
- attention_mask=attention_mask,
372
- )
373
- negative_prompt_embeds = negative_prompt_embeds[0]
374
-
375
- if do_classifier_free_guidance:
376
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
377
- seq_len = negative_prompt_embeds.shape[1]
378
-
379
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
380
-
381
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
382
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
383
-
384
- # For classifier free guidance, we need to do two forward passes.
385
- # Here we concatenate the unconditional and text embeddings into a single batch
386
- # to avoid doing two forward passes
387
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
388
-
389
- return prompt_embeds
390
-
391
- # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents
392
- def decode_latents(self, latents):
393
- latents = 1 / self.vae.config.scaling_factor * latents
394
-
395
- batch_size, channels, num_frames, height, width = latents.shape
396
- latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
397
-
398
- image = self.vae.decode(latents).sample
399
- video = (
400
- image[None, :]
401
- .reshape(
402
- (
403
- batch_size,
404
- num_frames,
405
- -1,
406
- )
407
- + image.shape[2:]
408
- )
409
- .permute(0, 2, 1, 3, 4)
410
- )
411
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
412
- video = video.float()
413
- return video
414
-
415
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
416
- def prepare_extra_step_kwargs(self, generator, eta):
417
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
418
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
419
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
420
- # and should be between [0, 1]
421
-
422
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
423
- extra_step_kwargs = {}
424
- if accepts_eta:
425
- extra_step_kwargs["eta"] = eta
426
-
427
- # check if the scheduler accepts generator
428
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
429
- if accepts_generator:
430
- extra_step_kwargs["generator"] = generator
431
- return extra_step_kwargs
432
-
433
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.check_inputs
434
- def check_inputs(
435
- self, prompt, strength, callback_steps, negative_prompt=None, prompt_embeds=None, negative_prompt_embeds=None
436
- ):
437
- if strength < 0 or strength > 1:
438
- raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
439
-
440
- if (callback_steps is None) or (
441
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
442
- ):
443
- raise ValueError(
444
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
445
- f" {type(callback_steps)}."
446
- )
447
-
448
- if prompt is not None and prompt_embeds is not None:
449
- raise ValueError(
450
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
451
- " only forward one of the two."
452
- )
453
- elif prompt is None and prompt_embeds is None:
454
- raise ValueError(
455
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
456
- )
457
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
458
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
459
-
460
- if negative_prompt is not None and negative_prompt_embeds is not None:
461
- raise ValueError(
462
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
463
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
464
- )
465
-
466
- if prompt_embeds is not None and negative_prompt_embeds is not None:
467
- if prompt_embeds.shape != negative_prompt_embeds.shape:
468
- raise ValueError(
469
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
470
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
471
- f" {negative_prompt_embeds.shape}."
472
- )
473
-
474
- # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
475
- def get_timesteps(self, num_inference_steps, strength, device):
476
- # get the original timestep using init_timestep
477
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
478
-
479
- t_start = max(num_inference_steps - init_timestep, 0)
480
- timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
481
-
482
- return timesteps, num_inference_steps - t_start
483
-
484
- def prepare_latents(self, video, timestep, batch_size, dtype, device, generator=None):
485
- video = video.to(device=device, dtype=dtype)
486
-
487
- # change from (b, c, f, h, w) -> (b * f, c, w, h)
488
- bsz, channel, frames, width, height = video.shape
489
- video = video.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)
490
-
491
- if video.shape[1] == 4:
492
- init_latents = video
493
- else:
494
- if isinstance(generator, list) and len(generator) != batch_size:
495
- raise ValueError(
496
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
497
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
498
- )
499
-
500
- elif isinstance(generator, list):
501
- init_latents = [
502
- self.vae.encode(video[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
503
- ]
504
- init_latents = torch.cat(init_latents, dim=0)
505
- else:
506
- init_latents = self.vae.encode(video).latent_dist.sample(generator)
507
-
508
- init_latents = self.vae.config.scaling_factor * init_latents
509
-
510
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
511
- raise ValueError(
512
- f"Cannot duplicate `video` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
513
- )
514
- else:
515
- init_latents = torch.cat([init_latents], dim=0)
516
-
517
- shape = init_latents.shape
518
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
519
-
520
- # get latents
521
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
522
- latents = init_latents
523
-
524
- latents = latents[None, :].reshape((bsz, frames, latents.shape[1]) + latents.shape[2:]).permute(0, 2, 1, 3, 4)
525
-
526
- return latents
527
-
528
- @torch.no_grad()
529
- @replace_example_docstring(EXAMPLE_DOC_STRING)
530
- def __call__(
531
- self,
532
- prompt: Union[str, List[str]] = None,
533
- video: Union[List[np.ndarray], torch.FloatTensor] = None,
534
- strength: float = 0.6,
535
- num_inference_steps: int = 50,
536
- guidance_scale: float = 15.0,
537
- negative_prompt: Optional[Union[str, List[str]]] = None,
538
- eta: float = 0.0,
539
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
540
- latents: Optional[torch.FloatTensor] = None,
541
- prompt_embeds: Optional[torch.FloatTensor] = None,
542
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
543
- output_type: Optional[str] = "np",
544
- return_dict: bool = True,
545
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
546
- callback_steps: int = 1,
547
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
548
- ):
549
- r"""
550
- The call function to the pipeline for generation.
551
-
552
- Args:
553
- prompt (`str` or `List[str]`, *optional*):
554
- The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
555
- video (`List[np.ndarray]` or `torch.FloatTensor`):
556
- `video` frames or tensor representing a video batch to be used as the starting point for the process.
557
- Can also accpet video latents as `image`, if passing latents directly, it will not be encoded again.
558
- strength (`float`, *optional*, defaults to 0.8):
559
- Indicates extent to transform the reference `video`. Must be between 0 and 1. `video` is used as a
560
- starting point, adding more noise to it the larger the `strength`. The number of denoising steps
561
- depends on the amount of noise initially added. When `strength` is 1, added noise is maximum and the
562
- denoising process runs for the full number of iterations specified in `num_inference_steps`. A value of
563
- 1 essentially ignores `video`.
564
- num_inference_steps (`int`, *optional*, defaults to 50):
565
- The number of denoising steps. More denoising steps usually lead to a higher quality videos at the
566
- expense of slower inference.
567
- guidance_scale (`float`, *optional*, defaults to 7.5):
568
- A higher guidance scale value encourages the model to generate images closely linked to the text
569
- `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
570
- negative_prompt (`str` or `List[str]`, *optional*):
571
- The prompt or prompts to guide what to not include in video generation. If not defined, you need to
572
- pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
573
- eta (`float`, *optional*, defaults to 0.0):
574
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
575
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
576
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
577
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
578
- generation deterministic.
579
- latents (`torch.FloatTensor`, *optional*):
580
- Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
581
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
582
- tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
583
- `(batch_size, num_channel, num_frames, height, width)`.
584
- prompt_embeds (`torch.FloatTensor`, *optional*):
585
- Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
586
- provided, text embeddings are generated from the `prompt` input argument.
587
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
588
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
589
- not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
590
- output_type (`str`, *optional*, defaults to `"np"`):
591
- The output format of the generated video. Choose between `torch.FloatTensor` or `np.array`.
592
- return_dict (`bool`, *optional*, defaults to `True`):
593
- Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead
594
- of a plain tuple.
595
- callback (`Callable`, *optional*):
596
- A function that calls every `callback_steps` steps during inference. The function is called with the
597
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
598
- callback_steps (`int`, *optional*, defaults to 1):
599
- The frequency at which the `callback` function is called. If not specified, the callback is called at
600
- every step.
601
- cross_attention_kwargs (`dict`, *optional*):
602
- A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
603
- [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
604
-
605
- Examples:
606
-
607
- Returns:
608
- [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] or `tuple`:
609
- If `return_dict` is `True`, [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] is
610
- returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
611
- """
612
- # 0. Default height and width to unet
613
- num_images_per_prompt = 1
614
-
615
- # 1. Check inputs. Raise error if not correct
616
- self.check_inputs(prompt, strength, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)
617
-
618
- # 2. Define call parameters
619
- if prompt is not None and isinstance(prompt, str):
620
- batch_size = 1
621
- elif prompt is not None and isinstance(prompt, list):
622
- batch_size = len(prompt)
623
- else:
624
- batch_size = prompt_embeds.shape[0]
625
-
626
- device = self._execution_device
627
- # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
628
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
629
- # corresponds to doing no classifier free guidance.
630
- do_classifier_free_guidance = guidance_scale > 1.0
631
-
632
- # 3. Encode input prompt
633
- text_encoder_lora_scale = (
634
- cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
635
- )
636
- prompt_embeds = self._encode_prompt(
637
- prompt,
638
- device,
639
- num_images_per_prompt,
640
- do_classifier_free_guidance,
641
- negative_prompt,
642
- prompt_embeds=prompt_embeds,
643
- negative_prompt_embeds=negative_prompt_embeds,
644
- lora_scale=text_encoder_lora_scale,
645
- )
646
-
647
- # 4. Preprocess video
648
- video = preprocess_video(video)
649
-
650
- # 5. Prepare timesteps
651
- self.scheduler.set_timesteps(num_inference_steps, device=device)
652
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
653
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
654
-
655
- # 5. Prepare latent variables
656
- latents = self.prepare_latents(video, latent_timestep, batch_size, prompt_embeds.dtype, device, generator)
657
-
658
- # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
659
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
660
-
661
- # 7. Denoising loop
662
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
663
- with self.progress_bar(total=num_inference_steps) as progress_bar:
664
- for i, t in enumerate(timesteps):
665
- # expand the latents if we are doing classifier free guidance
666
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
667
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
668
-
669
- # predict the noise residual
670
- noise_pred = self.unet(
671
- latent_model_input,
672
- t,
673
- encoder_hidden_states=prompt_embeds,
674
- cross_attention_kwargs=cross_attention_kwargs,
675
- return_dict=False,
676
- )[0]
677
-
678
- # perform guidance
679
- if do_classifier_free_guidance:
680
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
681
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
682
-
683
- # reshape latents
684
- bsz, channel, frames, width, height = latents.shape
685
- latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)
686
- noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)
687
-
688
- # compute the previous noisy sample x_t -> x_t-1
689
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
690
-
691
- # reshape latents back
692
- latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4)
693
-
694
- # call the callback, if provided
695
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
696
- progress_bar.update()
697
- if callback is not None and i % callback_steps == 0:
698
- callback(i, t, latents)
699
-
700
- if output_type == "latent":
701
- return TextToVideoSDPipelineOutput(frames=latents)
702
-
703
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
704
- self.unet.to("cpu")
705
-
706
- video_tensor = self.decode_latents(latents)
707
-
708
- if output_type == "pt":
709
- video = video_tensor
710
- else:
711
- video = tensor2vid(video_tensor)
712
-
713
- # Offload last model to CPU
714
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
715
- self.final_offload_hook.offload()
716
-
717
- if not return_dict:
718
- return (video,)
719
-
720
- return TextToVideoSDPipelineOutput(frames=video)
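
In the denoising loop above, the 3D UNet's latents are flattened from `(batch, channels, frames, h, w)` to `(batch * frames, channels, h, w)` before `scheduler.step`, and restored afterwards, so the per-frame scheduler math can be applied to a 4D tensor. A minimal sketch of that round trip with arbitrary illustrative shapes:

```python
# Sketch: the latent reshape performed around the scheduler step in __call__().
import torch

bsz, channel, frames, height, width = 1, 4, 8, 40, 72
latents = torch.randn(bsz, channel, frames, height, width)

# flatten the frame axis into the batch axis
flat = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, height, width)
# ... scheduler.step(...) would operate on `flat` here ...

# restore the (batch, channels, frames, h, w) layout
restored = flat[None, :].reshape(bsz, frames, channel, height, width).permute(0, 2, 1, 3, 4)
assert torch.equal(latents, restored)
```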
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/ddim/test_ddim.py DELETED
@@ -1,143 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import unittest
17
-
18
- import numpy as np
19
- import torch
20
-
21
- from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
22
- from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
23
-
24
- from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
25
- from ..test_pipelines_common import PipelineTesterMixin
26
-
27
-
28
- enable_full_determinism()
29
-
30
-
31
- class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
32
- pipeline_class = DDIMPipeline
33
- params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
34
- required_optional_params = PipelineTesterMixin.required_optional_params - {
35
- "num_images_per_prompt",
36
- "latents",
37
- "callback",
38
- "callback_steps",
39
- }
40
- batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
41
-
42
- def get_dummy_components(self):
43
- torch.manual_seed(0)
44
- unet = UNet2DModel(
45
- block_out_channels=(32, 64),
46
- layers_per_block=2,
47
- sample_size=32,
48
- in_channels=3,
49
- out_channels=3,
50
- down_block_types=("DownBlock2D", "AttnDownBlock2D"),
51
- up_block_types=("AttnUpBlock2D", "UpBlock2D"),
52
- )
53
- scheduler = DDIMScheduler()
54
- components = {"unet": unet, "scheduler": scheduler}
55
- return components
56
-
57
- def get_dummy_inputs(self, device, seed=0):
58
- if str(device).startswith("mps"):
59
- generator = torch.manual_seed(seed)
60
- else:
61
- generator = torch.Generator(device=device).manual_seed(seed)
62
- inputs = {
63
- "batch_size": 1,
64
- "generator": generator,
65
- "num_inference_steps": 2,
66
- "output_type": "numpy",
67
- }
68
- return inputs
69
-
70
- def test_inference(self):
71
- device = "cpu"
72
-
73
- components = self.get_dummy_components()
74
- pipe = self.pipeline_class(**components)
75
- pipe.to(device)
76
- pipe.set_progress_bar_config(disable=None)
77
-
78
- inputs = self.get_dummy_inputs(device)
79
- image = pipe(**inputs).images
80
- image_slice = image[0, -3:, -3:, -1]
81
-
82
- self.assertEqual(image.shape, (1, 32, 32, 3))
83
- expected_slice = np.array(
84
- [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
85
- )
86
- max_diff = np.abs(image_slice.flatten() - expected_slice).max()
87
- self.assertLessEqual(max_diff, 1e-3)
88
-
89
- def test_dict_tuple_outputs_equivalent(self):
90
- super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)
91
-
92
- def test_save_load_local(self):
93
- super().test_save_load_local(expected_max_difference=3e-3)
94
-
95
- def test_save_load_optional_components(self):
96
- super().test_save_load_optional_components(expected_max_difference=3e-3)
97
-
98
- def test_inference_batch_single_identical(self):
99
- super().test_inference_batch_single_identical(expected_max_diff=3e-3)
100
-
101
-
102
- @slow
103
- @require_torch_gpu
104
- class DDIMPipelineIntegrationTests(unittest.TestCase):
105
- def test_inference_cifar10(self):
106
- model_id = "google/ddpm-cifar10-32"
107
-
108
- unet = UNet2DModel.from_pretrained(model_id)
109
- scheduler = DDIMScheduler()
110
-
111
- ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
112
- ddim.to(torch_device)
113
- ddim.set_progress_bar_config(disable=None)
114
-
115
- generator = torch.manual_seed(0)
116
- image = ddim(generator=generator, eta=0.0, output_type="numpy").images
117
-
118
- image_slice = image[0, -3:, -3:, -1]
119
-
120
- assert image.shape == (1, 32, 32, 3)
121
- expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
122
-
123
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
124
-
125
- def test_inference_ema_bedroom(self):
126
- model_id = "google/ddpm-ema-bedroom-256"
127
-
128
- unet = UNet2DModel.from_pretrained(model_id)
129
- scheduler = DDIMScheduler.from_pretrained(model_id)
130
-
131
- ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
132
- ddpm.to(torch_device)
133
- ddpm.set_progress_bar_config(disable=None)
134
-
135
- generator = torch.manual_seed(0)
136
- image = ddpm(generator=generator, output_type="numpy").images
137
-
138
- image_slice = image[0, -3:, -3:, -1]
139
-
140
- assert image.shape == (1, 256, 256, 3)
141
- expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
142
-
143
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
 
spaces/Andy1621/uniformer_image_detection/mmdet/core/mask/utils.py DELETED
@@ -1,63 +0,0 @@
1
- import mmcv
2
- import numpy as np
3
- import pycocotools.mask as mask_util
4
-
5
-
6
- def split_combined_polys(polys, poly_lens, polys_per_mask):
7
- """Split the combined 1-D polys into masks.
8
-
9
- A mask is represented as a list of polys, and a poly is represented as
10
- a 1-D array. In dataset, all masks are concatenated into a single 1-D
11
- tensor. Here we need to split the tensor into original representations.
12
-
13
- Args:
14
- polys (list): a list (length = image num) of 1-D tensors
15
- poly_lens (list): a list (length = image num) of poly length
16
- polys_per_mask (list): a list (length = image num) of poly number
17
- of each mask
18
-
19
- Returns:
20
- list: a list (length = image num) of list (length = mask num) of \
21
- list (length = poly num) of numpy array.
22
- """
23
- mask_polys_list = []
24
- for img_id in range(len(polys)):
25
- polys_single = polys[img_id]
26
- polys_lens_single = poly_lens[img_id].tolist()
27
- polys_per_mask_single = polys_per_mask[img_id].tolist()
28
-
29
- split_polys = mmcv.slice_list(polys_single, polys_lens_single)
30
- mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)
31
- mask_polys_list.append(mask_polys)
32
- return mask_polys_list
33
-
34
-
35
- # TODO: move this function to more proper place
36
- def encode_mask_results(mask_results):
37
- """Encode bitmap mask to RLE code.
38
-
39
- Args:
40
- mask_results (list | tuple[list]): bitmap mask results.
41
- In mask scoring rcnn, mask_results is a tuple of (segm_results,
42
- segm_cls_score).
43
-
44
- Returns:
45
- list | tuple: RLE encoded mask.
46
- """
47
- if isinstance(mask_results, tuple): # mask scoring
48
- cls_segms, cls_mask_scores = mask_results
49
- else:
50
- cls_segms = mask_results
51
- num_classes = len(cls_segms)
52
- encoded_mask_results = [[] for _ in range(num_classes)]
53
- for i in range(len(cls_segms)):
54
- for cls_segm in cls_segms[i]:
55
- encoded_mask_results[i].append(
56
- mask_util.encode(
57
- np.array(
58
- cls_segm[:, :, np.newaxis], order='F',
59
- dtype='uint8'))[0]) # encoded with RLE
60
- if isinstance(mask_results, tuple):
61
- return encoded_mask_results, cls_mask_scores
62
- else:
63
- return encoded_mask_results
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/hooks/logger/text.py DELETED
@@ -1,256 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- import datetime
3
- import os
4
- import os.path as osp
5
- from collections import OrderedDict
6
-
7
- import torch
8
- import torch.distributed as dist
9
-
10
- import annotator.uniformer.mmcv as mmcv
11
- from annotator.uniformer.mmcv.fileio.file_client import FileClient
12
- from annotator.uniformer.mmcv.utils import is_tuple_of, scandir
13
- from ..hook import HOOKS
14
- from .base import LoggerHook
15
-
16
-
17
- @HOOKS.register_module()
18
- class TextLoggerHook(LoggerHook):
19
- """Logger hook in text.
20
-
21
- In this logger hook, the information will be printed on terminal and
22
- saved in json file.
23
-
24
- Args:
25
- by_epoch (bool, optional): Whether EpochBasedRunner is used.
26
- Default: True.
27
- interval (int, optional): Logging interval (every k iterations).
28
- Default: 10.
29
- ignore_last (bool, optional): Ignore the log of last iterations in each
30
- epoch if less than :attr:`interval`. Default: True.
31
- reset_flag (bool, optional): Whether to clear the output buffer after
32
- logging. Default: False.
33
- interval_exp_name (int, optional): Logging interval for experiment
34
- name. This feature is to help users conveniently get the experiment
35
- information from screen or log file. Default: 1000.
36
- out_dir (str, optional): Logs are saved in ``runner.work_dir`` default.
37
- If ``out_dir`` is specified, logs will be copied to a new directory
38
- which is the concatenation of ``out_dir`` and the last level
39
- directory of ``runner.work_dir``. Default: None.
40
- `New in version 1.3.16.`
41
- out_suffix (str or tuple[str], optional): Those filenames ending with
42
- ``out_suffix`` will be copied to ``out_dir``.
43
- Default: ('.log.json', '.log', '.py').
44
- `New in version 1.3.16.`
45
- keep_local (bool, optional): Whether to keep local log when
46
- :attr:`out_dir` is specified. If False, the local log will be
47
- removed. Default: True.
48
- `New in version 1.3.16.`
49
- file_client_args (dict, optional): Arguments to instantiate a
50
- FileClient. See :class:`mmcv.fileio.FileClient` for details.
51
- Default: None.
52
- `New in version 1.3.16.`
53
- """
54
-
55
- def __init__(self,
56
- by_epoch=True,
57
- interval=10,
58
- ignore_last=True,
59
- reset_flag=False,
60
- interval_exp_name=1000,
61
- out_dir=None,
62
- out_suffix=('.log.json', '.log', '.py'),
63
- keep_local=True,
64
- file_client_args=None):
65
- super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag,
66
- by_epoch)
67
- self.by_epoch = by_epoch
68
- self.time_sec_tot = 0
69
- self.interval_exp_name = interval_exp_name
70
-
71
- if out_dir is None and file_client_args is not None:
72
- raise ValueError(
73
- 'file_client_args should be "None" when `out_dir` is not'
74
- 'specified.')
75
- self.out_dir = out_dir
76
-
77
- if not (out_dir is None or isinstance(out_dir, str)
78
- or is_tuple_of(out_dir, str)):
79
- raise TypeError('out_dir should be "None" or string or tuple of '
80
- 'string, but got {out_dir}')
81
- self.out_suffix = out_suffix
82
-
83
- self.keep_local = keep_local
84
- self.file_client_args = file_client_args
85
- if self.out_dir is not None:
86
- self.file_client = FileClient.infer_client(file_client_args,
87
- self.out_dir)
88
-
89
- def before_run(self, runner):
90
- super(TextLoggerHook, self).before_run(runner)
91
-
92
- if self.out_dir is not None:
93
- self.file_client = FileClient.infer_client(self.file_client_args,
94
- self.out_dir)
95
- # The final `self.out_dir` is the concatenation of `self.out_dir`
96
- # and the last level directory of `runner.work_dir`
97
- basename = osp.basename(runner.work_dir.rstrip(osp.sep))
98
- self.out_dir = self.file_client.join_path(self.out_dir, basename)
99
- runner.logger.info(
100
- (f'Text logs will be saved to {self.out_dir} by '
101
- f'{self.file_client.name} after the training process.'))
102
-
103
- self.start_iter = runner.iter
104
- self.json_log_path = osp.join(runner.work_dir,
105
- f'{runner.timestamp}.log.json')
106
- if runner.meta is not None:
107
- self._dump_log(runner.meta, runner)
108
-
109
- def _get_max_memory(self, runner):
110
- device = getattr(runner.model, 'output_device', None)
111
- mem = torch.cuda.max_memory_allocated(device=device)
112
- mem_mb = torch.tensor([mem / (1024 * 1024)],
113
- dtype=torch.int,
114
- device=device)
115
- if runner.world_size > 1:
116
- dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
117
- return mem_mb.item()
118
-
119
- def _log_info(self, log_dict, runner):
120
- # print exp name for users to distinguish experiments
121
- # at every ``interval_exp_name`` iterations and the end of each epoch
122
- if runner.meta is not None and 'exp_name' in runner.meta:
123
- if (self.every_n_iters(runner, self.interval_exp_name)) or (
124
- self.by_epoch and self.end_of_epoch(runner)):
125
- exp_info = f'Exp name: {runner.meta["exp_name"]}'
126
- runner.logger.info(exp_info)
127
-
128
- if log_dict['mode'] == 'train':
129
- if isinstance(log_dict['lr'], dict):
130
- lr_str = []
131
- for k, val in log_dict['lr'].items():
132
- lr_str.append(f'lr_{k}: {val:.3e}')
133
- lr_str = ' '.join(lr_str)
134
- else:
135
- lr_str = f'lr: {log_dict["lr"]:.3e}'
136
-
137
- # by epoch: Epoch [4][100/1000]
138
- # by iter: Iter [100/100000]
139
- if self.by_epoch:
140
- log_str = f'Epoch [{log_dict["epoch"]}]' \
141
- f'[{log_dict["iter"]}/{len(runner.data_loader)}]\t'
142
- else:
143
- log_str = f'Iter [{log_dict["iter"]}/{runner.max_iters}]\t'
144
- log_str += f'{lr_str}, '
145
-
146
- if 'time' in log_dict.keys():
147
- self.time_sec_tot += (log_dict['time'] * self.interval)
148
- time_sec_avg = self.time_sec_tot / (
149
- runner.iter - self.start_iter + 1)
150
- eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
151
- eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
152
- log_str += f'eta: {eta_str}, '
153
- log_str += f'time: {log_dict["time"]:.3f}, ' \
154
- f'data_time: {log_dict["data_time"]:.3f}, '
155
- # statistic memory
156
- if torch.cuda.is_available():
157
- log_str += f'memory: {log_dict["memory"]}, '
158
- else:
159
- # val/test time
160
- # here 1000 is the length of the val dataloader
161
- # by epoch: Epoch[val] [4][1000]
162
- # by iter: Iter[val] [1000]
163
- if self.by_epoch:
164
- log_str = f'Epoch({log_dict["mode"]}) ' \
165
- f'[{log_dict["epoch"]}][{log_dict["iter"]}]\t'
166
- else:
167
- log_str = f'Iter({log_dict["mode"]}) [{log_dict["iter"]}]\t'
168
-
169
- log_items = []
170
- for name, val in log_dict.items():
171
- # TODO: resolve this hack
172
- # these items have been in log_str
173
- if name in [
174
- 'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time',
175
- 'memory', 'epoch'
176
- ]:
177
- continue
178
- if isinstance(val, float):
179
- val = f'{val:.4f}'
180
- log_items.append(f'{name}: {val}')
181
- log_str += ', '.join(log_items)
182
-
183
- runner.logger.info(log_str)
184
-
185
- def _dump_log(self, log_dict, runner):
186
- # dump log in json format
187
- json_log = OrderedDict()
188
- for k, v in log_dict.items():
189
- json_log[k] = self._round_float(v)
190
- # only append log at last line
191
- if runner.rank == 0:
192
- with open(self.json_log_path, 'a+') as f:
193
- mmcv.dump(json_log, f, file_format='json')
194
- f.write('\n')
195
-
196
- def _round_float(self, items):
197
- if isinstance(items, list):
198
- return [self._round_float(item) for item in items]
199
- elif isinstance(items, float):
200
- return round(items, 5)
201
- else:
202
- return items
203
-
204
- def log(self, runner):
205
- if 'eval_iter_num' in runner.log_buffer.output:
206
- # this doesn't modify runner.iter and is regardless of by_epoch
207
- cur_iter = runner.log_buffer.output.pop('eval_iter_num')
208
- else:
209
- cur_iter = self.get_iter(runner, inner_iter=True)
210
-
211
- log_dict = OrderedDict(
212
- mode=self.get_mode(runner),
213
- epoch=self.get_epoch(runner),
214
- iter=cur_iter)
215
-
216
- # only record lr of the first param group
217
- cur_lr = runner.current_lr()
218
- if isinstance(cur_lr, list):
219
- log_dict['lr'] = cur_lr[0]
220
- else:
221
- assert isinstance(cur_lr, dict)
222
- log_dict['lr'] = {}
223
- for k, lr_ in cur_lr.items():
224
- assert isinstance(lr_, list)
225
- log_dict['lr'].update({k: lr_[0]})
226
-
227
- if 'time' in runner.log_buffer.output:
228
- # statistic memory
229
- if torch.cuda.is_available():
230
- log_dict['memory'] = self._get_max_memory(runner)
231
-
232
- log_dict = dict(log_dict, **runner.log_buffer.output)
233
-
234
- self._log_info(log_dict, runner)
235
- self._dump_log(log_dict, runner)
236
- return log_dict
237
-
238
- def after_run(self, runner):
239
- # copy or upload logs to self.out_dir
240
- if self.out_dir is not None:
241
- for filename in scandir(runner.work_dir, self.out_suffix, True):
242
- local_filepath = osp.join(runner.work_dir, filename)
243
- out_filepath = self.file_client.join_path(
244
- self.out_dir, filename)
245
- with open(local_filepath, 'r') as f:
246
- self.file_client.put_text(f.read(), out_filepath)
247
-
248
- runner.logger.info(
249
- (f'The file {local_filepath} has been uploaded to '
250
- f'{out_filepath}.'))
251
-
252
- if not self.keep_local:
253
- os.remove(local_filepath)
254
- runner.logger.info(
255
- (f'{local_filepath} was removed due to the '
256
- '`self.keep_local=False`'))
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/commands/check.py DELETED
@@ -1,52 +0,0 @@
1
- import logging
2
- from optparse import Values
3
- from typing import List
4
-
5
- from pip._internal.cli.base_command import Command
6
- from pip._internal.cli.status_codes import ERROR, SUCCESS
7
- from pip._internal.operations.check import (
8
- check_package_set,
9
- create_package_set_from_installed,
10
- )
11
- from pip._internal.utils.misc import write_output
12
-
13
- logger = logging.getLogger(__name__)
14
-
15
-
16
- class CheckCommand(Command):
17
- """Verify installed packages have compatible dependencies."""
18
-
19
- usage = """
20
- %prog [options]"""
21
-
22
- def run(self, options: Values, args: List[str]) -> int:
23
- package_set, parsing_probs = create_package_set_from_installed()
24
- missing, conflicting = check_package_set(package_set)
25
-
26
- for project_name in missing:
27
- version = package_set[project_name].version
28
- for dependency in missing[project_name]:
29
- write_output(
30
- "%s %s requires %s, which is not installed.",
31
- project_name,
32
- version,
33
- dependency[0],
34
- )
35
-
36
- for project_name in conflicting:
37
- version = package_set[project_name].version
38
- for dep_name, dep_version, req in conflicting[project_name]:
39
- write_output(
40
- "%s %s has requirement %s, but you have %s %s.",
41
- project_name,
42
- version,
43
- req,
44
- dep_name,
45
- dep_version,
46
- )
47
-
48
- if missing or conflicting or parsing_probs:
49
- return ERROR
50
- else:
51
- write_output("No broken requirements found.")
52
- return SUCCESS
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/version.py DELETED
@@ -1,9 +0,0 @@
1
- """
2
- This module exists only to simplify retrieving the version number of chardet
3
- from within setuptools and from chardet subpackages.
4
-
5
- :author: Dan Blanchard ([email protected])
6
- """
7
-
8
- __version__ = "5.1.0"
9
- VERSION = __version__.split(".")
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/requests/structures.py DELETED
@@ -1,99 +0,0 @@
1
- """
2
- requests.structures
3
- ~~~~~~~~~~~~~~~~~~~
4
-
5
- Data structures that power Requests.
6
- """
7
-
8
- from collections import OrderedDict
9
-
10
- from .compat import Mapping, MutableMapping
11
-
12
-
13
- class CaseInsensitiveDict(MutableMapping):
14
- """A case-insensitive ``dict``-like object.
15
-
16
- Implements all methods and operations of
17
- ``MutableMapping`` as well as dict's ``copy``. Also
18
- provides ``lower_items``.
19
-
20
- All keys are expected to be strings. The structure remembers the
21
- case of the last key to be set, and ``iter(instance)``,
22
- ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
23
- will contain case-sensitive keys. However, querying and contains
24
- testing is case insensitive::
25
-
26
- cid = CaseInsensitiveDict()
27
- cid['Accept'] = 'application/json'
28
- cid['aCCEPT'] == 'application/json' # True
29
- list(cid) == ['Accept'] # True
30
-
31
- For example, ``headers['content-encoding']`` will return the
32
- value of a ``'Content-Encoding'`` response header, regardless
33
- of how the header name was originally stored.
34
-
35
- If the constructor, ``.update``, or equality comparison
36
- operations are given keys that have equal ``.lower()``s, the
37
- behavior is undefined.
38
- """
39
-
40
- def __init__(self, data=None, **kwargs):
41
- self._store = OrderedDict()
42
- if data is None:
43
- data = {}
44
- self.update(data, **kwargs)
45
-
46
- def __setitem__(self, key, value):
47
- # Use the lowercased key for lookups, but store the actual
48
- # key alongside the value.
49
- self._store[key.lower()] = (key, value)
50
-
51
- def __getitem__(self, key):
52
- return self._store[key.lower()][1]
53
-
54
- def __delitem__(self, key):
55
- del self._store[key.lower()]
56
-
57
- def __iter__(self):
58
- return (casedkey for casedkey, mappedvalue in self._store.values())
59
-
60
- def __len__(self):
61
- return len(self._store)
62
-
63
- def lower_items(self):
64
- """Like iteritems(), but with all lowercase keys."""
65
- return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())
66
-
67
- def __eq__(self, other):
68
- if isinstance(other, Mapping):
69
- other = CaseInsensitiveDict(other)
70
- else:
71
- return NotImplemented
72
- # Compare insensitively
73
- return dict(self.lower_items()) == dict(other.lower_items())
74
-
75
- # Copy is required
76
- def copy(self):
77
- return CaseInsensitiveDict(self._store.values())
78
-
79
- def __repr__(self):
80
- return str(dict(self.items()))
81
-
82
-
83
- class LookupDict(dict):
84
- """Dictionary lookup object."""
85
-
86
- def __init__(self, name=None):
87
- self.name = name
88
- super().__init__()
89
-
90
- def __repr__(self):
91
- return f"<lookup '{self.name}'>"
92
-
93
- def __getitem__(self, key):
94
- # We allow fall-through here, so values default to None
95
-
96
- return self.__dict__.get(key, None)
97
-
98
- def get(self, key, default=None):
99
- return self.__dict__.get(key, default)
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_box2box_transform.py DELETED
@@ -1,94 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import logging
3
- import unittest
4
- import torch
5
-
6
- from detectron2.modeling.box_regression import (
7
- Box2BoxTransform,
8
- Box2BoxTransformLinear,
9
- Box2BoxTransformRotated,
10
- )
11
- from detectron2.utils.testing import random_boxes
12
-
13
- logger = logging.getLogger(__name__)
14
-
15
-
16
- class TestBox2BoxTransform(unittest.TestCase):
17
- def test_reconstruction(self):
18
- weights = (5, 5, 10, 10)
19
- b2b_tfm = Box2BoxTransform(weights=weights)
20
- src_boxes = random_boxes(10)
21
- dst_boxes = random_boxes(10)
22
-
23
- devices = [torch.device("cpu")]
24
- if torch.cuda.is_available():
25
- devices.append(torch.device("cuda"))
26
- for device in devices:
27
- src_boxes = src_boxes.to(device=device)
28
- dst_boxes = dst_boxes.to(device=device)
29
- deltas = b2b_tfm.get_deltas(src_boxes, dst_boxes)
30
- dst_boxes_reconstructed = b2b_tfm.apply_deltas(deltas, src_boxes)
31
- self.assertTrue(torch.allclose(dst_boxes, dst_boxes_reconstructed))
32
-
33
- def test_apply_deltas_tracing(self):
34
- weights = (5, 5, 10, 10)
35
- b2b_tfm = Box2BoxTransform(weights=weights)
36
-
37
- with torch.no_grad():
38
- func = torch.jit.trace(b2b_tfm.apply_deltas, (torch.randn(10, 20), torch.randn(10, 4)))
39
-
40
- o = func(torch.randn(10, 20), torch.randn(10, 4))
41
- self.assertEqual(o.shape, (10, 20))
42
- o = func(torch.randn(5, 20), torch.randn(5, 4))
43
- self.assertEqual(o.shape, (5, 20))
44
-
45
-
46
- def random_rotated_boxes(mean_box, std_length, std_angle, N):
47
- return torch.cat(
48
- [torch.rand(N, 4) * std_length, torch.rand(N, 1) * std_angle], dim=1
49
- ) + torch.tensor(mean_box, dtype=torch.float)
50
-
51
-
52
- class TestBox2BoxTransformRotated(unittest.TestCase):
53
- def test_reconstruction(self):
54
- weights = (5, 5, 10, 10, 1)
55
- b2b_transform = Box2BoxTransformRotated(weights=weights)
56
- src_boxes = random_rotated_boxes([10, 10, 20, 20, -30], 5, 60.0, 10)
57
- dst_boxes = random_rotated_boxes([10, 10, 20, 20, -30], 5, 60.0, 10)
58
-
59
- devices = [torch.device("cpu")]
60
- if torch.cuda.is_available():
61
- devices.append(torch.device("cuda"))
62
- for device in devices:
63
- src_boxes = src_boxes.to(device=device)
64
- dst_boxes = dst_boxes.to(device=device)
65
- deltas = b2b_transform.get_deltas(src_boxes, dst_boxes)
66
- dst_boxes_reconstructed = b2b_transform.apply_deltas(deltas, src_boxes)
67
- assert torch.allclose(dst_boxes[:, :4], dst_boxes_reconstructed[:, :4], atol=1e-5)
68
- # angle difference has to be normalized
69
- assert torch.allclose(
70
- (dst_boxes[:, 4] - dst_boxes_reconstructed[:, 4] + 180.0) % 360.0 - 180.0,
71
- torch.zeros_like(dst_boxes[:, 4]),
72
- atol=1e-4,
73
- )
74
-
75
-
76
- class TestBox2BoxTransformLinear(unittest.TestCase):
77
- def test_reconstruction(self):
78
- b2b_tfm = Box2BoxTransformLinear()
79
- src_boxes = random_boxes(10)
80
- dst_boxes = torch.tensor([0, 0, 101, 101] * 10).reshape(10, 4).float()
81
-
82
- devices = [torch.device("cpu")]
83
- if torch.cuda.is_available():
84
- devices.append(torch.device("cuda"))
85
- for device in devices:
86
- src_boxes = src_boxes.to(device=device)
87
- dst_boxes = dst_boxes.to(device=device)
88
- deltas = b2b_tfm.get_deltas(src_boxes, dst_boxes)
89
- dst_boxes_reconstructed = b2b_tfm.apply_deltas(deltas, src_boxes)
90
- self.assertTrue(torch.allclose(dst_boxes, dst_boxes_reconstructed, atol=1e-3))
91
-
92
-
93
- if __name__ == "__main__":
94
- unittest.main()
 
spaces/Benson/text-generation/Examples/Bus Simulator Indonesia Nuevo Mapa Descargar.md DELETED
@@ -1,69 +0,0 @@
1
- <br />
2
- <h1>Simulador de autobús Indonesia: Cómo descargar y disfrutar de nuevos mapas</h1>
3
- <p>Bus Simulator Indonesia (alias BUSSID) es un popular juego de simulación que te permite experimentar lo que le gusta ser un conductor de autobús en Indonesia de una manera divertida y auténtica. BUSSID puede no ser el primero, pero es probablemente uno de los únicos juegos de simulador de bus con más características y el entorno indonesio más auténtico. </p>
4
- <p>Algunas de las características principales de BUSSID son:</p>
5
- <h2>bus simulator indonesia nuevo mapa descargar</h2><br /><p><b><b>DOWNLOAD</b> >>>>> <a href="https://bltlly.com/2v6MdH">https://bltlly.com/2v6MdH</a></b></p><br /><br />
6
- <ul>
7
- <li>Diseña tu propia librea</li>
8
- <li> Control muy fácil e intuitivo</li>
9
- <li>Ciudades y lugares indonesios auténticos</li>
10
- <li>Autobuses de Indonesia</li>
11
- <li>Fresco y divertido bocinazos, incluyendo el icónico "Om Telolet Om!" bocina</li>
12
- <li> Alta calidad y gráficos 3D detallados</li>
13
- <li>No hay anuncios obstructivos durante la conducción</li>
14
- <li>Tabla de clasificación y ahorro de datos en línea</li>
15
- <li>Utilice su propio modelo 3D utilizando el sistema de vehículo mod</li>
16
- <li>Convoy multijugador en línea</li>
17
- </ul>
18
- <p>Para jugar BUSSID, es necesario elegir un autobús, una librea, y una ruta. Luego, debe conducir su autobús a lo largo de la ruta, recoger y dejar pasajeros, ganar dinero y evitar accidentes. También puede personalizar su autobús, actualizar su garaje y unirse a convoyes en línea con otros jugadores. </p>
19
- <p>Uno de los beneficios de jugar BUSSID es que puedes descargar nuevos mapas para el juego, lo que puede agregar más variedad, desafío y diversión a tu experiencia de conducción. Los nuevos mapas pueden tener diferentes temas, como extremos, off-road o escénicos. También pueden tener diferentes características, como curvas afiladas, colinas empinadas o hitos realistas. Los nuevos mapas pueden hacerte sentir que conduces en diferentes regiones de Indonesia o incluso en otros países. </p>
20
- <p>Pero, ¿cómo descargar nuevos mapas para BUSSID? ¿Y cómo los disfruta? En este artículo, le mostraremos cómo hacer ambos en pasos fáciles. ¡Vamos a empezar! </p>
21
- <h2>Cómo descargar nuevos mapas para Bus Simulator Indonesia</h2>
22
-
23
- <p>Una de las mejores fuentes de mapas mod para BUSSID es [MediaRale]( 1 ), un sitio web que proporciona varios mods para juegos, incluyendo BUSSID. MediaRale tiene una sección dedicada a los mapas mod para BUSSID, donde puede encontrar muchas opciones para elegir. Puedes navegar por categorías, como extrema, todoterreno o escénica. También puede ver capturas de pantalla, descripciones, calificaciones y enlaces de descarga para cada mapa de mods. </p>
24
- <p>Una vez que hayas encontrado un mapa mod que te guste, necesitas descargarlo en tu dispositivo. El archivo de mapa mod generalmente estará en formato ZIP o RAR, lo que significa que necesita extraerlo usando una aplicación de administrador de archivos o una aplicación extractora ZIP. Puedes encontrar muchas aplicaciones gratuitas para este propósito en la Google Play Store o en la App Store.</p>
25
- <p>Después de haber extraído el archivo de mapa mod, debe copiarlo en la carpeta mod de BUSSID. La carpeta mod se encuentra en el almacenamiento interno de su dispositivo, bajo la carpeta Android/data/com.maleo.bussimulatorid/files/mod. Puede usar una aplicación de administrador de archivos para navegar a esta carpeta y pegar el archivo de mapa mod allí. </p>
26
- <p></p>
27
- <p>El último paso es iniciar el juego y seleccionar el mapa mod desde el menú del mapa. Puede hacer esto pulsando en el icono del mapa en la esquina superior derecha de la pantalla, y luego desplazándose hacia abajo para encontrar el mapa mod que ha descargado. Toque en él para seleccionarlo, y luego toque en el botón de inicio para comenzar su viaje. </p>
28
- <h2>Cómo disfrutar de nuevos mapas para Bus Simulator Indonesia</h2>
29
- <p>Ahora que ha descargado e instalado un nuevo mapa para BUSSID, puede disfrutarlo conduciendo su autobús en él. Sin embargo, hay algunos consejos que pueden ayudarte a aprovechar al máximo tu experiencia. Estos son algunos de ellos:</p>
30
- <ul>
31
-
32
- <li><strong>Consejo 2: Siga las reglas de tráfico y respete otros controladores</strong>. A pesar de que usted está jugando en un mapa mod, todavía tiene que seguir las reglas de tráfico y respetar a otros conductores en la carretera. Esto significa que debe obedecer el límite de velocidad, detenerse en las luces rojas, hacer una señal antes de girar y evitar colisiones. Esto no solo hará que su conducción sea más realista y segura, sino también más agradable y gratificante. </li>
33
- <li><strong>Consejo 3: Utilice el bocinazo y otras características para interactuar con el entorno</strong>. Uno de los aspectos más divertidos de BUSSID es que puedes utilizar la bocina y otras funciones para interactuar con el entorno. Por ejemplo, puedes usar la bocina para saludar a otros conductores, peatones o animales. También puede usar los limpiaparabrisas, los faros, los indicadores y las puertas para comunicarse con otros o expresarse. Incluso puede usar el "Om Telolet Om!" tocar el claxon para hacer que la gente te vitoree. </li>
34
- <li><strong>Consejo 4: Explora diferentes rutas y puntos de referencia en el mapa</strong>. Otra forma de disfrutar de nuevos mapas para BUSSID es explorar diferentes rutas y puntos de referencia en ellos. Puede hacer esto siguiendo la navegación GPS o eligiendo su propio camino. Puede descubrir nuevos lugares, paisajes o desafíos que no haya visto antes. También puede encontrar secretos ocultos o huevos de Pascua que el creador del mapa ha dejado para usted. </li>
35
- <li><strong>Consejo 5: Únete a convoyes multijugador en línea con otros jugadores</strong>. La mejor manera de disfrutar de nuevos mapas para BUSSID es unirse a convoyes multijugador en línea con otros jugadores. Puede hacer esto tocando el icono del convoy en la esquina superior izquierda de la pantalla, y luego elegir un convoy que está jugando en el mismo mapa que usted. También puede crear su propio convoy e invitar a sus amigos u otros jugadores a unirse a usted. Al unirte a un convoy, puedes chatear con otros jugadores, compartir tus experiencias y divertirte juntos. </li>
36
- </ul>
37
- <h2>Conclusión</h2>
38
-
39
- <ol>
40
- <li>Encuentra un mapa mod que te guste en MediaRale</li>
41
- <li>Descargar el archivo de mapa mod y extraerlo si es necesario</li>
42
- <li>Copiar el archivo de mapa mod a la carpeta mod de BUSSID</li>
43
- <li>Iniciar el juego y seleccionar el mapa de mod desde el menú del mapa</li>
44
- </ol>
45
- <p>Para disfrutar de nuevos mapas para BUSSID, puedes seguir estos consejos:</p>
46
- <ul>
47
- <li> Elegir un autobús adecuado y librea para el mapa</li>
48
- <li>Siga las reglas de tráfico y respete otros controladores</li>
49
- <li>Utilice el bocinazo y otras características para interactuar con el entorno</li>
50
- <li>Explora diferentes rutas y puntos de referencia en el mapa</li>
51
- <li>Únete a convoyes multijugador en línea con otros jugadores</li>
52
- </ul>
53
- <p>Siguiendo estos pasos y consejos, puede descargar y disfrutar de nuevos mapas para BUSSID y divertirse conduciendo su autobús en ellos. Si aún no has probado BUSSID, puedes descargarlo gratis desde la Google Play Store o la App Store. También puedes visitar el sitio web oficial de BUSSID para aprender más sobre el juego y sus características. ¡Feliz conducción! </p>
54
- <h2>Preguntas frecuentes</h2>
55
- <p>Aquí hay algunas preguntas frecuentes sobre nuevos mapas para BUSSID:</p>
56
- <ol>
57
- <li><strong>Q: ¿Cuántos mapas nuevos están disponibles para BUSSID? </strong></li>
58
- <li>A: No hay un número exacto de nuevos mapas para BUSSID, ya que los nuevos mapas mod son constantemente creados y subidos por los usuarios. Sin embargo, puede encontrar cientos de mapas mod para BUSSID en MediaRale, que van desde mapas extremos, off-road, escénicos, a mapas realistas. </li>
59
- <li><strong>Q: ¿Cómo sé si un mapa mod es compatible con mi versión de BUSSID? </strong></li>
60
- <li>A: Puede comprobar la compatibilidad de un mapa mod mirando su descripción, calificación y comentarios en MediaRale. También puede comprobar la fecha de subida del mapa mod y compararlo con la fecha de la última actualización de BUSSID. Generalmente, los mapas mod que se cargan después de la última actualización de BUSSID tienen más probabilidades de ser compatibles. </li>
61
- <li><strong>Q: ¿Cómo puedo desinstalar un mapa mod de BUSSID? </strong></li>
62
-
63
- <li><strong>Q: ¿Cómo puedo reportar un problema o un error con un mapa mod? </strong></li>
64
- <li>A: Si encuentras un problema o un error con un mapa mod, puedes reportarlo al creador de mapas mod o a MediaRale. Puede encontrar la información de contacto del creador de mapas mod en su página de perfil en MediaRale. También puede dejar un comentario o una valoración en la página del mapa de mods en MediaRale para compartir sus comentarios. </li>
65
- <li><strong>Q: ¿Cómo puedo crear mi propio mapa mod para BUSSID? </strong></li>
66
- <li>A: Si quieres crear tu propio mapa mod para BUSSID, necesitas usar un software de modelado 3D, como Blender, SketchUp o Maya. También debe seguir las directrices y especificaciones de BUSSID para crear mapas mod. Puede encontrar más información y tutoriales sobre cómo crear mapas mod para BUSSID en el sitio web oficial de BUSSID o en YouTube.</li>
67
- </ol></p> 64aa2da5cf<br />
68
- <br />
69
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/__init__.py DELETED
File without changes
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/.github/CODE_OF_CONDUCT.md DELETED
@@ -1,5 +0,0 @@
1
- # Code of Conduct
2
-
3
- Facebook has adopted a Code of Conduct that we expect project participants to adhere to.
4
- Please read the [full text](https://code.fb.com/codeofconduct/)
5
- so that you can understand what actions will and will not be tolerated.
 
spaces/CVPR/LIVE/thrust/cmake/ThrustMultiConfig.cmake DELETED
@@ -1,127 +0,0 @@
1
- # This file defines thrust_configure_multiconfig(), which sets up and handles
2
- # the MultiConfig options that allow multiple host/device/dialect configurations
3
- # to be generated from a single thrust build.
4
-
5
- function(thrust_configure_multiconfig)
6
- option(THRUST_ENABLE_MULTICONFIG "Enable multiconfig options for coverage testing." OFF)
7
-
8
- # Dialects:
9
- set(THRUST_CPP_DIALECT_OPTIONS
10
- 11 14 17
11
- CACHE INTERNAL "C++ dialects supported by Thrust." FORCE
12
- )
13
-
14
- if (THRUST_ENABLE_MULTICONFIG)
15
- # Handle dialect options:
16
- foreach (dialect IN LISTS THRUST_CPP_DIALECT_OPTIONS)
17
- set(default_value OFF)
18
- if (dialect EQUAL 14) # Default to just 14 on:
19
- set(default_value ON)
20
- endif()
21
- option(THRUST_MULTICONFIG_ENABLE_DIALECT_CPP${dialect}
22
- "Generate C++${dialect} build configurations."
23
- ${default_value}
24
- )
25
- endforeach()
26
-
27
- # Supported versions of MSVC do not distinguish between C++11 and C++14.
28
- # Warn the user that they may be generating a ton of redundant targets.
29
- if ("MSVC" STREQUAL "${CMAKE_CXX_COMPILER_ID}" AND
30
- THRUST_MULTICONFIG_ENABLE_DIALECT_CPP11)
31
- message(WARNING
32
- "Supported versions of MSVC (2017+) do not distinguish between C++11 "
33
- "and C++14. The requested C++11 targets will be built with C++14."
34
- )
35
- endif()
36
-
37
- # Systems:
38
- option(THRUST_MULTICONFIG_ENABLE_SYSTEM_CPP "Generate build configurations that use CPP." ON)
39
- option(THRUST_MULTICONFIG_ENABLE_SYSTEM_CUDA "Generate build configurations that use CUDA." ON)
40
- option(THRUST_MULTICONFIG_ENABLE_SYSTEM_OMP "Generate build configurations that use OpenMP." OFF)
41
- option(THRUST_MULTICONFIG_ENABLE_SYSTEM_TBB "Generate build configurations that use TBB." OFF)
42
-
43
- # CMake added C++17 support for CUDA targets in 3.18:
44
- if (THRUST_MULTICONFIG_ENABLE_DIALECT_CPP17 AND
45
- THRUST_MULTICONFIG_ENABLE_SYSTEM_CUDA)
46
- cmake_minimum_required(VERSION 3.18)
47
- endif()
48
-
49
- # Workload:
50
- # - `SMALL`: [3 configs] Minimal coverage and validation of each device system against the `CPP` host.
51
- # - `MEDIUM`: [6 configs] Cheap extended coverage.
52
- # - `LARGE`: [8 configs] Expensive extended coverage. Include all useful build configurations.
53
- # - `FULL`: [12 configs] The complete cross product of all possible build configurations.
54
- #
55
- # Config | Workloads | Value | Expense | Note
56
- # ---------|-----------|------------|-----------|-----------------------------
57
- # CPP/CUDA | F L M S | Essential | Expensive | Validates CUDA against CPP
58
- # CPP/OMP | F L M S | Essential | Cheap | Validates OMP against CPP
59
- # CPP/TBB | F L M S | Essential | Cheap | Validates TBB against CPP
60
- # CPP/CPP | F L M | Important | Cheap | Tests CPP as device
61
- # OMP/OMP | F L M | Important | Cheap | Tests OMP as host
62
- # TBB/TBB | F L M | Important | Cheap | Tests TBB as host
63
- # TBB/CUDA | F L | Important | Expensive | Validates TBB/CUDA interop
64
- # OMP/CUDA | F L | Important | Expensive | Validates OMP/CUDA interop
65
- # TBB/OMP | F | Not useful | Cheap | Mixes CPU-parallel systems
66
- # OMP/TBB | F | Not useful | Cheap | Mixes CPU-parallel systems
67
- # TBB/CPP | F | Not Useful | Cheap | Parallel host, serial device
68
- # OMP/CPP | F | Not Useful | Cheap | Parallel host, serial device
69
-
70
- set(THRUST_MULTICONFIG_WORKLOAD SMALL CACHE STRING
71
- "Limit host/device configs: SMALL (up to 3 h/d combos per dialect), MEDIUM(6), LARGE(8), FULL(12)"
72
- )
73
- set_property(CACHE THRUST_MULTICONFIG_WORKLOAD PROPERTY STRINGS
74
- SMALL MEDIUM LARGE FULL
75
- )
76
- set(THRUST_MULTICONFIG_WORKLOAD_SMALL_CONFIGS
77
- CPP_OMP CPP_TBB CPP_CUDA
78
- CACHE INTERNAL "Host/device combos enabled for SMALL workloads." FORCE
79
- )
80
- set(THRUST_MULTICONFIG_WORKLOAD_MEDIUM_CONFIGS
81
- ${THRUST_MULTICONFIG_WORKLOAD_SMALL_CONFIGS}
82
- CPP_CPP TBB_TBB OMP_OMP
83
- CACHE INTERNAL "Host/device combos enabled for MEDIUM workloads." FORCE
84
- )
85
- set(THRUST_MULTICONFIG_WORKLOAD_LARGE_CONFIGS
86
- ${THRUST_MULTICONFIG_WORKLOAD_MEDIUM_CONFIGS}
87
- OMP_CUDA TBB_CUDA
88
- CACHE INTERNAL "Host/device combos enabled for LARGE workloads." FORCE
89
- )
90
- set(THRUST_MULTICONFIG_WORKLOAD_FULL_CONFIGS
91
- ${THRUST_MULTICONFIG_WORKLOAD_LARGE_CONFIGS}
92
- OMP_CPP TBB_CPP OMP_TBB TBB_OMP
93
- CACHE INTERNAL "Host/device combos enabled for FULL workloads." FORCE
94
- )
95
-
96
- # Hide the single config options if they exist from a previous run:
97
- if (DEFINED THRUST_HOST_SYSTEM)
98
- set_property(CACHE THRUST_HOST_SYSTEM PROPERTY TYPE INTERNAL)
99
- set_property(CACHE THRUST_DEVICE_SYSTEM PROPERTY TYPE INTERNAL)
100
- endif()
101
- if (DEFINED THRUST_CPP_DIALECT)
102
- set_property(CACHE THRUST_CPP_DIALECT PROPERTY TYPE INTERNAL)
103
- endif()
104
-
105
- else() # Single config:
106
- # Restore system option visibility if these cache options already exist
107
- # from a previous run.
108
- if (DEFINED THRUST_HOST_SYSTEM)
109
- set_property(CACHE THRUST_HOST_SYSTEM PROPERTY TYPE STRING)
110
- set_property(CACHE THRUST_DEVICE_SYSTEM PROPERTY TYPE STRING)
111
- endif()
112
-
113
- set(THRUST_CPP_DIALECT 14
114
- CACHE STRING "The C++ standard to target: ${THRUST_CPP_DIALECT_OPTIONS}"
115
- )
116
- set_property(CACHE THRUST_CPP_DIALECT
117
- PROPERTY STRINGS
118
- ${THRUST_CPP_DIALECT_OPTIONS}
119
- )
120
-
121
- # CMake added C++17 support for CUDA targets in 3.18:
122
- if (THRUST_CPP_DIALECT EQUAL 17 AND
123
- THRUST_DEVICE_SYSTEM STREQUAL "CUDA")
124
- cmake_minimum_required(VERSION 3.18)
125
- endif()
126
- endif()
127
- endfunction()
 
spaces/DAMO-NLP-SG/CLEX-Chat/modeling_llama.py DELETED
@@ -1,985 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
3
- #
4
- # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
5
- # and OPT implementations in this library. It has been modified from its
6
- # original forms to accommodate minor architectural differences compared
7
- # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
8
- #
9
- # Licensed under the Apache License, Version 2.0 (the "License");
10
- # you may not use this file except in compliance with the License.
11
- # You may obtain a copy of the License at
12
- #
13
- # http://www.apache.org/licenses/LICENSE-2.0
14
- #
15
- # Unless required by applicable law or agreed to in writing, software
16
- # distributed under the License is distributed on an "AS IS" BASIS,
17
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
- # See the License for the specific language governing permissions and
19
- # limitations under the License.
20
- """ PyTorch LLaMA model."""
21
- import math
22
- from typing import List, Optional, Tuple, Union
23
-
24
- import torch
25
- import torch.utils.checkpoint
26
- from torch import nn
27
- from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
28
-
29
- from transformers.activations import ACT2FN
30
- from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
31
- from transformers.modeling_utils import PreTrainedModel
32
- from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
33
- from configuration_clex import CLEXLlamaConfig
34
- from clex_layer import LlamaCLEXScalingRotaryEmbedding
35
- from einops import rearrange
36
- import importlib.metadata
37
- import importlib.util
38
-
39
-
40
- logger = logging.get_logger(__name__)
41
-
42
- def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[Tuple[bool, str], bool]:
43
- # Check we're not importing a "pkg_name" directory somewhere but the actual library by trying to grab the version
44
- package_exists = importlib.util.find_spec(pkg_name) is not None
45
- package_version = "N/A"
46
- if package_exists:
47
- try:
48
- package_version = importlib.metadata.version(pkg_name)
49
- package_exists = True
50
- except importlib.metadata.PackageNotFoundError:
51
- package_exists = False
52
- logger.info(f"Detected {pkg_name} version {package_version}")
53
- if return_version:
54
- return package_exists, package_version
55
- else:
56
- return package_exists
57
-
58
- def is_flash_attn_available():
59
- if not _is_package_available("torch", return_version=True):
60
- return False
61
-
62
- # Let's add an extra check to see if cuda is available
63
-
64
- return _is_package_available("flash_attn") and torch.cuda.is_available()
65
-
66
-
67
-
68
-
69
-
70
-
71
- _CONFIG_FOR_DOC = "CLEXLlamaConfig"
72
-
73
-
74
-
75
-
76
-
77
- # Copied from transformers.models.bart.modeling_bart._make_causal_mask
78
- def _make_causal_mask(
79
- input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
80
- ):
81
- """
82
- Make causal mask used for bi-directional self-attention.
83
- """
84
- bsz, tgt_len = input_ids_shape
85
- mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device)
86
- mask_cond = torch.arange(mask.size(-1), device=device)
87
- mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
88
- mask = mask.to(dtype)
89
-
90
- if past_key_values_length > 0:
91
- mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
92
- return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
93
-
94
-
95
- # Copied from transformers.models.bart.modeling_bart._expand_mask
96
- def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
97
- """
98
- Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
99
- """
100
- bsz, src_len = mask.size()
101
- tgt_len = tgt_len if tgt_len is not None else src_len
102
-
103
- expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
104
-
105
- inverted_mask = 1.0 - expanded_mask
106
-
107
- return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
108
-
109
-
110
- class LlamaRMSNorm(nn.Module):
111
- def __init__(self, hidden_size, eps=1e-6):
112
- """
113
- LlamaRMSNorm is equivalent to T5LayerNorm
114
- """
115
- super().__init__()
116
- self.weight = nn.Parameter(torch.ones(hidden_size))
117
- self.variance_epsilon = eps
118
-
119
- def forward(self, hidden_states):
120
- variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
121
- hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
122
-
123
- # convert into half-precision if necessary
124
- if self.weight.dtype in [torch.float16, torch.bfloat16]:
125
- hidden_states = hidden_states.to(self.weight.dtype)
126
-
127
- return self.weight * hidden_states
128
-
129
-
130
- class LlamaRotaryEmbedding(torch.nn.Module):
131
- def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
132
- super().__init__()
133
- inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
134
- self.register_buffer("inv_freq", inv_freq)
135
-
136
- # Build here to make `torch.jit.trace` work.
137
- self.max_seq_len_cached = max_position_embeddings
138
- t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
139
- freqs = torch.einsum("i,j->ij", t, self.inv_freq)
140
- # Different from paper, but it uses a different permutation in order to obtain the same calculation
141
- emb = torch.cat((freqs, freqs), dim=-1)
142
- self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
143
- self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
144
-
145
- def forward(self, x, seq_len=None):
146
- # x: [bs, num_attention_heads, seq_len, head_size]
147
- # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
148
- if seq_len > self.max_seq_len_cached:
149
- self.max_seq_len_cached = seq_len
150
- t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype)
151
- freqs = torch.einsum("i,j->ij", t, self.inv_freq)
152
- # Different from paper, but it uses a different permutation in order to obtain the same calculation
153
- emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
154
- self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False)
155
- self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False)
156
- return (
157
- self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
158
- self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
159
- )
160
-
161
-
162
- def rotate_half(x):
163
- """Rotates half the hidden dims of the input."""
164
- x1 = x[..., : x.shape[-1] // 2]
165
- x2 = x[..., x.shape[-1] // 2 :]
166
- return torch.cat((-x2, x1), dim=-1)
167
-
168
-
169
- def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
170
- # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
171
- cos = cos.squeeze(1).squeeze(0) # [seq_len, dim]
172
- sin = sin.squeeze(1).squeeze(0) # [seq_len, dim]
173
- cos = cos[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
174
- sin = sin[position_ids].unsqueeze(1) # [bs, 1, seq_len, dim]
175
- q_embed = (q * cos) + (rotate_half(q) * sin)
176
- k_embed = (k * cos) + (rotate_half(k) * sin)
177
- return q_embed, k_embed
178
-
179
-
180
- class LlamaMLP(nn.Module):
181
- def __init__(
182
- self,
183
- hidden_size: int,
184
- intermediate_size: int,
185
- hidden_act: str,
186
- ):
187
- super().__init__()
188
- self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
189
- self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
190
- self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
191
- self.act_fn = ACT2FN[hidden_act]
192
-
193
- def forward(self, x):
194
- return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
195
-
196
-
197
- class LlamaAttention(nn.Module):
198
- """Multi-headed attention from 'Attention Is All You Need' paper"""
199
-
200
- def __init__(self, config: CLEXLlamaConfig):
201
- super().__init__()
202
- self.config = config
203
- self.hidden_size = config.hidden_size
204
- self.num_heads = config.num_attention_heads
205
- self.head_dim = self.hidden_size // self.num_heads
206
- self.max_position_embeddings = config.max_position_embeddings
207
- self.log_scale = config.log_scale
208
- if (self.head_dim * self.num_heads) != self.hidden_size:
209
- raise ValueError(
210
- f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
211
- f" and `num_heads`: {self.num_heads})."
212
- )
213
- self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
214
- self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
215
- self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
216
- self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
217
- self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)
218
-
219
- def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
220
- return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
221
-
222
- def flash_attn_forward(
223
- self,
224
- qkv: torch.Tensor,
225
- key_padding_mask: Optional[torch.Tensor] = None,
226
- ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
227
- """Input shape: Batch x Time x Channel
228
-
229
- attention_mask: [bsz, q_len]
230
- """
231
- if is_flash_attn_available():
232
- from flash_attn.flash_attn_interface import flash_attn_varlen_qkvpacked_func, flash_attn_qkvpacked_func, flash_attn_with_kvcache
233
- # from flash_attn.flash_attn_interface import flash_attn_unpadded_qkvpacked_func
234
- from flash_attn.bert_padding import unpad_input, pad_input
235
- bsz, q_len, *_ = qkv.size()
236
-
237
- if key_padding_mask is None:
238
- # qkv = rearrange(qkv, "b s ... -> (b s) ...")
239
- max_s = q_len
240
- cu_q_lens = torch.arange(
241
- 0, (bsz + 1) * q_len, step=q_len, dtype=torch.int32, device=qkv.device
242
- )
243
- output = flash_attn_qkvpacked_func(
244
- qkv, 0.0, softmax_scale=None, causal=True
245
- )
246
- else:
247
- nheads = qkv.shape[-2]
248
- x = rearrange(qkv, "b s three h d -> b s (three h d)")
249
- x_unpad, indices, cu_q_lens, max_s = unpad_input(x, key_padding_mask)
250
- x_unpad = rearrange(
251
- x_unpad, "nnz (three h d) -> nnz three h d", three=3, h=nheads
252
- )
253
- output_unpad = flash_attn_varlen_qkvpacked_func(
254
- x_unpad, cu_q_lens, max_s, 0.0, softmax_scale=None, causal=True
255
- )
256
- output = rearrange(
257
- pad_input(
258
- rearrange(output_unpad, "nnz h d -> nnz (h d)"), indices, bsz, q_len
259
- ),
260
- "b s (h d) -> b s h d",
261
- h=nheads,
262
- )
263
- return self.o_proj(rearrange(output, "b s h d -> b s (h d)"))
264
-
265
- def forward(
266
- self,
267
- hidden_states: torch.Tensor,
268
- attention_mask: Optional[torch.Tensor] = None,
269
- position_ids: Optional[torch.LongTensor] = None,
270
- pack_cos_sin = None,
271
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
272
- output_attentions: bool = False,
273
- use_cache: bool = False,
274
- ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
275
- bsz, q_len, _ = hidden_states.size()
276
-
277
- query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
278
- key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
279
- value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
280
-
281
- kv_seq_len = key_states.shape[-2]
282
-
283
- if past_key_value is not None:
284
- kv_seq_len += past_key_value[0].shape[-2]
285
-
286
- if pack_cos_sin is not None:
287
- cos, sin = pack_cos_sin.to(query_states.device)
288
- else:
289
- cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
290
- query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
291
-
292
- if past_key_value is not None:
293
- # reuse k, v, self_attention
294
- key_states = torch.cat([past_key_value[0], key_states], dim=2)
295
- value_states = torch.cat([past_key_value[1], value_states], dim=2)
296
-
297
- past_key_value = (key_states, value_states) if use_cache else None
298
-
299
- use_flashattn = self.config.use_flashattn and is_flash_attn_available()
300
-
301
-
302
-
303
- attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
304
-
305
- if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
306
- raise ValueError(
307
- f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
308
- f" {attn_weights.size()}"
309
- )
310
-
311
- if attention_mask is not None:
312
- if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
313
- raise ValueError(
314
- f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
315
- )
316
- attn_weights = attn_weights + attention_mask
317
- attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))
318
-
319
- # upcast attention to fp32
320
- attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
321
- attn_output = torch.matmul(attn_weights, value_states)
322
-
323
- if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
324
- raise ValueError(
325
- f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
326
- f" {attn_output.size()}"
327
- )
328
-
329
- attn_output = attn_output.transpose(1, 2)
330
- attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
331
-
332
- attn_output = self.o_proj(attn_output)
333
-
334
- if not output_attentions:
335
- attn_weights = None
336
-
337
- return attn_output, attn_weights, past_key_value
338
-
339
-
340
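For reference, a minimal self-contained sketch of the eager attention path that the `forward` above computes: scores scaled by `1/sqrt(head_dim)`, an additive causal mask, an fp32 softmax upcast, then value weighting. The tensors below are random stand-ins, not real model activations.

```python
# Sketch of the eager attention math in LlamaAttention.forward above (illustration only).
import math
import torch

bsz, num_heads, q_len, head_dim = 2, 4, 8, 16
q = torch.randn(bsz, num_heads, q_len, head_dim)
k = torch.randn(bsz, num_heads, q_len, head_dim)
v = torch.randn(bsz, num_heads, q_len, head_dim)

# Additive causal mask: large negative values strictly above the diagonal, 0 elsewhere.
mask = torch.full((q_len, q_len), torch.finfo(q.dtype).min).triu(1)[None, None]

scores = q @ k.transpose(-2, -1) / math.sqrt(head_dim) + mask
probs = torch.nn.functional.softmax(scores, dim=-1, dtype=torch.float32).to(q.dtype)
out = probs @ v                                              # (bsz, num_heads, q_len, head_dim)
out = out.transpose(1, 2).reshape(bsz, q_len, num_heads * head_dim)
```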
- class LlamaDecoderLayer(nn.Module):
341
- def __init__(self, config: CLEXLlamaConfig):
342
- super().__init__()
343
- self.hidden_size = config.hidden_size
344
- self.self_attn = LlamaAttention(config=config)
345
- self.mlp = LlamaMLP(
346
- hidden_size=self.hidden_size,
347
- intermediate_size=config.intermediate_size,
348
- hidden_act=config.hidden_act,
349
- )
350
- self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
351
- self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
352
-
353
- def forward(
354
- self,
355
- hidden_states: torch.Tensor,
356
- attention_mask: Optional[torch.Tensor] = None,
357
- position_ids: Optional[torch.LongTensor] = None,
358
- pack_cos_sin=None,
359
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
360
- output_attentions: Optional[bool] = False,
361
- use_cache: Optional[bool] = False,
362
- ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
363
- """
364
- Args:
365
- hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
366
- attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
367
- `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
368
- output_attentions (`bool`, *optional*):
369
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under
370
- returned tensors for more detail.
371
- use_cache (`bool`, *optional*):
372
- If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
373
- (see `past_key_values`).
374
- past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
375
- """
376
-
377
- residual = hidden_states
378
-
379
- hidden_states = self.input_layernorm(hidden_states)
380
-
381
- # Self Attention
382
- hidden_states, self_attn_weights, present_key_value = self.self_attn(
383
- hidden_states=hidden_states,
384
- attention_mask=attention_mask,
385
- position_ids=position_ids,
386
- pack_cos_sin=pack_cos_sin,
387
- past_key_value=past_key_value,
388
- output_attentions=output_attentions,
389
- use_cache=use_cache,
390
- )
391
- hidden_states = residual + hidden_states
392
-
393
- # Fully Connected
394
- residual = hidden_states
395
- hidden_states = self.post_attention_layernorm(hidden_states)
396
- hidden_states = self.mlp(hidden_states)
397
- hidden_states = residual + hidden_states
398
-
399
- outputs = (hidden_states,)
400
-
401
- if output_attentions:
402
- outputs += (self_attn_weights,)
403
-
404
- if use_cache:
405
- outputs += (present_key_value,)
406
-
407
- return outputs
408
-
409
-
410
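The decoder layer above follows the standard pre-norm residual pattern. A schematic sketch with stand-in modules (LayerNorm in place of RMSNorm, Identity and Linear in place of the real attention and MLP blocks) is:

```python
# Schematic of the pre-norm residual pattern in LlamaDecoderLayer.forward above
# (stand-in modules only, not the real attention / MLP).
import torch

def decoder_layer(x, norm1, attn, norm2, mlp):
    x = x + attn(norm1(x))   # self-attention block with residual connection
    x = x + mlp(norm2(x))    # feed-forward block with residual connection
    return x

hidden = torch.randn(2, 8, 32)
out = decoder_layer(hidden, torch.nn.LayerNorm(32), torch.nn.Identity(),
                    torch.nn.LayerNorm(32), torch.nn.Linear(32, 32))
```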
- LLAMA_START_DOCSTRING = r"""
411
- This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
412
- library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
413
- etc.)
414
-
415
- This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
416
- Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
417
- and behavior.
418
-
419
- Parameters:
420
- config ([`CLEXLlamaConfig`]):
421
- Model configuration class with all the parameters of the model. Initializing with a config file does not
422
- load the weights associated with the model, only the configuration. Check out the
423
- [`~PreTrainedModel.from_pretrained`] method to load the model weights.
424
- """
425
-
426
-
427
- @add_start_docstrings(
428
- "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
429
- LLAMA_START_DOCSTRING,
430
- )
431
- class LlamaPreTrainedModel(PreTrainedModel):
432
- config_class = CLEXLlamaConfig
433
- base_model_prefix = "model"
434
- supports_gradient_checkpointing = True
435
- _no_split_modules = ["LlamaDecoderLayer"]
436
- _keys_to_ignore_on_load_unexpected = [r"decoder\.version"]
437
- _keep_in_fp32_modules = ["model.clex_layer.proj_func.ode_up_proj", "model.clex_layer.proj_func.ode_down_proj", "model.clex_layer.inv_freq"]
438
-
439
- def _init_weights(self, module):
440
- std = self.config.initializer_range
441
- if isinstance(module, nn.Linear):
442
- module.weight.data.normal_(mean=0.0, std=std)
443
- if module.bias is not None:
444
- module.bias.data.zero_()
445
- elif isinstance(module, nn.Embedding):
446
- module.weight.data.normal_(mean=0.0, std=std)
447
- if module.padding_idx is not None:
448
- module.weight.data[module.padding_idx].zero_()
449
-
450
- def _set_gradient_checkpointing(self, module, value=False):
451
- if isinstance(module, LlamaModel):
452
- module.gradient_checkpointing = value
453
-
454
-
455
- LLAMA_INPUTS_DOCSTRING = r"""
456
- Args:
457
- input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
458
- Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
459
- it.
460
-
461
- Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
462
- [`PreTrainedTokenizer.__call__`] for details.
463
-
464
- [What are input IDs?](../glossary#input-ids)
465
- attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
466
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
467
-
468
- - 1 for tokens that are **not masked**,
469
- - 0 for tokens that are **masked**.
470
-
471
- [What are attention masks?](../glossary#attention-mask)
472
-
473
- Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
474
- [`PreTrainedTokenizer.__call__`] for details.
475
-
476
- If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
477
- `past_key_values`).
478
-
479
- If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
480
- and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
481
- information on the default strategy.
482
-
483
- - 1 indicates the head is **not masked**,
484
- - 0 indicates the head is **masked**.
485
- position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
486
- Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
487
- config.n_positions - 1]`.
488
-
489
- [What are position IDs?](../glossary#position-ids)
490
- past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
491
- Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
492
- `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
493
- `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
494
-
495
- Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
496
- blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
497
-
498
- If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
499
- don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
500
- `decoder_input_ids` of shape `(batch_size, sequence_length)`.
501
- inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
502
- Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
503
- is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
504
- model's internal embedding lookup matrix.
505
- use_cache (`bool`, *optional*):
506
- If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
507
- `past_key_values`).
508
- output_attentions (`bool`, *optional*):
509
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
510
- tensors for more detail.
511
- output_hidden_states (`bool`, *optional*):
512
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
513
- more detail.
514
- return_dict (`bool`, *optional*):
515
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
516
- """
517
-
518
-
519
- @add_start_docstrings(
520
- "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
521
- LLAMA_START_DOCSTRING,
522
- )
523
- class LlamaModel(LlamaPreTrainedModel):
524
- """
525
- Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
526
-
527
- Args:
528
- config: CLEXLlamaConfig
529
- """
530
-
531
- def __init__(self, config: CLEXLlamaConfig):
532
- super().__init__(config)
533
- self.padding_idx = config.pad_token_id
534
- self.vocab_size = config.vocab_size
535
-
536
- self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
537
- self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)])
538
- self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
539
- head_dim = config.hidden_size // config.num_attention_heads
540
- if config.rope_scaling["type"] == "clex":
541
- self.clex_layer = LlamaCLEXScalingRotaryEmbedding(head_dim, config.max_position_embeddings, config.rope_scaling)
542
- self.gradient_checkpointing = False
543
- # Initialize weights and apply final processing
544
- self.post_init()
545
-
546
-
547
- def get_input_embeddings(self):
548
- return self.embed_tokens
549
-
550
- def set_input_embeddings(self, value):
551
- self.embed_tokens = value
552
-
553
- # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
554
- def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
555
- # create causal mask
556
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
557
- combined_attention_mask = None
558
- if input_shape[-1] > 1:
559
- combined_attention_mask = _make_causal_mask(
560
- input_shape,
561
- inputs_embeds.dtype,
562
- device=inputs_embeds.device,
563
- past_key_values_length=past_key_values_length,
564
- )
565
-
566
- if attention_mask is not None:
567
- # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
568
- expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(
569
- inputs_embeds.device
570
- )
571
- combined_attention_mask = (
572
- expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
573
- )
574
-
575
- return combined_attention_mask
576
-
577
- @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
578
- def forward(
579
- self,
580
- input_ids: torch.LongTensor = None,
581
- attention_mask: Optional[torch.Tensor] = None,
582
- position_ids: Optional[torch.LongTensor] = None,
583
- past_key_values: Optional[List[torch.FloatTensor]] = None,
584
- inputs_embeds: Optional[torch.FloatTensor] = None,
585
- use_cache: Optional[bool] = None,
586
- output_attentions: Optional[bool] = None,
587
- output_hidden_states: Optional[bool] = None,
588
- return_dict: Optional[bool] = None,
589
- ) -> Union[Tuple, BaseModelOutputWithPast]:
590
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
591
- output_hidden_states = (
592
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
593
- )
594
- use_cache = use_cache if use_cache is not None else self.config.use_cache
595
-
596
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
597
-
598
- # retrieve input_ids and inputs_embeds
599
- if input_ids is not None and inputs_embeds is not None:
600
- raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
601
- elif input_ids is not None:
602
- batch_size, seq_length = input_ids.shape
603
- elif inputs_embeds is not None:
604
- batch_size, seq_length, _ = inputs_embeds.shape
605
- else:
606
- raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
607
-
608
- seq_length_with_past = seq_length
609
- past_key_values_length = 0
610
-
611
- if past_key_values is not None:
612
- past_key_values_length = past_key_values[0][0].shape[2]
613
- seq_length_with_past = seq_length_with_past + past_key_values_length
614
-
615
- if position_ids is None:
616
- device = input_ids.device if input_ids is not None else inputs_embeds.device
617
- position_ids = torch.arange(
618
- past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
619
- )
620
- position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
621
- else:
622
- position_ids = position_ids.view(-1, seq_length).long()
623
-
624
- if inputs_embeds is None:
625
- inputs_embeds = self.embed_tokens(input_ids)
626
- # embed positions
627
- if attention_mask is None:
628
- attention_mask = torch.ones(
629
- (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
630
- )
631
- attention_mask = self._prepare_decoder_attention_mask(
632
- attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
633
- )
634
- # attention_mask = None
635
-
636
-
637
- hidden_states = inputs_embeds
638
-
639
- if self.gradient_checkpointing and self.training:
640
- if use_cache:
641
- logger.warning_once(
642
- "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
643
- )
644
- use_cache = False
645
-
646
- # decoder layers
647
- all_hidden_states = () if output_hidden_states else None
648
- all_self_attns = () if output_attentions else None
649
- next_decoder_cache = () if use_cache else None
650
-
651
- pack_cos_sin = None
652
- if self.config.rope_scaling["type"] == "clex":
653
- pack_cos_sin = self.clex_layer(inputs_embeds.device, inputs_embeds.dtype, seq_length_with_past, self.training)
654
-
655
- for idx, decoder_layer in enumerate(self.layers):
656
- if output_hidden_states:
657
- all_hidden_states += (hidden_states,)
658
-
659
- past_key_value = past_key_values[idx] if past_key_values is not None else None
660
-
661
- if self.gradient_checkpointing and self.training:
662
-
663
- def create_custom_forward(module):
664
- def custom_forward(*inputs):
665
- # None for past_key_value
666
- return module(*inputs, output_attentions, None)
667
-
668
- return custom_forward
669
-
670
- layer_outputs = torch.utils.checkpoint.checkpoint(
671
- create_custom_forward(decoder_layer),
672
- hidden_states,
673
- attention_mask,
674
- position_ids,
675
- pack_cos_sin,
676
- None,
677
- )
678
- else:
679
- layer_outputs = decoder_layer(
680
- hidden_states,
681
- attention_mask=attention_mask,
682
- position_ids=position_ids,
683
- pack_cos_sin=pack_cos_sin,
684
- past_key_value=past_key_value,
685
- output_attentions=output_attentions,
686
- use_cache=use_cache,
687
- )
688
-
689
- hidden_states = layer_outputs[0]
690
-
691
- if use_cache:
692
- next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
693
-
694
- if output_attentions:
695
- all_self_attns += (layer_outputs[1],)
696
-
697
- hidden_states = self.norm(hidden_states)
698
-
699
- # add hidden states from the last decoder layer
700
- if output_hidden_states:
701
- all_hidden_states += (hidden_states,)
702
-
703
- next_cache = next_decoder_cache if use_cache else None
704
- if not return_dict:
705
- return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
706
- return BaseModelOutputWithPast(
707
- last_hidden_state=hidden_states,
708
- past_key_values=next_cache,
709
- hidden_states=all_hidden_states,
710
- attentions=all_self_attns,
711
- )
712
-
713
-
714
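A minimal sketch of the additive causal mask that `_prepare_decoder_attention_mask` above assembles via `_make_causal_mask` / `_expand_mask` (both defined earlier in this file); past-key-value offsets and padding-mask expansion are omitted here:

```python
# Minimal causal-mask sketch (illustration only): negative values above the diagonal,
# 0 on and below it, broadcastable to (bsz, 1, tgt_len, src_len).
import torch

def causal_mask(seq_len, dtype=torch.float32):
    mask = torch.full((seq_len, seq_len), torch.finfo(dtype).min, dtype=dtype)
    mask = mask.triu(1)               # keep only the strictly upper triangle negative
    return mask[None, None, :, :]     # (1, 1, seq_len, seq_len)

print(causal_mask(4))
```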
- class LlamaForCausalLM(LlamaPreTrainedModel):
715
- def __init__(self, config):
716
- super().__init__(config)
717
- self.model = LlamaModel(config)
718
-
719
- self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
720
-
721
- # Initialize weights and apply final processing
722
- self.post_init()
723
-
724
- def get_input_embeddings(self):
725
- return self.model.embed_tokens
726
-
727
- def set_input_embeddings(self, value):
728
- self.model.embed_tokens = value
729
-
730
- def get_output_embeddings(self):
731
- return self.lm_head
732
-
733
- def set_output_embeddings(self, new_embeddings):
734
- self.lm_head = new_embeddings
735
-
736
- def set_decoder(self, decoder):
737
- self.model = decoder
738
-
739
- def get_decoder(self):
740
- return self.model
741
-
742
- @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
743
- @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
744
- def forward(
745
- self,
746
- input_ids: torch.LongTensor = None,
747
- attention_mask: Optional[torch.Tensor] = None,
748
- position_ids: Optional[torch.LongTensor] = None,
749
- past_key_values: Optional[List[torch.FloatTensor]] = None,
750
- inputs_embeds: Optional[torch.FloatTensor] = None,
751
- labels: Optional[torch.LongTensor] = None,
752
- use_cache: Optional[bool] = None,
753
- output_attentions: Optional[bool] = None,
754
- output_hidden_states: Optional[bool] = None,
755
- return_dict: Optional[bool] = None,
756
- ) -> Union[Tuple, CausalLMOutputWithPast]:
757
- r"""
758
- Args:
759
- labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
760
- Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
761
- config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
762
- (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
763
-
764
- Returns:
765
-
766
- Example:
767
-
768
- ```python
769
- >>> from transformers import AutoTokenizer, LlamaForCausalLM
770
-
771
- >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
772
- >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
773
-
774
- >>> prompt = "Hey, are you consciours? Can you talk to me?"
775
- >>> inputs = tokenizer(prompt, return_tensors="pt")
776
-
777
- >>> # Generate
778
- >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
779
- >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
780
- "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you."
781
- ```"""
782
-
783
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
784
- output_hidden_states = (
785
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
786
- )
787
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
788
-
789
- # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
790
- outputs = self.model(
791
- input_ids=input_ids,
792
- attention_mask=attention_mask,
793
- position_ids=position_ids,
794
- past_key_values=past_key_values,
795
- inputs_embeds=inputs_embeds,
796
- use_cache=use_cache,
797
- output_attentions=output_attentions,
798
- output_hidden_states=output_hidden_states,
799
- return_dict=return_dict,
800
- )
801
-
802
- hidden_states = outputs[0]
803
- logits = self.lm_head(hidden_states)
804
-
805
- loss = None
806
- if labels is not None:
807
- # Shift so that tokens < n predict n
808
- shift_logits = logits[..., :-1, :].contiguous()
809
- shift_labels = labels[..., 1:].contiguous()
810
- # Flatten the tokens
811
- loss_fct = CrossEntropyLoss()
812
- shift_logits = shift_logits.view(-1, self.config.vocab_size)
813
- shift_labels = shift_labels.view(-1)
814
- # Enable model parallelism
815
- shift_labels = shift_labels.to(shift_logits.device)
816
- loss = loss_fct(shift_logits, shift_labels)
817
- if not return_dict:
818
- output = (logits,) + outputs[1:]
819
- return (loss,) + output if loss is not None else output
820
- return CausalLMOutputWithPast(
821
- loss=loss,
822
- logits=logits,
823
- past_key_values=outputs.past_key_values,
824
- hidden_states=outputs.hidden_states,
825
- attentions=outputs.attentions,
826
- )
827
-
828
- def prepare_inputs_for_generation(
829
- self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs
830
- ):
831
- if past_key_values:
832
- input_ids = input_ids[:, -1:]
833
-
834
- position_ids = kwargs.get("position_ids", None)
835
- if attention_mask is not None and position_ids is None:
836
- # create position_ids on the fly for batch generation
837
- position_ids = attention_mask.long().cumsum(-1) - 1
838
- position_ids.masked_fill_(attention_mask == 0, 1)
839
- if past_key_values:
840
- position_ids = position_ids[:, -1].unsqueeze(-1)
841
-
842
- # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
843
- if inputs_embeds is not None and past_key_values is None:
844
- model_inputs = {"inputs_embeds": inputs_embeds}
845
- else:
846
- model_inputs = {"input_ids": input_ids}
847
-
848
- model_inputs.update(
849
- {
850
- "position_ids": position_ids,
851
- "past_key_values": past_key_values,
852
- "use_cache": kwargs.get("use_cache"),
853
- "attention_mask": attention_mask,
854
- }
855
- )
856
- return model_inputs
857
-
858
- @staticmethod
859
- def _reorder_cache(past_key_values, beam_idx):
860
- reordered_past = ()
861
- for layer_past in past_key_values:
862
- reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
863
- return reordered_past
864
-
865
-
866
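A small worked example of the position-id trick used in `prepare_inputs_for_generation` above: position ids are the cumulative sum of the attention mask minus one, with padded positions filled with 1. The mask below is invented purely for illustration:

```python
# Worked example of deriving position_ids from a left-padded attention mask.
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1],
                               [1, 1, 1, 1, 1]])
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])
```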
- @add_start_docstrings(
867
- """
868
- The LLaMa Model transformer with a sequence classification head on top (linear layer).
869
-
870
- [`LlamaForSequenceClassification`] uses the last token in order to do the classification, as other causal models
871
- (e.g. GPT-2) do.
872
-
873
- Since it does classification on the last token, it requires to know the position of the last token. If a
874
- `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
875
- no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
876
- padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
877
- each row of the batch).
878
- """,
879
- LLAMA_START_DOCSTRING,
880
- )
881
- class LlamaForSequenceClassification(LlamaPreTrainedModel):
882
- _keys_to_ignore_on_load_missing = [r"lm_head.weight"]
883
-
884
- def __init__(self, config):
885
- super().__init__(config)
886
- self.num_labels = config.num_labels
887
- self.model = LlamaModel(config)
888
- self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
889
-
890
- # Initialize weights and apply final processing
891
- self.post_init()
892
-
893
- def get_input_embeddings(self):
894
- return self.model.embed_tokens
895
-
896
- def set_input_embeddings(self, value):
897
- self.model.embed_tokens = value
898
-
899
- @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
900
- def forward(
901
- self,
902
- input_ids: torch.LongTensor = None,
903
- attention_mask: Optional[torch.Tensor] = None,
904
- position_ids: Optional[torch.LongTensor] = None,
905
- past_key_values: Optional[List[torch.FloatTensor]] = None,
906
- inputs_embeds: Optional[torch.FloatTensor] = None,
907
- labels: Optional[torch.LongTensor] = None,
908
- use_cache: Optional[bool] = None,
909
- output_attentions: Optional[bool] = None,
910
- output_hidden_states: Optional[bool] = None,
911
- return_dict: Optional[bool] = None,
912
- ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
913
- r"""
914
- labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
915
- Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
916
- config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
917
- `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
918
- """
919
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
920
-
921
- transformer_outputs = self.model(
922
- input_ids,
923
- attention_mask=attention_mask,
924
- position_ids=position_ids,
925
- past_key_values=past_key_values,
926
- inputs_embeds=inputs_embeds,
927
- use_cache=use_cache,
928
- output_attentions=output_attentions,
929
- output_hidden_states=output_hidden_states,
930
- return_dict=return_dict,
931
- )
932
- hidden_states = transformer_outputs[0]
933
- logits = self.score(hidden_states)
934
-
935
- if input_ids is not None:
936
- batch_size = input_ids.shape[0]
937
- else:
938
- batch_size = inputs_embeds.shape[0]
939
-
940
- if self.config.pad_token_id is None and batch_size != 1:
941
- raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
942
- if self.config.pad_token_id is None:
943
- sequence_lengths = -1
944
- else:
945
- if input_ids is not None:
946
- sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
947
- else:
948
- sequence_lengths = -1
949
-
950
- pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
951
-
952
- loss = None
953
- if labels is not None:
954
- labels = labels.to(logits.device)
955
- if self.config.problem_type is None:
956
- if self.num_labels == 1:
957
- self.config.problem_type = "regression"
958
- elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
959
- self.config.problem_type = "single_label_classification"
960
- else:
961
- self.config.problem_type = "multi_label_classification"
962
-
963
- if self.config.problem_type == "regression":
964
- loss_fct = MSELoss()
965
- if self.num_labels == 1:
966
- loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
967
- else:
968
- loss = loss_fct(pooled_logits, labels)
969
- elif self.config.problem_type == "single_label_classification":
970
- loss_fct = CrossEntropyLoss()
971
- loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
972
- elif self.config.problem_type == "multi_label_classification":
973
- loss_fct = BCEWithLogitsLoss()
974
- loss = loss_fct(pooled_logits, labels)
975
- if not return_dict:
976
- output = (pooled_logits,) + transformer_outputs[1:]
977
- return ((loss,) + output) if loss is not None else output
978
-
979
- return SequenceClassifierOutputWithPast(
980
- loss=loss,
981
- logits=pooled_logits,
982
- past_key_values=transformer_outputs.past_key_values,
983
- hidden_states=transformer_outputs.hidden_states,
984
- attentions=transformer_outputs.attentions,
985
- )
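A hypothetical usage sketch for this custom modeling file, not taken from the original repo: the checkpoint name below is a placeholder, and `trust_remote_code=True` is assumed because the classes above live outside the transformers library.

```python
# Hypothetical usage sketch; "DAMO-NLP-SG/CLEX-7B-16K" is a placeholder checkpoint name.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "DAMO-NLP-SG/CLEX-7B-16K"  # assumption: substitute the real checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)

inputs = tokenizer("Long-context inference test.", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```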
spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/datasets/dataloader_utils.py DELETED
@@ -1,162 +0,0 @@
1
- """
2
- Copyright (c) 2022, salesforce.com, inc.
3
- All rights reserved.
4
- SPDX-License-Identifier: BSD-3-Clause
5
- For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
6
- """
7
-
8
- import time
9
- import random
10
- import torch
11
- from video_llama.datasets.data_utils import move_to_cuda
12
- from torch.utils.data import DataLoader
13
-
14
-
15
- class MultiIterLoader:
16
- """
17
- A simple wrapper for iterating over multiple iterators.
18
-
19
- Args:
20
- loaders (List[Loader]): List of Iterator loaders.
21
- ratios (List[float]): List of ratios to sample from each loader. If None, all loaders are sampled uniformly.
22
- """
23
-
24
- def __init__(self, loaders, ratios=None):
25
- # assert all loaders has __next__ method
26
- for loader in loaders:
27
- assert hasattr(
28
- loader, "__next__"
29
- ), "Loader {} has no __next__ method.".format(loader)
30
-
31
- if ratios is None:
32
- ratios = [1.0] * len(loaders)
33
- else:
34
- assert len(ratios) == len(loaders)
35
- ratios = [float(ratio) / sum(ratios) for ratio in ratios]
36
-
37
- self.loaders = loaders
38
- self.ratios = ratios
39
-
40
- def __next__(self):
41
- # random sample from each loader by ratio
42
- loader_idx = random.choices(range(len(self.loaders)), self.ratios, k=1)[0]
43
- return next(self.loaders[loader_idx])
44
-
45
-
46
- class PrefetchLoader(object):
47
- """
48
- Modified from https://github.com/ChenRocks/UNITER.
49
-
50
- overlap compute and cuda data transfer
51
- (copied and then modified from nvidia apex)
52
- """
53
-
54
- def __init__(self, loader):
55
- self.loader = loader
56
- self.stream = torch.cuda.Stream()
57
-
58
- def __iter__(self):
59
- loader_it = iter(self.loader)
60
- self.preload(loader_it)
61
- batch = self.next(loader_it)
62
- while batch is not None:
63
- is_tuple = isinstance(batch, tuple)
64
- if is_tuple:
65
- task, batch = batch
66
-
67
- if is_tuple:
68
- yield task, batch
69
- else:
70
- yield batch
71
- batch = self.next(loader_it)
72
-
73
- def __len__(self):
74
- return len(self.loader)
75
-
76
- def preload(self, it):
77
- try:
78
- self.batch = next(it)
79
- except StopIteration:
80
- self.batch = None
81
- return
82
- # if record_stream() doesn't work, another option is to make sure
83
- # device inputs are created on the main stream.
84
- # self.next_input_gpu = torch.empty_like(self.next_input,
85
- # device='cuda')
86
- # self.next_target_gpu = torch.empty_like(self.next_target,
87
- # device='cuda')
88
- # Need to make sure the memory allocated for next_* is not still in use
89
- # by the main stream at the time we start copying to next_*:
90
- # self.stream.wait_stream(torch.cuda.current_stream())
91
- with torch.cuda.stream(self.stream):
92
- self.batch = move_to_cuda(self.batch)
93
- # more code for the alternative if record_stream() doesn't work:
94
- # copy_ will record the use of the pinned source tensor in this
95
- # side stream.
96
- # self.next_input_gpu.copy_(self.next_input, non_blocking=True)
97
- # self.next_target_gpu.copy_(self.next_target, non_blocking=True)
98
- # self.next_input = self.next_input_gpu
99
- # self.next_target = self.next_target_gpu
100
-
101
- def next(self, it):
102
- torch.cuda.current_stream().wait_stream(self.stream)
103
- batch = self.batch
104
- if batch is not None:
105
- record_cuda_stream(batch)
106
- self.preload(it)
107
- return batch
108
-
109
- def __getattr__(self, name):
110
- method = self.loader.__getattribute__(name)
111
- return method
112
-
113
-
114
- def record_cuda_stream(batch):
115
- if isinstance(batch, torch.Tensor):
116
- batch.record_stream(torch.cuda.current_stream())
117
- elif isinstance(batch, list) or isinstance(batch, tuple):
118
- for t in batch:
119
- record_cuda_stream(t)
120
- elif isinstance(batch, dict):
121
- for t in batch.values():
122
- record_cuda_stream(t)
123
- else:
124
- pass
125
-
126
-
127
- class IterLoader:
128
- """
129
- A wrapper to convert DataLoader as an infinite iterator.
130
-
131
- Modified from:
132
- https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/iter_based_runner.py
133
- """
134
-
135
- def __init__(self, dataloader: DataLoader, use_distributed: bool = False):
136
- self._dataloader = dataloader
137
- self.iter_loader = iter(self._dataloader)
138
- self._use_distributed = use_distributed
139
- self._epoch = 0
140
-
141
- @property
142
- def epoch(self) -> int:
143
- return self._epoch
144
-
145
- def __next__(self):
146
- try:
147
- data = next(self.iter_loader)
148
- except StopIteration:
149
- self._epoch += 1
150
- if hasattr(self._dataloader.sampler, "set_epoch") and self._use_distributed:
151
- self._dataloader.sampler.set_epoch(self._epoch)
152
- time.sleep(2) # Prevent possible deadlock during epoch transition
153
- self.iter_loader = iter(self._dataloader)
154
- data = next(self.iter_loader)
155
-
156
- return data
157
-
158
- def __iter__(self):
159
- return self
160
-
161
- def __len__(self):
162
- return len(self._dataloader)
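A hypothetical usage sketch of the helpers above, assuming `IterLoader` and `MultiIterLoader` are importable from this module; `PrefetchLoader` is skipped because it requires a CUDA device.

```python
# Hypothetical usage sketch (toy CPU datasets). IterLoader turns each finite DataLoader
# into an infinite iterator with __next__, which is what MultiIterLoader's assertion requires.
import torch
from torch.utils.data import DataLoader, TensorDataset

ds_a = TensorDataset(torch.arange(10))
ds_b = TensorDataset(torch.arange(100, 120))
loaders = [IterLoader(DataLoader(ds_a, batch_size=2)),
           IterLoader(DataLoader(ds_b, batch_size=4))]

# Sample from the first loader twice as often as the second.
multi = MultiIterLoader(loaders, ratios=[2, 1])
for _ in range(5):
    batch = next(multi)
    print(batch)
```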
spaces/Dao3/OpenArt/README.md DELETED
@@ -1,19 +0,0 @@
1
- ---
2
- title: OpenArt
3
- emoji: 🧘🏻‍♂️
4
- colorFrom: blue
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.16.1
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: Dao3/DreamlikeArt-Diffusion-1.0
11
- ---
12
- ---
13
- title: DreamlikeArt-Diffusion .0
14
- emoji: 🧘🏻‍♂️
15
- colorFrom: blue
16
- colorTo: yellow
17
- sdk: gradio
18
- sdk_version: 3.16.1
19
- app_file: app.py
spaces/Dao3/openai-translator/app.py DELETED
@@ -1,255 +0,0 @@
1
- import os
2
- import openai
3
- import gradio as gr
4
-
5
- openai.api_key = os.environ['OPENAI_KEY']
6
-
7
- supportLanguages = [
8
- ["auto", "自动识别"],
9
- ["粤语", "粤语"],
10
- ["古文", "文言文"],
11
- ["af","Afrikaans"],
12
- ["ak","Akan"],
13
- ["sq","Albanian"],
14
- ["am","Amharic"],
15
- ["ar","Arabic"],
16
- ["hy","Armenian"],
17
- ["az","Azerbaijani"],
18
- ["eu","Basque"],
19
- ["be","Belarusian"],
20
- ["bem","Bemba"],
21
- ["bn","Bengali"],
22
- ["bh","Bihari"],
23
- ["xx-bork","Bork, bork, bork!"],
24
- ["bs","Bosnian"],
25
- ["br","Breton"],
26
- ["bg","Bulgarian"],
27
- ["km","Cambodian"],
28
- ["ca","Catalan"],
29
- ["chr","Cherokee"],
30
- ["ny","Chichewa"],
31
- ["zh-CN","中文(简体)"],
32
- ["zh-TW","中文 (繁体)"],
33
- ["co","Corsican"],
34
- ["hr","Croatian"],
35
- ["cs","Czech"],
36
- ["da","Danish"],
37
- ["nl","Dutch"],
38
- ["xx-elmer","Elmer Fudd"],
39
- ["en","English"],
40
- ["eo","Esperanto"],
41
- ["et","Estonian"],
42
- ["ee","Ewe"],
43
- ["fo","Faroese"],
44
- ["tl","Filipino"],
45
- ["fi","Finnish"],
46
- ["fr","French"],
47
- ["fy","Frisian"],
48
- ["gaa","Ga"],
49
- ["gl","Galician"],
50
- ["ka","Georgian"],
51
- ["de","German"],
52
- ["el","Greek"],
53
- ["gn","Guarani"],
54
- ["gu","Gujarati"],
55
- ["xx-hacker","Hacker"],
56
- ["ht","Haitian Creole"],
57
- ["ha","Hausa"],
58
- ["haw","Hawaiian"],
59
- ["iw","Hebrew"],
60
- ["hi","Hindi"],
61
- ["hu","Hungarian"],
62
- ["is","Icelandic"],
63
- ["ig","Igbo"],
64
- ["id","Indonesian"],
65
- ["ia","Interlingua"],
66
- ["ga","Irish"],
67
- ["it","Italian"],
68
- ["ja","Japanese"],
69
- ["jw","Javanese"],
70
- ["kn","Kannada"],
71
- ["kk","Kazakh"],
72
- ["rw","Kinyarwanda"],
73
- ["rn","Kirundi"],
74
- ["xx-klingon","Klingon"],
75
- ["kg","Kongo"],
76
- ["ko","Korean"],
77
- ["kri","Krio (Sierra Leone)"],
78
- ["ku","Kurdish"],
79
- ["ckb","Kurdish (Soranî)"],
80
- ["ky","Kyrgyz"],
81
- ["lo","Laothian"],
82
- ["la","Latin"],
83
- ["lv","Latvian"],
84
- ["ln","Lingala"],
85
- ["lt","Lithuanian"],
86
- ["loz","Lozi"],
87
- ["lg","Luganda"],
88
- ["ach","Luo"],
89
- ["mk","Macedonian"],
90
- ["mg","Malagasy"],
91
- ["ms","Malay"],
92
- ["ml","Malayalam"],
93
- ["mt","Maltese"],
94
- ["mi","Maori"],
95
- ["mr","Marathi"],
96
- ["mfe","Mauritian Creole"],
97
- ["mo","Moldavian"],
98
- ["mn","Mongolian"],
99
- ["sr-ME","Montenegrin"],
100
- ["ne","Nepali"],
101
- ["pcm","Nigerian Pidgin"],
102
- ["nso","Northern Sotho"],
103
- ["no","Norwegian"],
104
- ["nn","Norwegian (Nynorsk)"],
105
- ["oc","Occitan"],
106
- ["or","Oriya"],
107
- ["om","Oromo"],
108
- ["ps","Pashto"],
109
- ["fa","Persian"],
110
- ["xx-pirate","Pirate"],
111
- ["pl","Polish"],
112
- ["pt-BR","Portuguese (Brazil)"],
113
- ["pt-PT","Portuguese (Portugal)"],
114
- ["pa","Punjabi"],
115
- ["qu","Quechua"],
116
- ["ro","Romanian"],
117
- ["rm","Romansh"],
118
- ["nyn","Runyakitara"],
119
- ["ru","Russian"],
120
- ["gd","Scots Gaelic"],
121
- ["sr","Serbian"],
122
- ["sh","Serbo-Croatian"],
123
- ["st","Sesotho"],
124
- ["tn","Setswana"],
125
- ["crs","Seychellois Creole"],
126
- ["sn","Shona"],
127
- ["sd","Sindhi"],
128
- ["si","Sinhalese"],
129
- ["sk","Slovak"],
130
- ["sl","Slovenian"],
131
- ["so","Somali"],
132
- ["es","Spanish"],
133
- ["es-419","Spanish (Latin American)"],
134
- ["su","Sundanese"],
135
- ["sw","Swahili"],
136
- ["sv","Swedish"],
137
- ["tg","Tajik"],
138
- ["ta","Tamil"],
139
- ["tt","Tatar"],
140
- ["te","Telugu"],
141
- ["th","Thai"],
142
- ["ti","Tigrinya"],
143
- ["to","Tonga"],
144
- ["lua","Tshiluba"],
145
- ["tum","Tumbuka"],
146
- ["tr","Turkish"],
147
- ["tk","Turkmen"],
148
- ["tw","Twi"],
149
- ["ug","Uighur"],
150
- ["uk","Ukrainian"],
151
- ["ur","Urdu"],
152
- ["uz","Uzbek"],
153
- ["vi","Vietnamese"],
154
- ["cy","Welsh"],
155
- ["wo","Wolof"],
156
- ["xh","Xhosa"],
157
- ["yi","Yiddish"],
158
- ["yo","Yoruba"],
159
- ["zu","Zulu"],
160
- ]
161
- prompt_template = "You are a translation engine that can only translate text and cannot interpret it. Keep the indent of the original text, only modify when you need."
162
-
163
- def submit_message(detectFrom, detectTo, user_token, prompt):
164
- if user_token != "":
165
- openai.api_key = user_token
166
-
167
- if not prompt:
168
- return gr.update(value="")
169
-
170
- for lc, lang in supportLanguages:
171
- if detectFrom == lang:
172
- detectFrom = lc
173
- if detectTo == lang:
174
- detectTo = lc
175
-
176
- systemInstruct = prompt_template
177
- translateInstruct = f"translate from {detectFrom} to {detectTo}"
178
- if detectFrom == "auto":
179
- translateInstruct = f"translate to {detectTo}"
180
- if detectFrom in ["古文", "zh-CN", "zh-TW"]:
181
- if detectTo == "zh-TW":
182
- translateInstruct = "翻译成繁体白话文"
183
- if detectTo == "zh-CN":
184
- translateInstruct = "翻译成简体白话文"
185
- if detectTo == "粤语":
186
- translateInstruct = "翻译成粤语白话文"
187
-
188
- if detectFrom == detectTo:
189
- systemInstruct = "You are a text embellisher, you can only embellish the text, don't interpret it."
190
- if detectTo in ["zh-CN", "zh-TW"]:
191
- translateInstruct = "润色此句"
192
- else:
193
- translateInstruct = "polish this sentence"
194
-
195
- prompt_msg = [
196
- {"role": "system", "content": systemInstruct},
197
- {"role": "user", "content": translateInstruct},
198
- {"role": "user", "content": prompt},
199
- ]
200
-
201
- try:
202
- openai_response = openai.ChatCompletion.create(
203
- model="gpt-3.5-turbo",
204
- messages=prompt_msg,
205
- temperature=0,
206
- max_tokens=1000,
207
- top_p=1,
208
- stream=True,
209
- frequency_penalty=1,
210
- presence_penalty=1,
211
- )
212
-
213
- combined = ""
214
- for resp in openai_response:
215
- delta = resp["choices"][0]["delta"]
216
- if "content" in delta:
217
- combined += delta["content"]
218
- yield combined
219
-
220
- except Exception as e:
221
- return f"Error: {e}"
222
-
223
- css = """
224
- #col-container {max-width: 80%; margin-left: auto; margin-right: auto;}
225
- #chatbox {min-height: 400px;}
226
- #header {text-align: center;}
227
- #label {font-size: 0.8em; padding: 0.5em; margin: 0;}
228
- .message { font-size: 1.2em; }
229
- """
230
-
231
- with gr.Blocks(css=css) as demo:
232
-
233
- state = gr.State([])
234
-
235
- with gr.Column(elem_id="col-container"):
236
- gr.Markdown("""## 多语言翻译
237
- 使用OpenAI官方 API (gpt-3.5-turbo model).""", elem_id="header")
238
-
239
- with gr.Row():
240
- with gr.Column():
241
- translateFrom = gr.Dropdown(label="原文", elem_id="translate-from", multiselect=False, value="自动识别", choices=[l[1] for l in supportLanguages]).style(container=False)
242
- input_message = gr.Textbox(max_lines=100, show_label=False, lines=10, placeholder="Enter text and press enter", visible=True).style(container=False)
243
- with gr.Column():
244
- translateTo = gr.Dropdown(label="译文", elem_id="translate-to", multiselect=False, value="中文 (简体)", choices=[l[1] for l in supportLanguages[1:]]).style(container=False)
245
- output = gr.Textbox(max_lines=100, show_label=False, lines=10, label="Output", visible=True).style(container=False)
246
-
247
- btn_submit = gr.Button("急急如律令")
248
-
249
- with gr.Row():
250
- user_token = gr.Textbox(value='', placeholder="OpenAI API Key", type="password", label="输入你自己的OpenAI API Key翻译过程会更准确哦~.")
251
-
252
- btn_submit.click(submit_message, [translateFrom, translateTo, user_token, input_message], [output])
253
-
254
- demo.queue(concurrency_count=10)
255
- demo.launch(height='800px')
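A hypothetical smoke test of the streaming generator above (not part of the original app); it assumes a valid OpenAI key, either via the `OPENAI_KEY` environment variable read at import time or passed as `user_token` below.

```python
# Hypothetical smoke test of submit_message; "sk-..." is a placeholder key.
for partial in submit_message("English", "中文(简体)", user_token="sk-...",
                              prompt="Hello, world!"):
    print(partial)
# Each yielded value is the translation accumulated so far; the last one is the full text.
```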
spaces/DeclK/pose/tools/dtw.py DELETED
@@ -1,116 +0,0 @@
1
- import numpy as np
2
- from .utils import get_keypoint_weight
3
-
4
-
5
- class DTWForKeypoints:
6
- def __init__(self, keypoints1, keypoints2):
7
- self.keypoints1 = keypoints1
8
- self.keypoints2 = keypoints2
9
-
10
- def get_dtw_path(self):
11
-
12
- norm_kp1 = self.normalize_keypoints(self.keypoints1)
13
- norm_kp2 = self.normalize_keypoints(self.keypoints2)
14
-
15
- kp_weight = get_keypoint_weight()
16
- oks, oks_unnorm = self.object_keypoint_similarity(norm_kp1,
17
- norm_kp2, keypoint_weights=kp_weight)
18
- print(f"OKS max {oks.max():.2f} min {oks.min():.2f}")
19
-
20
- # do the DTW, and return the path
21
- cost_matrix = 1 - oks
22
- dtw_dist, dtw_path = self.dynamic_time_warp(cost_matrix)
23
-
24
- return dtw_path, oks, oks_unnorm
25
-
26
- def normalize_keypoints(self, keypoints):
27
- centroid = keypoints.mean(axis=1)[:, None]
28
- max_distance = np.max(np.sqrt(np.sum((keypoints - centroid) ** 2, axis=2)),
29
- axis=1) + 1e-6
30
-
31
- normalized_keypoints = (keypoints - centroid) / max_distance[:, None, None]
32
- return normalized_keypoints
33
-
34
- def keypoints_areas(self, keypoints):
35
- min_coords = np.min(keypoints, axis=1)
36
- max_coords = np.max(keypoints, axis=1)
37
- areas = np.prod(max_coords - min_coords, axis=1)
38
- return areas
39
-
40
- def object_keypoint_similarity(self, keypoints1,
41
- keypoints2,
42
- scale_constant=0.2,
43
- keypoint_weights=None):
44
- """ Calculate the Object Keypoint Similarity (OKS) for multiple objects,
45
- and add weight to each keypoint. Here we choose to normalize the points
46
- using centroid and max distance instead of bounding box area.
47
- """
48
- # Compute squared distances between all pairs of keypoints
49
- sq_diff = np.sum((keypoints1[:, None] - keypoints2) ** 2, axis=-1)
50
-
51
- oks = np.exp(-sq_diff / (2 * scale_constant ** 2))
52
- oks_unnorm = oks.copy()
53
-
54
- if keypoint_weights is not None:
55
- oks = oks * keypoint_weights
56
- oks = np.sum(oks, axis=-1)
57
- else:
58
- oks = np.mean(oks, axis=-1)
59
-
60
- return oks, oks_unnorm
61
-
62
- def dynamic_time_warp(self, cost_matrix, R=1000):
63
- """Compute the Dynamic Time Warping distance and path between two time series.
64
- If the time series is too long, it will use the Sakoe-Chiba Band constraint,
65
- so time complexity is bounded at O(MR).
66
- """
67
-
68
- M = len(self.keypoints1)
69
- N = len(self.keypoints2)
70
-
71
- # Initialize the distance matrix with infinity
72
- D = np.full((M, N), np.inf)
73
-
74
- # Initialize the first row and column of the matrix
75
- D[0, 0] = cost_matrix[0, 0]
76
- for i in range(1, M):
77
- D[i, 0] = D[i - 1, 0] + cost_matrix[i, 0]
78
-
79
- for j in range(1, N):
80
- D[0, j] = D[0, j - 1] + cost_matrix[0, j]
81
-
82
- # Fill the remaining elements of the matrix within the
83
- # Sakoe-Chiba Band using dynamic programming
84
- for i in range(1, M):
85
- for j in range(max(1, i - R), min(N, i + R + 1)):
86
- cost = cost_matrix[i, j]
87
- D[i, j] = cost + min(D[i - 1, j], D[i, j - 1], D[i - 1, j - 1])
88
-
89
- # Backtrack to find the optimal path
90
- path = [(M - 1, N - 1)]
91
- i, j = M - 1, N - 1
92
- while i > 0 or j > 0:
93
- min_idx = np.argmin([D[i - 1, j], D[i, j - 1], D[i - 1, j - 1]])
94
- if min_idx == 0:
95
- i -= 1
96
- elif min_idx == 1:
97
- j -= 1
98
- else:
99
- i -= 1
100
- j -= 1
101
- path.append((i, j))
102
- path.reverse()
103
-
104
- return D[-1, -1], path
105
-
106
- if __name__ == '__main__':
107
-
108
- from mmengine.fileio import load
109
-
110
- keypoints1, kp1_scores = load('tennis1.pkl')
111
- keypoints2, kp2_scores = load('tennis3.pkl')
112
-
113
- # Normalize the keypoints
114
- dtw = DTWForKeypoints(keypoints1, keypoints2)
115
- path = dtw.get_dtw_path()
116
- print(path)
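A toy illustration of the dynamic-time-warping step above on a hand-made cost matrix (values invented for the example); the class is constructed with dummy keypoints only because `dynamic_time_warp` reads their lengths, and the module's relative import of `get_keypoint_weight` is assumed to resolve.

```python
# Toy DTW example on an invented 3x4 cost matrix.
import numpy as np

kp1 = np.zeros((3, 17, 2))   # 3 frames, 17 keypoints each
kp2 = np.zeros((4, 17, 2))   # 4 frames
dtw = DTWForKeypoints(kp1, kp2)

cost = np.array([[0.1, 0.8, 0.9, 0.9],
                 [0.7, 0.2, 0.6, 0.8],
                 [0.9, 0.7, 0.1, 0.2]])
dist, path = dtw.dynamic_time_warp(cost)
print(dist)   # ~0.6, the accumulated cost along the optimal alignment
print(path)   # [(0, 0), (1, 1), (2, 2), (2, 3)]
```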
spaces/Demi2809/rvc-models/infer_pack/models_onnx.py DELETED
@@ -1,849 +0,0 @@
1
- import math, pdb, os
2
- from time import time as ttime
3
- import torch
4
- from torch import nn
5
- from torch.nn import functional as F
6
- from infer_pack import modules
7
- from infer_pack import attentions
8
- from infer_pack import commons
9
- from infer_pack.commons import init_weights, get_padding
10
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
11
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
12
- from infer_pack.commons import init_weights
13
- import numpy as np
14
- from infer_pack import commons
15
-
16
-
17
- class TextEncoder256(nn.Module):
18
- def __init__(
19
- self,
20
- out_channels,
21
- hidden_channels,
22
- filter_channels,
23
- n_heads,
24
- n_layers,
25
- kernel_size,
26
- p_dropout,
27
- f0=True,
28
- ):
29
- super().__init__()
30
- self.out_channels = out_channels
31
- self.hidden_channels = hidden_channels
32
- self.filter_channels = filter_channels
33
- self.n_heads = n_heads
34
- self.n_layers = n_layers
35
- self.kernel_size = kernel_size
36
- self.p_dropout = p_dropout
37
- self.emb_phone = nn.Linear(256, hidden_channels)
38
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
39
- if f0 == True:
40
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
41
- self.encoder = attentions.Encoder(
42
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
43
- )
44
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
45
-
46
- def forward(self, phone, pitch, lengths):
47
- if pitch == None:
48
- x = self.emb_phone(phone)
49
- else:
50
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
51
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
52
- x = self.lrelu(x)
53
- x = torch.transpose(x, 1, -1) # [b, h, t]
54
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
55
- x.dtype
56
- )
57
- x = self.encoder(x * x_mask, x_mask)
58
- stats = self.proj(x) * x_mask
59
-
60
- m, logs = torch.split(stats, self.out_channels, dim=1)
61
- return m, logs, x_mask
62
-
63
-
64
- class TextEncoder256Sim(nn.Module):
65
- def __init__(
66
- self,
67
- out_channels,
68
- hidden_channels,
69
- filter_channels,
70
- n_heads,
71
- n_layers,
72
- kernel_size,
73
- p_dropout,
74
- f0=True,
75
- ):
76
- super().__init__()
77
- self.out_channels = out_channels
78
- self.hidden_channels = hidden_channels
79
- self.filter_channels = filter_channels
80
- self.n_heads = n_heads
81
- self.n_layers = n_layers
82
- self.kernel_size = kernel_size
83
- self.p_dropout = p_dropout
84
- self.emb_phone = nn.Linear(256, hidden_channels)
85
- self.lrelu = nn.LeakyReLU(0.1, inplace=True)
86
- if f0 == True:
87
- self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256
88
- self.encoder = attentions.Encoder(
89
- hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout
90
- )
91
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
92
-
93
- def forward(self, phone, pitch, lengths):
94
- if pitch == None:
95
- x = self.emb_phone(phone)
96
- else:
97
- x = self.emb_phone(phone) + self.emb_pitch(pitch)
98
- x = x * math.sqrt(self.hidden_channels) # [b, t, h]
99
- x = self.lrelu(x)
100
- x = torch.transpose(x, 1, -1) # [b, h, t]
101
- x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to(
102
- x.dtype
103
- )
104
- x = self.encoder(x * x_mask, x_mask)
105
- x = self.proj(x) * x_mask
106
- return x, x_mask
107
-
108
-
109
- class ResidualCouplingBlock(nn.Module):
110
- def __init__(
111
- self,
112
- channels,
113
- hidden_channels,
114
- kernel_size,
115
- dilation_rate,
116
- n_layers,
117
- n_flows=4,
118
- gin_channels=0,
119
- ):
120
- super().__init__()
121
- self.channels = channels
122
- self.hidden_channels = hidden_channels
123
- self.kernel_size = kernel_size
124
- self.dilation_rate = dilation_rate
125
- self.n_layers = n_layers
126
- self.n_flows = n_flows
127
- self.gin_channels = gin_channels
128
-
129
- self.flows = nn.ModuleList()
130
- for i in range(n_flows):
131
- self.flows.append(
132
- modules.ResidualCouplingLayer(
133
- channels,
134
- hidden_channels,
135
- kernel_size,
136
- dilation_rate,
137
- n_layers,
138
- gin_channels=gin_channels,
139
- mean_only=True,
140
- )
141
- )
142
- self.flows.append(modules.Flip())
143
-
144
- def forward(self, x, x_mask, g=None, reverse=False):
145
- if not reverse:
146
- for flow in self.flows:
147
- x, _ = flow(x, x_mask, g=g, reverse=reverse)
148
- else:
149
- for flow in reversed(self.flows):
150
- x = flow(x, x_mask, g=g, reverse=reverse)
151
- return x
152
-
153
- def remove_weight_norm(self):
154
- for i in range(self.n_flows):
155
- self.flows[i * 2].remove_weight_norm()
156
-
157
-
158
- class PosteriorEncoder(nn.Module):
159
- def __init__(
160
- self,
161
- in_channels,
162
- out_channels,
163
- hidden_channels,
164
- kernel_size,
165
- dilation_rate,
166
- n_layers,
167
- gin_channels=0,
168
- ):
169
- super().__init__()
170
- self.in_channels = in_channels
171
- self.out_channels = out_channels
172
- self.hidden_channels = hidden_channels
173
- self.kernel_size = kernel_size
174
- self.dilation_rate = dilation_rate
175
- self.n_layers = n_layers
176
- self.gin_channels = gin_channels
177
-
178
- self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
179
- self.enc = modules.WN(
180
- hidden_channels,
181
- kernel_size,
182
- dilation_rate,
183
- n_layers,
184
- gin_channels=gin_channels,
185
- )
186
- self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
187
-
188
- def forward(self, x, x_lengths, g=None):
189
- x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(
190
- x.dtype
191
- )
192
- x = self.pre(x) * x_mask
193
- x = self.enc(x, x_mask, g=g)
194
- stats = self.proj(x) * x_mask
195
- m, logs = torch.split(stats, self.out_channels, dim=1)
196
- z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
197
- return z, m, logs, x_mask
198
-
199
- def remove_weight_norm(self):
200
- self.enc.remove_weight_norm()
201
-
202
-
203
- class Generator(torch.nn.Module):
204
- def __init__(
205
- self,
206
- initial_channel,
207
- resblock,
208
- resblock_kernel_sizes,
209
- resblock_dilation_sizes,
210
- upsample_rates,
211
- upsample_initial_channel,
212
- upsample_kernel_sizes,
213
- gin_channels=0,
214
- ):
215
- super(Generator, self).__init__()
216
- self.num_kernels = len(resblock_kernel_sizes)
217
- self.num_upsamples = len(upsample_rates)
218
- self.conv_pre = Conv1d(
219
- initial_channel, upsample_initial_channel, 7, 1, padding=3
220
- )
221
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
222
-
223
- self.ups = nn.ModuleList()
224
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
225
- self.ups.append(
226
- weight_norm(
227
- ConvTranspose1d(
228
- upsample_initial_channel // (2**i),
229
- upsample_initial_channel // (2 ** (i + 1)),
230
- k,
231
- u,
232
- padding=(k - u) // 2,
233
- )
234
- )
235
- )
236
-
237
- self.resblocks = nn.ModuleList()
238
- for i in range(len(self.ups)):
239
- ch = upsample_initial_channel // (2 ** (i + 1))
240
- for j, (k, d) in enumerate(
241
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
242
- ):
243
- self.resblocks.append(resblock(ch, k, d))
244
-
245
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
246
- self.ups.apply(init_weights)
247
-
248
- if gin_channels != 0:
249
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
250
-
251
- def forward(self, x, g=None):
252
- x = self.conv_pre(x)
253
- if g is not None:
254
- x = x + self.cond(g)
255
-
256
- for i in range(self.num_upsamples):
257
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
258
- x = self.ups[i](x)
259
- xs = None
260
- for j in range(self.num_kernels):
261
- if xs is None:
262
- xs = self.resblocks[i * self.num_kernels + j](x)
263
- else:
264
- xs += self.resblocks[i * self.num_kernels + j](x)
265
- x = xs / self.num_kernels
266
- x = F.leaky_relu(x)
267
- x = self.conv_post(x)
268
- x = torch.tanh(x)
269
-
270
- return x
271
-
272
- def remove_weight_norm(self):
273
- for l in self.ups:
274
- remove_weight_norm(l)
275
- for l in self.resblocks:
276
- l.remove_weight_norm()
277
-
278
-
279
- class SineGen(torch.nn.Module):
280
- """Definition of sine generator
281
- SineGen(samp_rate, harmonic_num = 0,
282
- sine_amp = 0.1, noise_std = 0.003,
283
- voiced_threshold = 0,
284
- flag_for_pulse=False)
285
- samp_rate: sampling rate in Hz
286
- harmonic_num: number of harmonic overtones (default 0)
287
- sine_amp: amplitude of sine waveform (default 0.1)
288
- noise_std: std of Gaussian noise (default 0.003)
289
- voiced_threshold: F0 threshold for U/V classification (default 0)
290
- flag_for_pulse: this SineGen is used inside PulseGen (default False)
291
- Note: when flag_for_pulse is True, the first time step of a voiced
292
- segment is always sin(np.pi) or cos(0)
293
- """
294
-
295
- def __init__(
296
- self,
297
- samp_rate,
298
- harmonic_num=0,
299
- sine_amp=0.1,
300
- noise_std=0.003,
301
- voiced_threshold=0,
302
- flag_for_pulse=False,
303
- ):
304
- super(SineGen, self).__init__()
305
- self.sine_amp = sine_amp
306
- self.noise_std = noise_std
307
- self.harmonic_num = harmonic_num
308
- self.dim = self.harmonic_num + 1
309
- self.sampling_rate = samp_rate
310
- self.voiced_threshold = voiced_threshold
311
-
312
- def _f02uv(self, f0):
313
- # generate uv signal
314
- uv = torch.ones_like(f0)
315
- uv = uv * (f0 > self.voiced_threshold)
316
- return uv
317
-
318
- def forward(self, f0, upp):
319
- """sine_tensor, uv = forward(f0)
320
- input F0: tensor(batchsize=1, length, dim=1)
321
- f0 for unvoiced steps should be 0
322
- output sine_tensor: tensor(batchsize=1, length, dim)
323
- output uv: tensor(batchsize=1, length, 1)
324
- """
325
- with torch.no_grad():
326
- f0 = f0[:, None].transpose(1, 2)
327
- f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device)
328
- # fundamental component
329
- f0_buf[:, :, 0] = f0[:, :, 0]
330
- for idx in np.arange(self.harmonic_num):
331
- f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (
332
- idx + 2
333
- ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
334
- rad_values = (f0_buf / self.sampling_rate) % 1  ### the % 1 here means the n_har products cannot be optimized away in post-processing
335
- rand_ini = torch.rand(
336
- f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device
337
- )
338
- rand_ini[:, 0] = 0
339
- rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
340
- tmp_over_one = torch.cumsum(rad_values, 1)  # % 1  ##### applying % 1 here would prevent the following cumsum from being optimized
341
- tmp_over_one *= upp
342
- tmp_over_one = F.interpolate(
343
- tmp_over_one.transpose(2, 1),
344
- scale_factor=upp,
345
- mode="linear",
346
- align_corners=True,
347
- ).transpose(2, 1)
348
- rad_values = F.interpolate(
349
- rad_values.transpose(2, 1), scale_factor=upp, mode="nearest"
350
- ).transpose(
351
- 2, 1
352
- ) #######
353
- tmp_over_one %= 1
354
- tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0
355
- cumsum_shift = torch.zeros_like(rad_values)
356
- cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
357
- sine_waves = torch.sin(
358
- torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi
359
- )
360
- sine_waves = sine_waves * self.sine_amp
361
- uv = self._f02uv(f0)
362
- uv = F.interpolate(
363
- uv.transpose(2, 1), scale_factor=upp, mode="nearest"
364
- ).transpose(2, 1)
365
- noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
366
- noise = noise_amp * torch.randn_like(sine_waves)
367
- sine_waves = sine_waves * uv + noise
368
- return sine_waves, uv, noise
369
-
370
-
371
- class SourceModuleHnNSF(torch.nn.Module):
372
- """SourceModule for hn-nsf
373
- SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
374
- add_noise_std=0.003, voiced_threshod=0)
375
- sampling_rate: sampling_rate in Hz
376
- harmonic_num: number of harmonic above F0 (default: 0)
377
- sine_amp: amplitude of sine source signal (default: 0.1)
378
- add_noise_std: std of additive Gaussian noise (default: 0.003)
379
- note that amplitude of noise in unvoiced is decided
380
- by sine_amp
381
- voiced_threshold: threshold to set U/V given F0 (default: 0)
382
- Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
383
- F0_sampled (batchsize, length, 1)
384
- Sine_source (batchsize, length, 1)
385
- noise_source (batchsize, length 1)
386
- uv (batchsize, length, 1)
387
- """
388
-
389
- def __init__(
390
- self,
391
- sampling_rate,
392
- harmonic_num=0,
393
- sine_amp=0.1,
394
- add_noise_std=0.003,
395
- voiced_threshod=0,
396
- is_half=True,
397
- ):
398
- super(SourceModuleHnNSF, self).__init__()
399
-
400
- self.sine_amp = sine_amp
401
- self.noise_std = add_noise_std
402
- self.is_half = is_half
403
- # to produce sine waveforms
404
- self.l_sin_gen = SineGen(
405
- sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod
406
- )
407
-
408
- # to merge source harmonics into a single excitation
409
- self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
410
- self.l_tanh = torch.nn.Tanh()
411
-
412
- def forward(self, x, upp=None):
413
- sine_wavs, uv, _ = self.l_sin_gen(x, upp)
414
- if self.is_half:
415
- sine_wavs = sine_wavs.half()
416
- sine_merge = self.l_tanh(self.l_linear(sine_wavs))
417
- return sine_merge, None, None # noise, uv
418
-
419
-
420
- class GeneratorNSF(torch.nn.Module):
421
- def __init__(
422
- self,
423
- initial_channel,
424
- resblock,
425
- resblock_kernel_sizes,
426
- resblock_dilation_sizes,
427
- upsample_rates,
428
- upsample_initial_channel,
429
- upsample_kernel_sizes,
430
- gin_channels,
431
- sr,
432
- is_half=False,
433
- ):
434
- super(GeneratorNSF, self).__init__()
435
- self.num_kernels = len(resblock_kernel_sizes)
436
- self.num_upsamples = len(upsample_rates)
437
-
438
- self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates))
439
- self.m_source = SourceModuleHnNSF(
440
- sampling_rate=sr, harmonic_num=0, is_half=is_half
441
- )
442
- self.noise_convs = nn.ModuleList()
443
- self.conv_pre = Conv1d(
444
- initial_channel, upsample_initial_channel, 7, 1, padding=3
445
- )
446
- resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2
447
-
448
- self.ups = nn.ModuleList()
449
- for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
450
- c_cur = upsample_initial_channel // (2 ** (i + 1))
451
- self.ups.append(
452
- weight_norm(
453
- ConvTranspose1d(
454
- upsample_initial_channel // (2**i),
455
- upsample_initial_channel // (2 ** (i + 1)),
456
- k,
457
- u,
458
- padding=(k - u) // 2,
459
- )
460
- )
461
- )
462
- if i + 1 < len(upsample_rates):
463
- stride_f0 = np.prod(upsample_rates[i + 1 :])
464
- self.noise_convs.append(
465
- Conv1d(
466
- 1,
467
- c_cur,
468
- kernel_size=stride_f0 * 2,
469
- stride=stride_f0,
470
- padding=stride_f0 // 2,
471
- )
472
- )
473
- else:
474
- self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
475
-
476
- self.resblocks = nn.ModuleList()
477
- for i in range(len(self.ups)):
478
- ch = upsample_initial_channel // (2 ** (i + 1))
479
- for j, (k, d) in enumerate(
480
- zip(resblock_kernel_sizes, resblock_dilation_sizes)
481
- ):
482
- self.resblocks.append(resblock(ch, k, d))
483
-
484
- self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
485
- self.ups.apply(init_weights)
486
-
487
- if gin_channels != 0:
488
- self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1)
489
-
490
- self.upp = np.prod(upsample_rates)
491
-
492
- def forward(self, x, f0, g=None):
493
- har_source, noi_source, uv = self.m_source(f0, self.upp)
494
- har_source = har_source.transpose(1, 2)
495
- x = self.conv_pre(x)
496
- if g is not None:
497
- x = x + self.cond(g)
498
-
499
- for i in range(self.num_upsamples):
500
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
501
- x = self.ups[i](x)
502
- x_source = self.noise_convs[i](har_source)
503
- x = x + x_source
504
- xs = None
505
- for j in range(self.num_kernels):
506
- if xs is None:
507
- xs = self.resblocks[i * self.num_kernels + j](x)
508
- else:
509
- xs += self.resblocks[i * self.num_kernels + j](x)
510
- x = xs / self.num_kernels
511
- x = F.leaky_relu(x)
512
- x = self.conv_post(x)
513
- x = torch.tanh(x)
514
- return x
515
-
516
- def remove_weight_norm(self):
517
- for l in self.ups:
518
- remove_weight_norm(l)
519
- for l in self.resblocks:
520
- l.remove_weight_norm()
521
-
522
-
523
- sr2sr = {
524
- "32k": 32000,
525
- "40k": 40000,
526
- "48k": 48000,
527
- }
528
-
529
-
530
- class SynthesizerTrnMs256NSFsid(nn.Module):
531
- def __init__(
532
- self,
533
- spec_channels,
534
- segment_size,
535
- inter_channels,
536
- hidden_channels,
537
- filter_channels,
538
- n_heads,
539
- n_layers,
540
- kernel_size,
541
- p_dropout,
542
- resblock,
543
- resblock_kernel_sizes,
544
- resblock_dilation_sizes,
545
- upsample_rates,
546
- upsample_initial_channel,
547
- upsample_kernel_sizes,
548
- spk_embed_dim,
549
- gin_channels,
550
- sr,
551
- **kwargs
552
- ):
553
- super().__init__()
554
- if type(sr) == type("strr"):
555
- sr = sr2sr[sr]
556
- self.spec_channels = spec_channels
557
- self.inter_channels = inter_channels
558
- self.hidden_channels = hidden_channels
559
- self.filter_channels = filter_channels
560
- self.n_heads = n_heads
561
- self.n_layers = n_layers
562
- self.kernel_size = kernel_size
563
- self.p_dropout = p_dropout
564
- self.resblock = resblock
565
- self.resblock_kernel_sizes = resblock_kernel_sizes
566
- self.resblock_dilation_sizes = resblock_dilation_sizes
567
- self.upsample_rates = upsample_rates
568
- self.upsample_initial_channel = upsample_initial_channel
569
- self.upsample_kernel_sizes = upsample_kernel_sizes
570
- self.segment_size = segment_size
571
- self.gin_channels = gin_channels
572
- # self.hop_length = hop_length#
573
- self.spk_embed_dim = spk_embed_dim
574
- self.enc_p = TextEncoder256(
575
- inter_channels,
576
- hidden_channels,
577
- filter_channels,
578
- n_heads,
579
- n_layers,
580
- kernel_size,
581
- p_dropout,
582
- )
583
- self.dec = GeneratorNSF(
584
- inter_channels,
585
- resblock,
586
- resblock_kernel_sizes,
587
- resblock_dilation_sizes,
588
- upsample_rates,
589
- upsample_initial_channel,
590
- upsample_kernel_sizes,
591
- gin_channels=gin_channels,
592
- sr=sr,
593
- is_half=kwargs["is_half"],
594
- )
595
- self.enc_q = PosteriorEncoder(
596
- spec_channels,
597
- inter_channels,
598
- hidden_channels,
599
- 5,
600
- 1,
601
- 16,
602
- gin_channels=gin_channels,
603
- )
604
- self.flow = ResidualCouplingBlock(
605
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
606
- )
607
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
608
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
609
-
610
- def remove_weight_norm(self):
611
- self.dec.remove_weight_norm()
612
- self.flow.remove_weight_norm()
613
- self.enc_q.remove_weight_norm()
614
-
615
- def forward(self, phone, phone_lengths, pitch, nsff0, sid, rnd, max_len=None):
616
- g = self.emb_g(sid).unsqueeze(-1)
617
- m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths)
618
- z_p = (m_p + torch.exp(logs_p) * rnd) * x_mask
619
- z = self.flow(z_p, x_mask, g=g, reverse=True)
620
- o = self.dec((z * x_mask)[:, :, :max_len], nsff0, g=g)
621
- return o
622
-
623
-
624
- class SynthesizerTrnMs256NSFsid_sim(nn.Module):
625
- """
626
- Synthesizer for Training
627
- """
628
-
629
- def __init__(
630
- self,
631
- spec_channels,
632
- segment_size,
633
- inter_channels,
634
- hidden_channels,
635
- filter_channels,
636
- n_heads,
637
- n_layers,
638
- kernel_size,
639
- p_dropout,
640
- resblock,
641
- resblock_kernel_sizes,
642
- resblock_dilation_sizes,
643
- upsample_rates,
644
- upsample_initial_channel,
645
- upsample_kernel_sizes,
646
- spk_embed_dim,
647
- # hop_length,
648
- gin_channels=0,
649
- use_sdp=True,
650
- **kwargs
651
- ):
652
- super().__init__()
653
- self.spec_channels = spec_channels
654
- self.inter_channels = inter_channels
655
- self.hidden_channels = hidden_channels
656
- self.filter_channels = filter_channels
657
- self.n_heads = n_heads
658
- self.n_layers = n_layers
659
- self.kernel_size = kernel_size
660
- self.p_dropout = p_dropout
661
- self.resblock = resblock
662
- self.resblock_kernel_sizes = resblock_kernel_sizes
663
- self.resblock_dilation_sizes = resblock_dilation_sizes
664
- self.upsample_rates = upsample_rates
665
- self.upsample_initial_channel = upsample_initial_channel
666
- self.upsample_kernel_sizes = upsample_kernel_sizes
667
- self.segment_size = segment_size
668
- self.gin_channels = gin_channels
669
- # self.hop_length = hop_length#
670
- self.spk_embed_dim = spk_embed_dim
671
- self.enc_p = TextEncoder256Sim(
672
- inter_channels,
673
- hidden_channels,
674
- filter_channels,
675
- n_heads,
676
- n_layers,
677
- kernel_size,
678
- p_dropout,
679
- )
680
- self.dec = GeneratorNSF(
681
- inter_channels,
682
- resblock,
683
- resblock_kernel_sizes,
684
- resblock_dilation_sizes,
685
- upsample_rates,
686
- upsample_initial_channel,
687
- upsample_kernel_sizes,
688
- gin_channels=gin_channels,
689
- is_half=kwargs["is_half"],
690
- )
691
-
692
- self.flow = ResidualCouplingBlock(
693
- inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels
694
- )
695
- self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
696
- print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim)
697
-
698
- def remove_weight_norm(self):
699
- self.dec.remove_weight_norm()
700
- self.flow.remove_weight_norm()
701
- self.enc_q.remove_weight_norm()
702
-
703
- def forward(
704
- self, phone, phone_lengths, pitch, pitchf, ds, max_len=None
705
- ):  # y is the spec, which is no longer needed here
706
- g = self.emb_g(ds.unsqueeze(0)).unsqueeze(-1)  # [b, 256, 1]  ## the 1 is t, broadcast
707
- x, x_mask = self.enc_p(phone, pitch, phone_lengths)
708
- x = self.flow(x, x_mask, g=g, reverse=True)
709
- o = self.dec((x * x_mask)[:, :, :max_len], pitchf, g=g)
710
- return o
711
-
712
-
713
- class MultiPeriodDiscriminator(torch.nn.Module):
714
- def __init__(self, use_spectral_norm=False):
715
- super(MultiPeriodDiscriminator, self).__init__()
716
- periods = [2, 3, 5, 7, 11, 17]
717
- # periods = [3, 5, 7, 11, 17, 23, 37]
718
-
719
- discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]
720
- discs = discs + [
721
- DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods
722
- ]
723
- self.discriminators = nn.ModuleList(discs)
724
-
725
- def forward(self, y, y_hat):
726
- y_d_rs = [] #
727
- y_d_gs = []
728
- fmap_rs = []
729
- fmap_gs = []
730
- for i, d in enumerate(self.discriminators):
731
- y_d_r, fmap_r = d(y)
732
- y_d_g, fmap_g = d(y_hat)
733
- # for j in range(len(fmap_r)):
734
- # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape)
735
- y_d_rs.append(y_d_r)
736
- y_d_gs.append(y_d_g)
737
- fmap_rs.append(fmap_r)
738
- fmap_gs.append(fmap_g)
739
-
740
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
741
-
742
-
743
- class DiscriminatorS(torch.nn.Module):
744
- def __init__(self, use_spectral_norm=False):
745
- super(DiscriminatorS, self).__init__()
746
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
747
- self.convs = nn.ModuleList(
748
- [
749
- norm_f(Conv1d(1, 16, 15, 1, padding=7)),
750
- norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)),
751
- norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)),
752
- norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)),
753
- norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)),
754
- norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
755
- ]
756
- )
757
- self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
758
-
759
- def forward(self, x):
760
- fmap = []
761
-
762
- for l in self.convs:
763
- x = l(x)
764
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
765
- fmap.append(x)
766
- x = self.conv_post(x)
767
- fmap.append(x)
768
- x = torch.flatten(x, 1, -1)
769
-
770
- return x, fmap
771
-
772
-
773
- class DiscriminatorP(torch.nn.Module):
774
- def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
775
- super(DiscriminatorP, self).__init__()
776
- self.period = period
777
- self.use_spectral_norm = use_spectral_norm
778
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
779
- self.convs = nn.ModuleList(
780
- [
781
- norm_f(
782
- Conv2d(
783
- 1,
784
- 32,
785
- (kernel_size, 1),
786
- (stride, 1),
787
- padding=(get_padding(kernel_size, 1), 0),
788
- )
789
- ),
790
- norm_f(
791
- Conv2d(
792
- 32,
793
- 128,
794
- (kernel_size, 1),
795
- (stride, 1),
796
- padding=(get_padding(kernel_size, 1), 0),
797
- )
798
- ),
799
- norm_f(
800
- Conv2d(
801
- 128,
802
- 512,
803
- (kernel_size, 1),
804
- (stride, 1),
805
- padding=(get_padding(kernel_size, 1), 0),
806
- )
807
- ),
808
- norm_f(
809
- Conv2d(
810
- 512,
811
- 1024,
812
- (kernel_size, 1),
813
- (stride, 1),
814
- padding=(get_padding(kernel_size, 1), 0),
815
- )
816
- ),
817
- norm_f(
818
- Conv2d(
819
- 1024,
820
- 1024,
821
- (kernel_size, 1),
822
- 1,
823
- padding=(get_padding(kernel_size, 1), 0),
824
- )
825
- ),
826
- ]
827
- )
828
- self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
829
-
830
- def forward(self, x):
831
- fmap = []
832
-
833
- # 1d to 2d
834
- b, c, t = x.shape
835
- if t % self.period != 0: # pad first
836
- n_pad = self.period - (t % self.period)
837
- x = F.pad(x, (0, n_pad), "reflect")
838
- t = t + n_pad
839
- x = x.view(b, c, t // self.period, self.period)
840
-
841
- for l in self.convs:
842
- x = l(x)
843
- x = F.leaky_relu(x, modules.LRELU_SLOPE)
844
- fmap.append(x)
845
- x = self.conv_post(x)
846
- fmap.append(x)
847
- x = torch.flatten(x, 1, -1)
848
-
849
- return x, fmap
 
spaces/Detomo/ai-comic-generation/src/lib/computeSha256.ts DELETED
@@ -1,14 +0,0 @@
1
- import { createHash } from 'node:crypto'
2
-
3
- /**
4
- * Returns a SHA3-256 hash for the given `content`.
5
- *
6
- * @see https://en.wikipedia.org/wiki/SHA-3
7
- *
8
- * @param {String} content
9
- *
10
- * @returns {String}
11
- */
12
- export function computeSha256(strContent: string) {
13
- return createHash('sha3-256').update(strContent).digest('hex')
14
- }
 
spaces/DonaSmix/anime-remove-background/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: Anime Remove Background
3
- emoji: 🪄🖼️
4
- colorFrom: indigo
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.1.4
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- duplicated_from: skytnt/anime-remove-background
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/EDGAhab/Aatrox-Talking/app.py DELETED
@@ -1,98 +0,0 @@
1
- import gradio as gr
2
- import os
3
- os.system('cd monotonic_align && python setup.py build_ext --inplace && cd ..')
4
- import torch
5
-
6
- import commons
7
- import utils
8
- from models import SynthesizerTrn
9
- from text.symbols import symbols
10
- from text import text_to_sequence
11
-
12
- import IPython.display as ipd
13
-
14
- import json
15
- import math
16
-
17
- #new imports
18
- import matplotlib.pyplot as plt
19
- import re
20
-
21
- from torch import nn
22
- from torch.nn import functional as F
23
- from torch.utils.data import DataLoader
24
-
25
- from models import SynthesizerTrn
26
- import unicodedata
27
- import openai
28
-
29
- def get_text(text, hps):
30
- text_norm = text_to_sequence(text, hps.data.text_cleaners)
31
- if hps.data.add_blank:
32
- text_norm = commons.intersperse(text_norm, 0)
33
- text_norm = torch.LongTensor(text_norm)
34
- return text_norm
35
-
36
- hps = utils.get_hparams_from_file("configs/biaobei_base.json")
37
-
38
- net_g = SynthesizerTrn(
39
- len(symbols),
40
- hps.data.filter_length // 2 + 1,
41
- hps.train.segment_size // hps.data.hop_length,
42
- **hps.model)
43
- _ = net_g.eval()
44
-
45
- _ = utils.load_checkpoint("G_aatrox.pth", net_g, None)
46
-
47
- def friend_chat(text, tts_input3):
48
- call_name = "亚托克斯"
49
- openai.api_key = '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'
50
- identity = tts_input3
51
- start_sequence = '\n'+str(call_name)+':'
52
- restart_sequence = "\nYou: "
53
- all_text = identity + restart_sequence
54
- if 1 == 1:
55
- prompt0 = text  # current prompt
56
- if text == 'quit':
57
- return prompt0
58
- prompt = identity + prompt0 + start_sequence
59
-
60
- response = openai.Completion.create(
61
- model="text-davinci-003",
62
- prompt=prompt,
63
- temperature=0.5,
64
- max_tokens=1000,
65
- top_p=1.0,
66
- frequency_penalty=0.5,
67
- presence_penalty=0.0,
68
- stop=["\nYou:"]
69
- )
70
- print(response)
71
- return response['choices'][0]['text'].strip()
72
-
73
- def sle(text, tts_input3):
74
- text = friend_chat(text, tts_input3).replace('\n','。').replace(' ',',')
75
- return text
76
-
77
- def infer(text,tts_input3):
78
- stn_tst = get_text(sle(text,tts_input3), hps)
79
- with torch.no_grad():
80
- x_tst = stn_tst.unsqueeze(0)
81
- x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
82
- audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=.667, noise_scale_w=0.8, length_scale=1)[0][0,0].data.cpu().float().numpy()
83
- sampling_rate = 22050
84
- return (sampling_rate, audio)
85
-
86
- app = gr.Blocks()
87
-
88
- with app:
89
- with gr.Tabs():
90
-
91
- with gr.TabItem("Basic"):
92
-
93
- tts_input1 = gr.TextArea(label="输入你想跟剑魔说的话", value="我是暮光星灵佐伊,我要三天之内杀了你")
94
- tts_input3 = gr.TextArea(label="写上你给他的设定", value="你叫亚托克斯,俗称剑魔,世界的终结者。")
95
- tts_submit = gr.Button("Generate", variant="primary")
96
- tts_output2 = gr.Audio(label="Output")
97
- tts_submit.click(infer, [tts_input1,tts_input3], [tts_output2])
98
- app.launch()
 
spaces/Eddycrack864/Applio-Inference/Applio-RVC-Fork/utils/clonerepo_experimental.py DELETED
@@ -1,253 +0,0 @@
1
- import os
2
- import subprocess
3
- import shutil
4
- from concurrent.futures import ThreadPoolExecutor, as_completed
5
- from tqdm.notebook import tqdm
6
- from pathlib import Path
7
- import requests
8
-
9
- def run_script():
10
- def run_cmd(cmd):
11
- process = subprocess.run(cmd, shell=True, check=True, text=True)
12
- return process.stdout
13
-
14
- # Change the current directory to /content/
15
- os.chdir('/content/')
16
- print("Changing dir to /content/")
17
-
18
- # Your function to edit the file
19
- def edit_file(file_path):
20
- temp_file_path = "/tmp/temp_file.py"
21
- changes_made = False
22
- with open(file_path, "r") as file, open(temp_file_path, "w") as temp_file:
23
- previous_line = ""
24
- second_previous_line = ""
25
- for line in file:
26
- new_line = line.replace("value=160", "value=128")
27
- if new_line != line:
28
- print("Replaced 'value=160' with 'value=128'")
29
- changes_made = True
30
- line = new_line
31
-
32
- new_line = line.replace("crepe hop length: 160", "crepe hop length: 128")
33
- if new_line != line:
34
- print("Replaced 'crepe hop length: 160' with 'crepe hop length: 128'")
35
- changes_made = True
36
- line = new_line
37
-
38
- new_line = line.replace("value=0.88", "value=0.75")
39
- if new_line != line:
40
- print("Replaced 'value=0.88' with 'value=0.75'")
41
- changes_made = True
42
- line = new_line
43
-
44
- if "label=i18n(\"输入源音量包络替换输出音量包络融合比例,越靠近1越使用输出包络\")" in previous_line and "value=1," in line:
45
- new_line = line.replace("value=1,", "value=0.25,")
46
- if new_line != line:
47
- print("Replaced 'value=1,' with 'value=0.25,' based on the condition")
48
- changes_made = True
49
- line = new_line
50
-
51
- if "label=i18n(\"总训练轮数total_epoch\")" in previous_line and "value=20," in line:
52
- new_line = line.replace("value=20,", "value=500,")
53
- if new_line != line:
54
- print("Replaced 'value=20,' with 'value=500,' based on the condition for DEFAULT EPOCH")
55
- changes_made = True
56
- line = new_line
57
-
58
- if 'choices=["pm", "harvest", "dio", "crepe", "crepe-tiny", "mangio-crepe", "mangio-crepe-tiny"], # Fork Feature. Add Crepe-Tiny' in previous_line:
59
- if 'value="pm",' in line:
60
- new_line = line.replace('value="pm",', 'value="mangio-crepe",')
61
- if new_line != line:
62
- print("Replaced 'value=\"pm\",' with 'value=\"mangio-crepe\",' based on the condition")
63
- changes_made = True
64
- line = new_line
65
-
66
- new_line = line.replace('label=i18n("输入训练文件夹路径"), value="E:\\\\语音音频+标注\\\\米津玄师\\\\src"', 'label=i18n("输入训练文件夹路径"), value="/content/dataset/"')
67
- if new_line != line:
68
- print("Replaced 'label=i18n(\"输入训练文件夹路径\"), value=\"E:\\\\语音音频+标注\\\\米津玄师\\\\src\"' with 'label=i18n(\"输入训练文件夹路径\"), value=\"/content/dataset/\"'")
69
- changes_made = True
70
- line = new_line
71
-
72
- if 'label=i18n("是否仅保存最新的ckpt文件以节省硬盘空间"),' in second_previous_line:
73
- if 'value=i18n("否"),' in line:
74
- new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),')
75
- if new_line != line:
76
- print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE ONLY LATEST")
77
- changes_made = True
78
- line = new_line
79
-
80
- if 'label=i18n("是否在每次保存时间点将最终小模型保存至weights文件夹"),' in second_previous_line:
81
- if 'value=i18n("否"),' in line:
82
- new_line = line.replace('value=i18n("否"),', 'value=i18n("是"),')
83
- if new_line != line:
84
- print("Replaced 'value=i18n(\"否\"),' with 'value=i18n(\"是\"),' based on the condition for SAVE SMALL WEIGHTS")
85
- changes_made = True
86
- line = new_line
87
-
88
- temp_file.write(line)
89
- second_previous_line = previous_line
90
- previous_line = line
91
-
92
- # After finished, we replace the original file with the temp one
93
- import shutil
94
- shutil.move(temp_file_path, file_path)
95
-
96
- if changes_made:
97
- print("Changes made and file saved successfully.")
98
- else:
99
- print("No changes were needed.")
100
-
101
- # Define the repo path
102
- repo_path = '/content/Applio-RVC-Fork'
103
-
104
- def copy_all_files_in_directory(src_dir, dest_dir):
105
- # Iterate over all files in source directory
106
- for item in Path(src_dir).glob('*'):
107
- if item.is_file():
108
- # Copy each file to destination directory
109
- shutil.copy(item, dest_dir)
110
- else:
111
- # If it's a directory, make a new directory in the destination and copy the files recursively
112
- new_dest = Path(dest_dir) / item.name
113
- new_dest.mkdir(exist_ok=True)
114
- copy_all_files_in_directory(str(item), str(new_dest))
115
-
116
- def clone_and_copy_repo(repo_path):
117
- # New repository link
118
- new_repo_link = "https://github.com/IAHispano/Applio-RVC-Fork/"
119
- # Temporary path to clone the repository
120
- temp_repo_path = "/content/temp_Applio-RVC-Fork"
121
- # New folder name
122
- new_folder_name = "Applio-RVC-Fork"
123
-
124
- # Clone the latest code from the new repository to a temporary location
125
- run_cmd(f"git clone {new_repo_link} {temp_repo_path}")
126
- os.chdir(temp_repo_path)
127
-
128
- run_cmd(f"git checkout 3fa4dad3d8961e5ca2522e9e12c0b4ddb71ad402")
129
- run_cmd(f"git checkout f9e606c279cb49420597519b0a83b92be81e42e4")
130
- run_cmd(f"git checkout 9e305588844c5442d58add1061b29beeca89d679")
131
- run_cmd(f"git checkout bf92dc1eb54b4f28d6396a4d1820a25896cc9af8")
132
- run_cmd(f"git checkout c3810e197d3cb98039973b2f723edf967ecd9e61")
133
- run_cmd(f"git checkout a33159efd134c2413b0afe26a76b7dc87926d2de")
134
- run_cmd(f"git checkout 24e251fb62c662e39ac5cf9253cc65deb9be94ec")
135
- run_cmd(f"git checkout ad5667d3017e93232dba85969cddac1322ba2902")
136
- run_cmd(f"git checkout ce9715392cf52dd5a0e18e00d1b5e408f08dbf27")
137
- run_cmd(f"git checkout 7c7da3f2ac68f3bd8f3ad5ca5c700f18ab9f90eb")
138
- run_cmd(f"git checkout 4ac395eab101955e8960b50d772c26f592161764")
139
- run_cmd(f"git checkout b15b358702294c7375761584e5276c811ffab5e8")
140
- run_cmd(f"git checkout 1501793dc490982db9aca84a50647764caa66e51")
141
- run_cmd(f"git checkout 21f7faf57219c75e6ba837062350391a803e9ae2")
142
- run_cmd(f"git checkout b5eb689fbc409b49f065a431817f822f554cebe7")
143
- run_cmd(f"git checkout 7e02fae1ebf24cb151bf6cbe787d06734aa65862")
144
- run_cmd(f"git checkout 6aea5ea18ed0b9a1e03fa5d268d6bc3c616672a9")
145
- run_cmd(f"git checkout f0f9b25717e59116473fb42bd7f9252cfc32b398")
146
- run_cmd(f"git checkout b394de424088a81fc081224bc27338a8651ad3b2")
147
- run_cmd(f"git checkout f1999406a88b80c965d2082340f5ea2bfa9ab67a")
148
- run_cmd(f"git checkout d98a0fa8dc715308dfc73eac5c553b69c6ee072b")
149
- run_cmd(f"git checkout d73267a415fb0eba98477afa43ef71ffd82a7157")
150
- run_cmd(f"git checkout 1a03d01356ae79179e1fb8d8915dc9cc79925742")
151
- run_cmd(f"git checkout 81497bb3115e92c754300c9b3992df428886a3e9")
152
- run_cmd(f"git checkout c5af1f8edcf79cb70f065c0110e279e78e48caf9")
153
- run_cmd(f"git checkout cdb3c90109387fa4dfa92f53c3864c71170ffc77")
154
-
155
- # Edit the file here, before copying
156
- #edit_file(f"{temp_repo_path}/infer-web.py")
157
-
158
- # Copy all files from the cloned repository to the existing path
159
- copy_all_files_in_directory(temp_repo_path, repo_path)
160
- print(f"Copying all {new_folder_name} files from GitHub.")
161
-
162
- # Change working directory back to /content/
163
- os.chdir('/content/')
164
- print("Changed path back to /content/")
165
-
166
- # Remove the temporary cloned repository
167
- shutil.rmtree(temp_repo_path)
168
-
169
- # Call the function
170
- clone_and_copy_repo(repo_path)
171
-
172
- # Download the credentials file for RVC archive sheet
173
- os.makedirs('/content/Applio-RVC-Fork/stats/', exist_ok=True)
174
- run_cmd("wget -q https://cdn.discordapp.com/attachments/945486970883285045/1114717554481569802/peppy-generator-388800-07722f17a188.json -O /content/Applio-RVC-Fork/stats/peppy-generator-388800-07722f17a188.json")
175
-
176
- # Forcefully delete any existing torchcrepe dependencies downloaded from an earlier run just in case
177
- shutil.rmtree('/content/Applio-RVC-Fork/torchcrepe', ignore_errors=True)
178
- shutil.rmtree('/content/torchcrepe', ignore_errors=True)
179
-
180
- # Download the torchcrepe folder from the maxrmorrison/torchcrepe repository
181
- run_cmd("git clone https://github.com/maxrmorrison/torchcrepe.git")
182
- shutil.move('/content/torchcrepe/torchcrepe', '/content/Applio-RVC-Fork/')
183
- shutil.rmtree('/content/torchcrepe', ignore_errors=True) # Delete the torchcrepe repository folder
184
-
185
- # Change the current directory to /content/Applio-RVC-Fork
186
- os.chdir('/content/Applio-RVC-Fork')
187
- os.makedirs('pretrained', exist_ok=True)
188
- os.makedirs('uvr5_weights', exist_ok=True)
189
-
190
- def download_file(url, filepath):
191
- response = requests.get(url, stream=True)
192
- response.raise_for_status()
193
-
194
- with open(filepath, "wb") as file:
195
- for chunk in response.iter_content(chunk_size=8192):
196
- if chunk:
197
- file.write(chunk)
198
-
199
- def download_pretrained_models():
200
- pretrained_models = {
201
- "pretrained": [
202
- "D40k.pth",
203
- "G40k.pth",
204
- "f0D40k.pth",
205
- "f0G40k.pth"
206
- ],
207
- "pretrained_v2": [
208
- "D40k.pth",
209
- "G40k.pth",
210
- "f0D40k.pth",
211
- "f0G40k.pth",
212
- "f0G48k.pth",
213
- "f0D48k.pth"
214
- ],
215
- "uvr5_weights": [
216
- "HP2-人声vocals+非人声instrumentals.pth",
217
- "HP5-主旋律人声vocals+其他instrumentals.pth",
218
- "VR-DeEchoNormal.pth",
219
- "VR-DeEchoDeReverb.pth",
220
- "VR-DeEchoAggressive.pth",
221
- "HP5_only_main_vocal.pth",
222
- "HP3_all_vocals.pth",
223
- "HP2_all_vocals.pth"
224
- ]
225
- }
226
- part2 = "I"
227
- base_url = "https://huggingface.co/lj1995/VoiceConversionWebU" + part2 + "/resolve/main/"
228
- base_path = "/content/Applio-RVC-Fork/"
229
- base_pathm = base_path
230
-
231
- # Calculate total number of files to download
232
- total_files = sum(len(files) for files in pretrained_models.values()) + 1 # +1 for hubert_base.pt
233
-
234
- with tqdm(total=total_files, desc="Downloading files") as pbar:
235
- for folder, models in pretrained_models.items():
236
- folder_path = os.path.join(base_path, folder)
237
- os.makedirs(folder_path, exist_ok=True)
238
- for model in models:
239
- url = base_url + folder + "/" + model
240
- filepath = os.path.join(folder_path, model)
241
- download_file(url, filepath)
242
- pbar.update()
243
-
244
- # Download hubert_base.pt to the base path
245
- hubert_url = base_url + "hubert_base.pt"
246
- hubert_filepath = os.path.join(base_pathm, "hubert_base.pt")
247
- download_file(hubert_url, hubert_filepath)
248
- pbar.update()
249
- def clone_repository(run_download):
250
- with ThreadPoolExecutor(max_workers=2) as executor:
251
- executor.submit(run_script)
252
- if run_download:
253
- executor.submit(download_pretrained_models)
 
spaces/Ekohai/bingAI/README.md DELETED
@@ -1,11 +0,0 @@
1
- ---
2
- title: BingAI
3
- emoji: 🐢
4
- colorFrom: indigo
5
- colorTo: indigo
6
- sdk: docker
7
- pinned: false
8
- license: mit
9
- app_port: 8080
10
- ---
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/FlippFuzz/whisper-webui/src/whisper/abstractWhisperContainer.py DELETED
@@ -1,108 +0,0 @@
1
- import abc
2
- from typing import List
3
- from src.config import ModelConfig
4
-
5
- from src.hooks.progressListener import ProgressListener
6
- from src.modelCache import GLOBAL_MODEL_CACHE, ModelCache
7
-
8
- class AbstractWhisperCallback:
9
- @abc.abstractmethod
10
- def invoke(self, audio, segment_index: int, prompt: str, detected_language: str, progress_listener: ProgressListener = None):
11
- """
12
- Peform the transcription of the given audio file or data.
13
-
14
- Parameters
15
- ----------
16
- audio: Union[str, np.ndarray, torch.Tensor]
17
- The audio file to transcribe, or the audio data as a numpy array or torch tensor.
18
- segment_index: int
19
- The target language of the transcription. If not specified, the language will be inferred from the audio content.
20
- task: str
21
- The task - either translate or transcribe.
22
- progress_listener: ProgressListener
23
- A callback to receive progress updates.
24
- """
25
- raise NotImplementedError()
26
-
27
- def _concat_prompt(self, prompt1, prompt2):
28
- if (prompt1 is None):
29
- return prompt2
30
- elif (prompt2 is None):
31
- return prompt1
32
- else:
33
- return prompt1 + " " + prompt2
34
-
35
- class AbstractWhisperContainer:
36
- def __init__(self, model_name: str, device: str = None, compute_type: str = "float16",
37
- download_root: str = None,
38
- cache: ModelCache = None, models: List[ModelConfig] = []):
39
- self.model_name = model_name
40
- self.device = device
41
- self.compute_type = compute_type
42
- self.download_root = download_root
43
- self.cache = cache
44
-
45
- # Will be created on demand
46
- self.model = None
47
-
48
- # List of known models
49
- self.models = models
50
-
51
- def get_model(self):
52
- if self.model is None:
53
-
54
- if (self.cache is None):
55
- self.model = self._create_model()
56
- else:
57
- model_key = "WhisperContainer." + self.model_name + ":" + (self.device if self.device else '')
58
- self.model = self.cache.get(model_key, self._create_model)
59
- return self.model
60
-
61
- @abc.abstractmethod
62
- def _create_model(self):
63
- raise NotImplementedError()
64
-
65
- def ensure_downloaded(self):
66
- pass
67
-
68
- @abc.abstractmethod
69
- def create_callback(self, language: str = None, task: str = None, initial_prompt: str = None, **decodeOptions: dict) -> AbstractWhisperCallback:
70
- """
71
- Create a WhisperCallback object that can be used to transcript audio files.
72
-
73
- Parameters
74
- ----------
75
- language: str
76
- The target language of the transcription. If not specified, the language will be inferred from the audio content.
77
- task: str
78
- The task - either translate or transcribe.
79
- initial_prompt: str
80
- The initial prompt to use for the transcription.
81
- decodeOptions: dict
82
- Additional options to pass to the decoder. Must be pickleable.
83
-
84
- Returns
85
- -------
86
- A WhisperCallback object.
87
- """
88
- raise NotImplementedError()
89
-
90
- # This is required for multiprocessing
91
- def __getstate__(self):
92
- return {
93
- "model_name": self.model_name,
94
- "device": self.device,
95
- "download_root": self.download_root,
96
- "models": self.models,
97
- "compute_type": self.compute_type
98
- }
99
-
100
- def __setstate__(self, state):
101
- self.model_name = state["model_name"]
102
- self.device = state["device"]
103
- self.download_root = state["download_root"]
104
- self.models = state["models"]
105
- self.compute_type = state["compute_type"]
106
- self.model = None
107
- # Depickled objects must use the global cache
108
- self.cache = GLOBAL_MODEL_CACHE
 
spaces/FrankZxShen/so-vits-svc-models-ba/vencoder/__init__.py DELETED
File without changes
spaces/GV05/text-emotion-detector/app.py DELETED
@@ -1,34 +0,0 @@
1
- import gradio as gr
2
- from transformers import pipeline
3
-
4
- model_id = "GV05/distilbert-base-uncased-finetuned-emotion"
5
- classifier = pipeline("text-classification", model=model_id)
6
-
7
- label_to_emotion = {
8
- 'LABEL_0': 'sadness',
9
- 'LABEL_1': 'joy',
10
- 'LABEL_2': 'love',
11
- 'LABEL_3': 'anger',
12
- 'LABEL_4': 'fear',
13
- 'LABEL_5': 'surprise',
14
- }
15
-
16
- def classify_emotion(text):
17
- preds = classifier(text, return_all_scores=True)
18
- res = {}
19
- for x in preds[0]:
20
- res[label_to_emotion[x['label']]] = x['score']
21
- return res
22
-
23
- image = gr.Textbox()
24
- label = gr.Label()
25
- examples = ["you are not too sensitive. you are not overreacting",
26
- "Thinking of you keeps me awake. Dreaming of you keeps me asleep. Being with you keeps me alive."]
27
-
28
- title = "Emotion Detector"
29
- description = "This model is a fine-tuned version of distilbert-base-uncased on the emotion dataset"
30
-
31
- intf = gr.Interface(fn=classify_emotion, inputs=image, outputs=label, examples=examples, title=title,
32
- description=description)
33
-
34
- intf.launch(inline=False)
 
spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/color_coordinated_insertion.py DELETED
@@ -1,62 +0,0 @@
1
- import numpy as np
2
- import os
3
- import pybullet as p
4
- import random
5
- from cliport.tasks import primitives
6
- from cliport.tasks.grippers import Spatula
7
- from cliport.tasks.task import Task
8
- from cliport.utils import utils
9
- import numpy as np
10
- from cliport.tasks.task import Task
11
- from cliport.utils import utils
12
- import pybullet as p
13
-
14
- class ColorCoordinatedInsertion(Task):
15
- """Insert each block into the fixture of the same color"""
16
-
17
- def __init__(self):
18
- super().__init__()
19
- self.max_steps = 20
20
- self.lang_template = "insert each block into the fixture of the same color"
21
- self.task_completed_desc = "done with color-coordinated-insertion."
22
- self.additional_reset()
23
-
24
- def reset(self, env):
25
- super().reset(env)
26
-
27
- # Add pallet.
28
- pallet_size = (0.35, 0.35, 0.01)
29
- pallet_pose = self.get_random_pose(env, pallet_size)
30
- pallet_urdf = 'pallet/pallet.urdf'
31
- env.add_object(pallet_urdf, pallet_pose, 'fixed')
32
-
33
- # Add fixtures and blocks.
34
- colors = ['red', 'blue', 'green', 'yellow']
35
- fixtures = []
36
- blocks = []
37
- fixture_size = (0.05, 0.05, 0.05)
38
- block_size = (0.04, 0.04, 0.04)
39
- fixture_urdf = 'insertion/fixture.urdf'
40
- block_urdf = 'block/block.urdf'
41
- for color in colors:
42
- # Add fixture.
43
- fixture_pose = self.get_random_pose(env, fixture_size)
44
- fixture_id = env.add_object(fixture_urdf, fixture_pose, color=utils.COLORS[color])
45
- fixtures.append(fixture_id)
46
-
47
- # Add block.
48
- block_pose = self.get_random_pose(env, block_size)
49
- block_id = env.add_object(block_urdf, block_pose, color=utils.COLORS[color])
50
- blocks.append(block_id)
51
-
52
- # Goal: each block is in the fixture of the same color.
53
- for i in range(len(blocks)):
54
- self.add_goal(objs=[blocks[i]], matches=np.ones((1, 1)), targ_poses=[p.getBasePositionAndOrientation(fixtures[i])], replace=False,
55
- rotations=True, metric='pose', params=None, step_max_reward=1 / len(blocks),
56
- language_goal=self.lang_template)
57
-
58
- # Goal: each fixture is on the pallet.
59
- for i in range(len(fixtures)):
60
- self.add_goal(objs=[fixtures[i]], matches=np.ones((1, 1)), targ_poses=[pallet_pose], replace=False,
61
- rotations=True, metric='zone', params=[(pallet_pose, pallet_size)], step_max_reward=1 / len(fixtures),
62
- language_goal=self.lang_template)
 
spaces/Gradio-Blocks/protGPT2_gradioFold/alphafold/alphafold/model/tf/shape_helpers_test.py DELETED
@@ -1,39 +0,0 @@
1
- # Copyright 2021 DeepMind Technologies Limited
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- """Tests for shape_helpers."""
16
-
17
- from alphafold.model.tf import shape_helpers
18
- import numpy as np
19
- import tensorflow.compat.v1 as tf
20
-
21
-
22
- class ShapeTest(tf.test.TestCase):
23
-
24
- def test_shape_list(self):
25
- """Test that shape_list can allow for reshaping to dynamic shapes."""
26
- a = tf.zeros([10, 4, 4, 2])
27
- p = tf.placeholder(tf.float32, shape=[None, None, 1, 4, 4])
28
- shape_dyn = shape_helpers.shape_list(p)[:2] + [4, 4]
29
-
30
- b = tf.reshape(a, shape_dyn)
31
- with self.session() as sess:
32
- out = sess.run(b, feed_dict={p: np.ones((20, 1, 1, 4, 4))})
33
-
34
- self.assertAllEqual(out.shape, (20, 1, 4, 4))
35
-
36
-
37
- if __name__ == '__main__':
38
- tf.disable_v2_behavior()
39
- tf.test.main()
 
spaces/Gradio-Blocks/uniformer_image_detection/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py DELETED
@@ -1,196 +0,0 @@
1
- # model settings
2
- model = dict(
3
- type='CascadeRCNN',
4
- pretrained='torchvision://resnet50',
5
- backbone=dict(
6
- type='ResNet',
7
- depth=50,
8
- num_stages=4,
9
- out_indices=(0, 1, 2, 3),
10
- frozen_stages=1,
11
- norm_cfg=dict(type='BN', requires_grad=True),
12
- norm_eval=True,
13
- style='pytorch'),
14
- neck=dict(
15
- type='FPN',
16
- in_channels=[256, 512, 1024, 2048],
17
- out_channels=256,
18
- num_outs=5),
19
- rpn_head=dict(
20
- type='RPNHead',
21
- in_channels=256,
22
- feat_channels=256,
23
- anchor_generator=dict(
24
- type='AnchorGenerator',
25
- scales=[8],
26
- ratios=[0.5, 1.0, 2.0],
27
- strides=[4, 8, 16, 32, 64]),
28
- bbox_coder=dict(
29
- type='DeltaXYWHBBoxCoder',
30
- target_means=[.0, .0, .0, .0],
31
- target_stds=[1.0, 1.0, 1.0, 1.0]),
32
- loss_cls=dict(
33
- type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
34
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
35
- roi_head=dict(
36
- type='CascadeRoIHead',
37
- num_stages=3,
38
- stage_loss_weights=[1, 0.5, 0.25],
39
- bbox_roi_extractor=dict(
40
- type='SingleRoIExtractor',
41
- roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
42
- out_channels=256,
43
- featmap_strides=[4, 8, 16, 32]),
44
- bbox_head=[
45
- dict(
46
- type='Shared2FCBBoxHead',
47
- in_channels=256,
48
- fc_out_channels=1024,
49
- roi_feat_size=7,
50
- num_classes=80,
51
- bbox_coder=dict(
52
- type='DeltaXYWHBBoxCoder',
53
- target_means=[0., 0., 0., 0.],
54
- target_stds=[0.1, 0.1, 0.2, 0.2]),
55
- reg_class_agnostic=True,
56
- loss_cls=dict(
57
- type='CrossEntropyLoss',
58
- use_sigmoid=False,
59
- loss_weight=1.0),
60
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
61
- loss_weight=1.0)),
62
- dict(
63
- type='Shared2FCBBoxHead',
64
- in_channels=256,
65
- fc_out_channels=1024,
66
- roi_feat_size=7,
67
- num_classes=80,
68
- bbox_coder=dict(
69
- type='DeltaXYWHBBoxCoder',
70
- target_means=[0., 0., 0., 0.],
71
- target_stds=[0.05, 0.05, 0.1, 0.1]),
72
- reg_class_agnostic=True,
73
- loss_cls=dict(
74
- type='CrossEntropyLoss',
75
- use_sigmoid=False,
76
- loss_weight=1.0),
77
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
78
- loss_weight=1.0)),
79
- dict(
80
- type='Shared2FCBBoxHead',
81
- in_channels=256,
82
- fc_out_channels=1024,
83
- roi_feat_size=7,
84
- num_classes=80,
85
- bbox_coder=dict(
86
- type='DeltaXYWHBBoxCoder',
87
- target_means=[0., 0., 0., 0.],
88
- target_stds=[0.033, 0.033, 0.067, 0.067]),
89
- reg_class_agnostic=True,
90
- loss_cls=dict(
91
- type='CrossEntropyLoss',
92
- use_sigmoid=False,
93
- loss_weight=1.0),
94
- loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
95
- ],
96
- mask_roi_extractor=dict(
97
- type='SingleRoIExtractor',
98
- roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
99
- out_channels=256,
100
- featmap_strides=[4, 8, 16, 32]),
101
- mask_head=dict(
102
- type='FCNMaskHead',
103
- num_convs=4,
104
- in_channels=256,
105
- conv_out_channels=256,
106
- num_classes=80,
107
- loss_mask=dict(
108
- type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
109
- # model training and testing settings
110
- train_cfg=dict(
111
- rpn=dict(
112
- assigner=dict(
113
- type='MaxIoUAssigner',
114
- pos_iou_thr=0.7,
115
- neg_iou_thr=0.3,
116
- min_pos_iou=0.3,
117
- match_low_quality=True,
118
- ignore_iof_thr=-1),
119
- sampler=dict(
120
- type='RandomSampler',
121
- num=256,
122
- pos_fraction=0.5,
123
- neg_pos_ub=-1,
124
- add_gt_as_proposals=False),
125
- allowed_border=0,
126
- pos_weight=-1,
127
- debug=False),
128
- rpn_proposal=dict(
129
- nms_pre=2000,
130
- max_per_img=2000,
131
- nms=dict(type='nms', iou_threshold=0.7),
132
- min_bbox_size=0),
133
- rcnn=[
134
- dict(
135
- assigner=dict(
136
- type='MaxIoUAssigner',
137
- pos_iou_thr=0.5,
138
- neg_iou_thr=0.5,
139
- min_pos_iou=0.5,
140
- match_low_quality=False,
141
- ignore_iof_thr=-1),
142
- sampler=dict(
143
- type='RandomSampler',
144
- num=512,
145
- pos_fraction=0.25,
146
- neg_pos_ub=-1,
147
- add_gt_as_proposals=True),
148
- mask_size=28,
149
- pos_weight=-1,
150
- debug=False),
151
- dict(
152
- assigner=dict(
153
- type='MaxIoUAssigner',
154
- pos_iou_thr=0.6,
155
- neg_iou_thr=0.6,
156
- min_pos_iou=0.6,
157
- match_low_quality=False,
158
- ignore_iof_thr=-1),
159
- sampler=dict(
160
- type='RandomSampler',
161
- num=512,
162
- pos_fraction=0.25,
163
- neg_pos_ub=-1,
164
- add_gt_as_proposals=True),
165
- mask_size=28,
166
- pos_weight=-1,
167
- debug=False),
168
- dict(
169
- assigner=dict(
170
- type='MaxIoUAssigner',
171
- pos_iou_thr=0.7,
172
- neg_iou_thr=0.7,
173
- min_pos_iou=0.7,
174
- match_low_quality=False,
175
- ignore_iof_thr=-1),
176
- sampler=dict(
177
- type='RandomSampler',
178
- num=512,
179
- pos_fraction=0.25,
180
- neg_pos_ub=-1,
181
- add_gt_as_proposals=True),
182
- mask_size=28,
183
- pos_weight=-1,
184
- debug=False)
185
- ]),
186
- test_cfg=dict(
187
- rpn=dict(
188
- nms_pre=1000,
189
- max_per_img=1000,
190
- nms=dict(type='nms', iou_threshold=0.7),
191
- min_bbox_size=0),
192
- rcnn=dict(
193
- score_thr=0.05,
194
- nms=dict(type='nms', iou_threshold=0.5),
195
- max_per_img=100,
196
- mask_thr_binary=0.5)))
 
spaces/Gradio-Blocks/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = './ms_rcnn_x101_64x4d_fpn_1x_coco.py'
2
- # learning policy
3
- lr_config = dict(step=[16, 22])
4
- runner = dict(type='EpochBasedRunner', max_epochs=24)
 
spaces/Gradio-Blocks/uniformer_image_detection/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py DELETED
@@ -1,65 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/mask_rcnn_r50_fpn.py',
3
- '../_base_/datasets/coco_instance.py',
4
- '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
5
- ]
6
- model = dict(
7
- pretrained='open-mmlab://regnetx_3.2gf',
8
- backbone=dict(
9
- _delete_=True,
10
- type='RegNet',
11
- arch='regnetx_3.2gf',
12
- out_indices=(0, 1, 2, 3),
13
- frozen_stages=1,
14
- norm_cfg=dict(type='BN', requires_grad=True),
15
- norm_eval=True,
16
- style='pytorch'),
17
- neck=dict(
18
- type='FPN',
19
- in_channels=[96, 192, 432, 1008],
20
- out_channels=256,
21
- num_outs=5))
22
- img_norm_cfg = dict(
23
- # The mean and std are used in PyCls when training RegNets
24
- mean=[103.53, 116.28, 123.675],
25
- std=[57.375, 57.12, 58.395],
26
- to_rgb=False)
27
- train_pipeline = [
28
- dict(type='LoadImageFromFile'),
29
- dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
30
- dict(
31
- type='Resize',
32
- img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
33
- (1333, 768), (1333, 800)],
34
- multiscale_mode='value',
35
- keep_ratio=True),
36
- dict(type='RandomFlip', flip_ratio=0.5),
37
- dict(type='Normalize', **img_norm_cfg),
38
- dict(type='Pad', size_divisor=32),
39
- dict(type='DefaultFormatBundle'),
40
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
41
- ]
42
- test_pipeline = [
43
- dict(type='LoadImageFromFile'),
44
- dict(
45
- type='MultiScaleFlipAug',
46
- img_scale=(1333, 800),
47
- flip=False,
48
- transforms=[
49
- dict(type='Resize', keep_ratio=True),
50
- dict(type='RandomFlip'),
51
- dict(type='Normalize', **img_norm_cfg),
52
- dict(type='Pad', size_divisor=32),
53
- dict(type='ImageToTensor', keys=['img']),
54
- dict(type='Collect', keys=['img']),
55
- ])
56
- ]
57
- data = dict(
58
- train=dict(pipeline=train_pipeline),
59
- val=dict(pipeline=test_pipeline),
60
- test=dict(pipeline=test_pipeline))
61
- optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
62
- lr_config = dict(step=[28, 34])
63
- runner = dict(type='EpochBasedRunner', max_epochs=36)
64
- optimizer_config = dict(
65
- _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
 
spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/dpt/models.py DELETED
@@ -1,126 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from torch import Tensor
4
-
5
- from .base_model import BaseModel
6
- from .blocks import (
7
- FeatureFusionBlock_custom,
8
- Interpolate,
9
- _make_encoder,
10
- forward_vit,
11
- )
12
-
13
-
14
- def _make_fusion_block(features, use_bn):
15
- return FeatureFusionBlock_custom(
16
- features,
17
- nn.ReLU(False),
18
- deconv=False,
19
- bn=use_bn,
20
- expand=False,
21
- align_corners=True,
22
- )
23
-
24
-
25
- class DPT(BaseModel):
26
- def __init__(
27
- self,
28
- head,
29
- features=256,
30
- backbone="vitb_rn50_384",
31
- readout="project",
32
- channels_last=False,
33
- use_bn=False,
34
- enable_attention_hooks=False,
35
- ):
36
-
37
- super(DPT, self).__init__()
38
-
39
- self.channels_last = channels_last
40
-
41
- hooks = {
42
- "vitb_rn50_384": [0, 1, 8, 11],
43
- "vitb16_384": [2, 5, 8, 11],
44
- "vitl16_384": [5, 11, 17, 23],
45
- }
46
-
47
- # Instantiate backbone and reassemble blocks
48
- self.pretrained, self.scratch = _make_encoder(
49
- backbone,
50
- features,
51
- False,  # Set to true if you want to train from scratch; uses ImageNet weights
52
- groups=1,
53
- expand=False,
54
- exportable=False,
55
- hooks=hooks[backbone],
56
- use_readout=readout,
57
- enable_attention_hooks=enable_attention_hooks,
58
- )
59
-
60
- self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
61
- self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
62
- self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
63
- self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
64
-
65
- self.scratch.output_conv = head
66
-
67
- def forward(self, x: Tensor) -> Tensor:
68
- if self.channels_last == True:
69
- x.contiguous(memory_format=torch.channels_last)
70
-
71
- layer_1, layer_2, layer_3, layer_4 = forward_vit(self.pretrained, x)
72
-
73
- layer_1_rn = self.scratch.layer1_rn(layer_1)
74
- layer_2_rn = self.scratch.layer2_rn(layer_2)
75
- layer_3_rn = self.scratch.layer3_rn(layer_3)
76
- layer_4_rn = self.scratch.layer4_rn(layer_4)
77
-
78
- path_4 = self.scratch.refinenet4(layer_4_rn)
79
- path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
80
- path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
81
- path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
82
-
83
- out = self.scratch.output_conv(path_1)
84
-
85
- return out
86
-
87
-
88
- class DPTDepthModel(DPT):
89
- def __init__(
90
- self, path=None, non_negative=True, scale=1.0, shift=0.0, invert=False, **kwargs
91
- ):
92
- features = kwargs["features"] if "features" in kwargs else 256
93
-
94
- self.scale = scale
95
- self.shift = shift
96
- self.invert = invert
97
-
98
- head = nn.Sequential(
99
- nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1),
100
- Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
101
- nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
102
- nn.ReLU(True),
103
- nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
104
- nn.ReLU(True) if non_negative else nn.Identity(),
105
- nn.Identity(),
106
- )
107
-
108
- super().__init__(head, **kwargs)
109
-
110
- if path is not None:
111
- self.load(path)
112
-
113
- def forward(self, x: Tensor) -> Tensor:
114
- """Input x of shape [b, c, h, w]
115
- Return tensor of shape [b, c, h, w]
116
- """
117
- inv_depth = super().forward(x)
118
-
119
- if self.invert:
120
- depth = self.scale * inv_depth + self.shift
121
- depth[depth < 1e-8] = 1e-8
122
- depth = 1.0 / depth
123
- return depth
124
- else:
125
- return inv_depth
126
-
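For context, a minimal usage sketch of the deleted model follows. It assumes the module's relative imports (.base_model, .blocks) resolve as in the original package, and the input size is an illustrative assumption; it is not part of this repository.

import torch

# Hypothetical example: monocular depth inference with the hybrid ViT-ResNet backbone.
model = DPTDepthModel(path=None, backbone="vitb_rn50_384", non_negative=True)
model.eval()

x = torch.randn(1, 3, 384, 384)  # [b, c, h, w]; 384x384 matches the backbone name
with torch.no_grad():
    inv_depth = model(x)  # inverse depth, shape [b, c, h, w] per the docstring above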
 
spaces/HaloMaster/chinesesummary/fengshen/models/megatron_t5/__init__.py DELETED
@@ -1,49 +0,0 @@
- # coding=utf-8
- # Copyright 2021 The IDEA Authors. All rights reserved.
-
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from typing import TYPE_CHECKING
-
- from transformers.file_utils import _LazyModule, is_torch_available
-
-
- _import_structure = {
-     "configuration_megatron_t5": ["T5Config"],
-     "tokenization_megatron_t5": ["T5Tokenizer"],
- }
-
- if is_torch_available():
-     _import_structure["modeling_megatron_t5"] = [
-         "T5Model",
-         "T5EncoderModel",
-         "T5ForConditionalGeneration"
-     ]
-
-
- if TYPE_CHECKING:
-     from .configuration_megatron_t5 import T5Config
-     from .tokenization_megatron_t5 import T5Tokenizer
-
-     if is_torch_available():
-         from .modeling_megatron_t5 import (
-             T5Model,
-             T5EncoderModel,
-             T5ForConditionalGeneration
-         )
-
- else:
-     import sys
-
-     sys.modules[__name__] = _LazyModule(
-         __name__, globals()["__file__"], _import_structure)
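For reference, a brief sketch of how this lazy-loading __init__ is typically consumed. The package path fengshen.models.megatron_t5 is assumed from the directory layout shown above and is not verified here.

# Hypothetical usage: attribute access triggers the real submodule import,
# so importing the package stays cheap until a class is actually needed.
from fengshen.models.megatron_t5 import T5Config, T5Tokenizer

config = T5Config()  # resolved from configuration_megatron_t5 on first access
# Model classes (T5Model, T5EncoderModel, T5ForConditionalGeneration) are only
# registered when is_torch_available() is True, mirroring the guard above.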
 
spaces/Happys/chatbot/Dockerfile DELETED
@@ -1,8 +0,0 @@
- # Pull the base image
- FROM happyclo/libre:latest
-
- # Install dependencies
- RUN cd /app/api && npm install
-
- # Command to run on container start
- CMD ["npm", "run", "backend"]
 
spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/tasks/sentence_ranking.py DELETED
@@ -1,219 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- #
- # This source code is licensed under the MIT license found in the
- # LICENSE file in the root directory of this source tree.
-
- import logging
- import os
-
- import numpy as np
- from fairseq import utils
- from fairseq.data import (
-     ConcatSentencesDataset,
-     Dictionary,
-     IdDataset,
-     NestedDictionaryDataset,
-     NumelDataset,
-     NumSamplesDataset,
-     PrependTokenDataset,
-     RawLabelDataset,
-     RightPadDataset,
-     SortDataset,
-     TruncateDataset,
-     data_utils,
- )
- from fairseq.data.shorten_dataset import maybe_shorten_dataset
- from fairseq.tasks import LegacyFairseqTask, register_task
-
-
- logger = logging.getLogger(__name__)
-
-
- @register_task("sentence_ranking")
- class SentenceRankingTask(LegacyFairseqTask):
-     """
-     Ranking task on multiple sentences.
-
-     Args:
-         dictionary (Dictionary): the dictionary for the input of the task
-     """
-
-     @staticmethod
-     def add_args(parser):
-         """Add task-specific arguments to the parser."""
-         parser.add_argument("data", metavar="FILE", help="file prefix for data")
-         parser.add_argument(
-             "--num-classes", type=int, help="number of sentences to be ranked"
-         )
-         parser.add_argument(
-             "--init-token",
-             type=int,
-             help="add token at the beginning of each batch item",
-         )
-         parser.add_argument(
-             "--separator-token", type=int, help="add separator token between inputs"
-         )
-         parser.add_argument("--no-shuffle", action="store_true")
-         parser.add_argument(
-             "--shorten-method",
-             default="none",
-             choices=["none", "truncate", "random_crop"],
-             help="if not none, shorten sequences that exceed --tokens-per-sample",
-         )
-         parser.add_argument(
-             "--shorten-data-split-list",
-             default="",
-             help="comma-separated list of dataset splits to apply shortening to, "
-             'e.g., "train,valid" (default: all dataset splits)',
-         )
-         parser.add_argument(
-             "--max-option-length", type=int, help="max length for each option"
-         )
-
-     def __init__(self, args, dictionary):
-         super().__init__(args)
-         self.dictionary = dictionary
-
-     @classmethod
-     def load_dictionary(cls, args, filename, source=True):
-         """Load the dictionary from the filename
-
-         Args:
-             filename (str): the filename
-         """
-         dictionary = Dictionary.load(filename)
-         dictionary.add_symbol("<mask>")
-         return dictionary
-
-     @classmethod
-     def setup_task(cls, args, **kwargs):
-         assert (
-             args.criterion == "sentence_ranking"
-         ), "Must set --criterion=sentence_ranking"
-
-         # load data dictionary
-         data_dict = cls.load_dictionary(
-             args,
-             os.path.join(args.data, "input0", "dict.txt"),
-             source=True,
-         )
-         logger.info("[input] dictionary: {} types".format(len(data_dict)))
-         return SentenceRankingTask(args, data_dict)
-
-     def load_dataset(self, split, combine=False, **kwargs):
-         """Load a given dataset split (e.g., train, valid, test)."""
-
-         def get_path(type, split):
-             return os.path.join(self.args.data, type, split)
-
-         def make_dataset(type, dictionary):
-             split_path = get_path(type, split)
-
-             dataset = data_utils.load_indexed_dataset(
-                 split_path,
-                 self.source_dictionary,
-                 self.args.dataset_impl,
-                 combine=combine,
-             )
-             return dataset
-
-         input0 = make_dataset("input0", self.source_dictionary)
-         input_options = [
-             make_dataset("input{idx}".format(idx=idx + 1), self.source_dictionary)
-             for idx in range(self.args.num_classes)
-         ]
-
-         if self.args.separator_token is not None:
-             input0 = PrependTokenDataset(input0, self.args.separator_token)
-
-         src_tokens = []
-         for input_option in input_options:
-             if self.args.init_token is not None:
-                 input_option = PrependTokenDataset(input_option, self.args.init_token)
-             if self.args.max_option_length is not None:
-                 input_option = TruncateDataset(
-                     input_option, self.args.max_option_length
-                 )
-             src_token = ConcatSentencesDataset(input_option, input0)
-             src_token = maybe_shorten_dataset(
-                 src_token,
-                 split,
-                 self.args.shorten_data_split_list,
-                 self.args.shorten_method,
-                 self.args.max_positions,
-                 self.args.seed,
-             )
-             src_tokens.append(src_token)
-
-         with data_utils.numpy_seed(self.args.seed):
-             shuffle = np.random.permutation(len(src_tokens[0]))
-
-         dataset = {
-             "id": IdDataset(),
-             "nsentences": NumSamplesDataset(),
-             "ntokens": NumelDataset(src_tokens[0], reduce=True),
-         }
-
-         for src_token_idx in range(len(src_tokens)):
-             dataset.update(
-                 {
-                     "net_input{idx}".format(idx=src_token_idx + 1): {
-                         "src_tokens": RightPadDataset(
-                             src_tokens[src_token_idx],
-                             pad_idx=self.source_dictionary.pad(),
-                         ),
-                         "src_lengths": NumelDataset(
-                             src_tokens[src_token_idx], reduce=False
-                         ),
-                     }
-                 }
-             )
-
-         label_path = "{}.label".format(get_path("label", split))
-         if os.path.exists(label_path):
-             with open(label_path) as h:
-                 dataset.update(
-                     target=RawLabelDataset([int(x.strip()) for x in h.readlines()])
-                 )
-
-         nested_dataset = NestedDictionaryDataset(
-             dataset,
-             sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
-         )
-
-         if self.args.no_shuffle:
-             dataset = nested_dataset
-         else:
-             dataset = SortDataset(
-                 nested_dataset,
-                 # shuffle
-                 sort_order=[shuffle],
-             )
-
-         logger.info("Loaded {0} with #samples: {1}".format(split, len(dataset)))
-
-         self.datasets[split] = dataset
-         return self.datasets[split]
-
-     def build_model(self, args):
-         from fairseq import models
-
-         model = models.build_model(args, self)
-
-         model.register_classification_head(
-             getattr(args, "ranking_head_name", "sentence_classification_head"),
-             num_classes=1,
-         )
-
-         return model
-
-     def max_positions(self):
-         return self.args.max_positions
-
-     @property
-     def source_dictionary(self):
-         return self.dictionary
-
-     @property
-     def target_dictionary(self):
-         return self.dictionary
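To close out, a minimal sketch of how this task might be set up programmatically. The argument values are illustrative assumptions mirroring add_args() and the attributes read in load_dataset(); real runs would normally go through the fairseq CLI with --task sentence_ranking --criterion sentence_ranking and prepared data directories.

from argparse import Namespace
from fairseq.tasks.sentence_ranking import SentenceRankingTask

# Hypothetical arguments; field names follow add_args() above, values are placeholders.
args = Namespace(
    data="data-bin/ranking",          # expects input0/, input1/, ..., label/ subdirs
    criterion="sentence_ranking",     # required by the assert in setup_task()
    num_classes=4,
    init_token=0,
    separator_token=2,
    no_shuffle=False,
    shorten_method="none",
    shorten_data_split_list="",
    max_option_length=None,
    max_positions=512,
    dataset_impl=None,
    seed=1,
)

task = SentenceRankingTask.setup_task(args)   # loads input0/dict.txt
task.load_dataset("valid")                    # builds the ranking batches described above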