parquet-converter committed on
Commit 8416c76 · 1 Parent(s): c59a64d

Update parquet files (step 57 of 476)

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/English900AudioCdFreeDownload.md +0 -37
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Corazon Salvaje English Subtitle) The Best Version of Corazon Salvaje with English Subtitles.md +0 -126
  3. spaces/1gistliPinn/ChatGPT4/Examples/Caterpillar ET Factory Password.rar.md +0 -104
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Black uTorrent Pro APK The Ultimate App for Torrent Lovers.md +0 -73
  5. spaces/1phancelerku/anime-remove-background/Arknights A Role Playing Game with Stunning Graphics and Sci-Fi Plot. Download Now for Mac and PC.md +0 -112
  6. spaces/1phancelerku/anime-remove-background/Brawl Stars APK Everything You Need to Know About the Best Mobile Game of 2023.md +0 -150
  7. spaces/1phancelerku/anime-remove-background/Download Wordscapes Uncrossed Mod APK for Free - Unlimited Coins and Hints.md +0 -147
  8. spaces/1phancelerku/anime-remove-background/Enjoy Taxi Game 2 on Windows PC Career Mode and Realistic GPS.md +0 -113
  9. spaces/1toTree/lora_test/ppdiffusers/utils/deprecation_utils.py +0 -64
  10. spaces/52Hz/CMFNet_dehazing/app.py +0 -38
  11. spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/txt_processors/en.py +0 -77
  12. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/models/diffusion/dpm_solver/__init__.py +0 -1
  13. spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/admin/export/$types.d.ts +0 -8
  14. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Vercel.py +0 -377
  15. spaces/AdamOswald1/finetuned_diffusion/app.py +0 -372
  16. spaces/Adapter/CoAdapter/ldm/modules/extra_condition/openpose/model.py +0 -178
  17. spaces/Adapter/T2I-Adapter/ldm/modules/diffusionmodules/__init__.py +0 -0
  18. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthbuttons/Factory.d.ts +0 -5
  19. spaces/AiBototicus/BucksAI-2/README.md +0 -13
  20. spaces/AlekseyKorshuk/instagram-filter-removal/modules/normalization.py +0 -16
  21. spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/PlayInteractively.py +0 -197
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/multi_subject_dreambooth/README.md +0 -338
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_ddim_parallel.py +0 -642
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_modeling_common.py +0 -567
  25. spaces/Andy1621/uniformer_image_detection/mmdet/core/anchor/anchor_generator.py +0 -727
  26. spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/scripts/train.sh +0 -19
  27. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/optimizer/builder.py +0 -44
  28. spaces/Apex-X/ROOPOK/CONTRIBUTING.md +0 -25
  29. spaces/Apex-X/nono/roop/processors/__init__.py +0 -0
  30. spaces/Archan/ArXivAudio/get_paper.py +0 -17
  31. spaces/Asahi402/White-box-Cartoonization/wbc/network.py +0 -62
  32. spaces/Awesimo/jojogan/e4e/configs/data_configs.py +0 -41
  33. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/analyze_model.py +0 -159
  34. spaces/BasToTheMax/openai-whisper-large-v2/README.md +0 -13
  35. spaces/Benson/text-generation/Examples/Descargar Fichas Mgicas 3 Bum Bum Tam Tam.md +0 -77
  36. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/more_itertools/__init__.py +0 -4
  37. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/zipp.py +0 -329
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/_version.py +0 -2
  39. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/sampling.py +0 -50
  40. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/sem_optimize_patch.py +0 -532
  41. spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/fpn.py +0 -277
  42. spaces/CarlDennis/HYTTS/text/__init__.py +0 -33
  43. spaces/CikeyQI/meme-api/docs/install.md +0 -124
  44. spaces/CorvaeOboro/gen_ability_icon/torch_utils/custom_ops.py +0 -126
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/_backends/_trio.py +0 -996
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/middleware/httpsredirect.py +0 -3
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/inference/_client.py +0 -1258
  48. spaces/DexterSptizu/drug_interaction/README.md +0 -13
  49. spaces/Djdjeuu/MGX-Midjourney-v4/app.py +0 -8
  50. spaces/EAraid12/LoRA-DreamBooth-Training-UI/train_dreambooth_lora.py +0 -1026
spaces/1acneusushi/gradio-2dmoleculeeditor/data/English900AudioCdFreeDownload.md DELETED
@@ -1,37 +0,0 @@
1
-
2
- <h1>How to Learn English with English 900 Audio CD Free Download</h1>
3
- <p>English 900 is a popular and effective English language course that was developed by the US government with official support. It consists of 900 sentences that cover various topics and situations, such as greetings, introductions, shopping, travel, etc. The course is designed to help learners master English conversation through repetition and memorization of the sentences.</p>
4
- <p>If you want to learn English with English 900, you can download the audio CD for free from the Internet Archive. The Internet Archive is a non-profit organization that preserves and provides access to millions of digital books, movies, music, and other media. You can find the English 900 audio CD free download at these links:</p>
5
- <h2>English900AudioCdFreeDownload</h2><br /><p><b><b>Download Zip</b> &#10003;&#10003;&#10003; <a href="https://byltly.com/2uKvi0">https://byltly.com/2uKvi0</a></b></p><br /><br />
6
- <ul>
7
- <li><a href="https://archive.org/details/english900000engl">English 900 by English Language Services</a></li>
8
- <li><a href="https://archive.org/details/newenglish900stu0000unse">New English 900 by Collier Macmillan Publishers</a></li>
9
- <li><a href="https://archive.org/details/podcast_ybm-new-english-900_953341790">YBM New English 900 by YBM Education</a></li>
10
- </ul>
11
- <p>Each link contains a complete set of audio files that correspond to the sentences in the course. You can listen to them online or download them to your computer or mobile device. You can also find the PDF versions of the textbooks and word indexes on the same pages.</p>
12
- <p>To learn English with English 900 audio CD free download, you should follow these steps:</p>
13
- <ol>
14
- <li>Choose a topic that interests you or suits your needs.</li>
15
- <li>Read and listen to the sentences carefully and try to understand their meaning and pronunciation.</li>
16
- <li>Repeat the sentences aloud several times until you can say them fluently and confidently.</li>
17
- <li>Review the sentences regularly and practice them with a partner or a native speaker if possible.</li>
18
- </ol>
19
- <p>By following these steps, you can improve your English skills and achieve your goals with English 900 audio CD free download. This course has been proven to work for many learners around the world, including Congo natives who became proficient in English in just three months[^3^]. So why not give it a try and see for yourself?</p>
20
-
21
- <p>If you want to learn more about English 900 and its benefits, you can also check out some of the reviews and testimonials from other learners who have used this course. Here are some examples:</p>
22
- <blockquote>
23
- <p>"I have been studying English for a long time, but I always felt that something was missing. Then I found English 900 and it changed everything. It helped me to speak English more naturally and confidently. I recommend it to anyone who wants to improve their English."</p>
24
- <cite>- Maria, Brazil</cite>
25
- </blockquote>
26
- <blockquote>
27
- <p>"English 900 is a great course for beginners and intermediate learners. It covers all the essential topics and situations that you need to know in English. It is easy to follow and fun to practice. I enjoyed listening to the audio CD and repeating the sentences. It really improved my pronunciation and fluency."</p>
28
- <cite>- Ahmed, Egypt</cite>
29
- </blockquote>
30
- <blockquote>
31
- <p>"I used English 900 as a supplement to my regular English classes. It helped me to review and reinforce what I learned in class. It also exposed me to different accents and expressions that I didn't hear in class. It was very useful and interesting."</p>
32
- <cite>- Li, China</cite>
33
- </blockquote>
34
- <p>As you can see, English 900 is a powerful and effective way to learn English. You can download the audio CD for free from the Internet Archive and start learning today. Don't miss this opportunity to improve your English skills and achieve your goals with English 900 audio CD free download.</p>
35
- <p></p> cec2833e83<br />
36
- <br />
37
- <br />
 
spaces/1acneusushi/gradio-2dmoleculeeditor/data/HD Online Player (Corazon Salvaje English Subtitle) The Best Version of Corazon Salvaje with English Subtitles.md DELETED
@@ -1,126 +0,0 @@
1
-
2
- <h1>HD Online Player (Corazon Salvaje English Subtitle)</h1>
3
- <p>If you are a fan of Mexican telenovelas, you might have heard of Corazon Salvaje, one of the most successful and acclaimed shows in the history of Latin American television. But if you don't speak Spanish, you might have trouble finding and enjoying this classic drama. That's why in this article, we will tell you everything you need to know about Corazon Salvaje and how to watch it with English subtitles using HD Online Player, a free and easy-to-use streaming software.</p>
4
- <h2>What is Corazon Salvaje?</h2>
5
- <p>Corazon Salvaje (Wild Heart) is a Mexican telenovela that aired from 1993 to 1994 on Televisa. It is based on the novel of the same name by Caridad Bravo Adams, which has been adapted several times for television and film. The story is set in the late 19th century and revolves around the love triangle between two brothers, Francisco and Juan de Dios Alcazar y Valle, and a young woman, Monica Molnar.</p>
6
- <h2>HD Online Player (Corazon Salvaje English Subtitle)</h2><br /><p><b><b>Download File</b> &#10040;&#10040;&#10040; <a href="https://byltly.com/2uKx0q">https://byltly.com/2uKx0q</a></b></p><br /><br />
7
- <h3>A brief summary of the plot</h3>
8
- <p>The plot of Corazon Salvaje is complex and full of twists and turns, but here is a simplified version. Francisco and Juan de Dios are the sons of a wealthy landowner, Don Noel Alcazar y Valle, who has a secret affair with a married woman, Sofia Molnar. Sofia gives birth to Juan de Dios, who is raised by her husband, Andres Molnar, as his own son. Francisco is the legitimate son of Don Noel and his wife, Catalina.</p>
9
- <p>When Don Noel dies, he leaves his fortune to Francisco and Juan de Dios, but Catalina refuses to acknowledge Juan de Dios as her husband's son and tries to take everything away from him. Juan de Dios grows up as a rebellious and adventurous young man, who falls in love with Monica, Andres' daughter and Sofia's stepdaughter. Monica is a sweet and innocent girl who is engaged to Francisco, who is a cold and ambitious man.</p>
10
- <p>The story follows the struggles and obstacles that Juan de Dios and Monica face to be together, as well as the intrigues and betrayals that surround them. Along the way, they encounter other characters who help or hinder their love, such as Aimee Molnar, Monica's sister who is obsessed with Juan de Dios; Azucena, a gypsy girl who loves Francisco; Meche, Juan de Dios' loyal friend; and Count Andrés Corona, a mysterious and powerful man who has a hidden agenda.</p>
11
- <h3>The main characters and actors</h3>
12
- <p>The main characters of Corazon Salvaje are:</p>
13
- <p>Watch Corazon Salvaje with English subtitles online<br />
14
- Corazon Salvaje streaming HD with English subs<br />
15
- How to download Corazon Salvaje episodes with English subtitles<br />
16
- Corazon Salvaje full episodes online HD with English subtitles<br />
17
- Best online player for Corazon Salvaje with English subs<br />
18
- Corazon Salvaje English subtitle online player HD quality<br />
19
- Where to watch Corazon Salvaje with English subtitles online HD<br />
20
- Corazon Salvaje online HD player with English subtitle option<br />
21
- Corazon Salvaje HD online player compatible with English subtitles<br />
22
- Corazon Salvaje online streaming with English subtitles HD<br />
23
- Corazon Salvaje episodes with English subtitles online HD player<br />
24
- Online HD player for Corazon Salvaje that supports English subtitles<br />
25
- Corazon Salvaje online HD player with subtitle settings in English<br />
26
- Watch Corazon Salvaje in HD with English subtitles online<br />
27
- Corazon Salvaje online player HD with English subtitle feature<br />
28
- Corazon Salvaje HD streaming with English subtitles online<br />
29
- Download Corazon Salvaje with English subtitles online HD player<br />
30
- Corazon Salvaje full episodes with English subtitles online HD<br />
31
- Online player for Corazon Salvaje HD with English subtitle option<br />
32
- Corazon Salvaje online HD player that works with English subtitles<br />
33
- Watch Corazon Salvaje episodes with English subtitles online HD<br />
34
- Corazon Salvaje streaming online HD with subtitle in English<br />
35
- How to watch Corazon Salvaje with English subtitles online HD<br />
36
- Corazon Salvaje online player in HD quality with English subtitles<br />
37
- Online HD player for Corazon Salvaje with subtitle in English<br />
38
- Watch Corazon Salvaje full episodes with English subtitles online<br />
39
- Corazon Salvaje online streaming in HD quality with English subtitles<br />
40
- Download Corazon Salvaje episodes in HD quality with English subtitles<br />
41
- Online player for Corazon Salvaje that has English subtitle feature<br />
42
- Watch Corazon Salvaje in HD quality with subtitle in English<br />
43
- Online streaming of Corazon Salvaje with English subtitles in HD quality<br />
44
- How to stream Corazon Salvaje in HD quality with subtitle in English<br />
45
- Online player for Corazon Salvaje that supports subtitle in English<br />
46
- Watch Corazon Salvaje episodes in HD quality with subtitle in English<br />
47
- Download Corazon Salvaje full episodes in HD quality with subtitle in English<br />
48
- Online streaming of Corazon Salvaje episodes with subtitle in English<br />
49
- How to download Corazon Salvaje full episodes with subtitle in English<br />
50
- Online player for Corazon Salvaje full episodes that supports subtitle in English<br />
51
- Watch Corazon Salvaje full episodes in HD quality with subtitle in English online<br />
52
- Download Corazon Salvaje full episodes in HD quality with subtitle in English online</p>
53
- <ul>
54
- <li>Juan de Dios Alcazar y Valle (played by Eduardo Palomo): The illegitimate son of Don Noel and Sofia, he is a brave and passionate man who loves Monica with all his heart.</li>
55
- <li>Monica Molnar (played by Edith Gonzalez): The daughter of Andres and stepdaughter of Sofia, she is a gentle and virtuous woman who falls in love with Juan de Dios despite being engaged to Francisco.</li>
56
- <li>Francisco Alcazar y Valle (played by Enrique Lizalde): The legitimate son of Don Noel and Catalina, he is a ruthless and greedy man who wants to marry Monica for her money.</li>
57
- <li>Aimee Molnar (played by Ana Colchero): The daughter of Sofia and Andres, she is a spoiled and selfish woman who covets Juan de Dios and hates Monica.</li>
58
- <li>Count Andrés Corona (played by Ariel Lopez Padilla): A mysterious and powerful man who has a connection to Juan de Dios' past and a plan for his future.</li>
59
- </ul>
60
- <p>The actors who played these roles became very popular and received many awards for their performances. Eduardo Palomo and Edith Gonzalez became one of the most iconic couples in telenovela history, while Enrique Lizalde and Ana Colchero were praised for their villainous roles. Ariel Lopez Padilla also impressed the audience with his charisma and mystery.</p>
61
- <h3>The popularity and reception of the show</h3>
62
- <p>Corazon Salvaje was a huge success both in Mexico and abroad. It had high ratings throughout its run and was exported to more than 70 countries around the world. It was dubbed or subtitled in many languages, such as English, French, Italian, Portuguese, Arabic, Turkish, Greek, Romanian, Russian, Polish, Hungarian, Bulgarian, Serbian, Croatian, Slovenian, Albanian, Macedonian, and Chinese.</p>
63
- <p>The show received many accolades from critics and fans alike. It won several awards at the TVyNovelas Awards in 1994, such as Best Telenovela, Best Actor (Eduardo Palomo), Best Actress (Edith Gonzalez), Best Antagonist Actor (Enrique Lizalde), Best Antagonist Actress (Ana Colchero), Best Young Lead Actor (Ariel Lopez Padilla), Best Original Story or Adaptation, and Best Direction. It also won the Golden Martín Fierro Award in Argentina for Best Foreign Telenovela in 1995.</p>
64
- <p>Corazon Salvaje is considered one of the best telenovelas ever made and has been praised for its compelling story, its historical accuracy, its beautiful scenery, its memorable music, and its outstanding cast. It has been remade twice, in 2009 and 2010, but none of them matched the original's popularity or quality.</p>
65
- <h2>Why watch Corazon Salvaje with English subtitles?</h2>
66
- <p>If you are not fluent in Spanish, you might wonder why you should watch Corazon Salvaje with English subtitles instead of dubbing or skipping it altogether. Here are some reasons why watching foreign shows with subtitles can be beneficial and enjoyable for you:</p>
67
- <h3>The benefits of watching foreign shows with subtitles</h3>
68
- <ul>
69
- <li>You can improve your language skills: Watching foreign shows with subtitles can help you learn new words, phrases, idioms, and expressions in another language. You can also improve your listening comprehension and pronunciation by hearing how native speakers talk. You can even pick up some cultural references and nuances that might not be translated well in dubbing.</li>
70
- <li>You can appreciate the original performance: Watching foreign shows with subtitles can help you appreciate the original voice and emotion of the actors and actresses. You can also enjoy the original soundtrack and sound effects that might be altered or replaced in dubbing. You can also avoid any mistakes or inconsistencies that might occur in dubbing due to different scripts or lip-syncing issues.</li>
71
- <li>You can expand your horizons: Watching foreign shows with subtitles can help you expand your horizons and discover new stories, genres, styles, and perspectives from different cultures and countries. You can also learn more about the history, society, politics, religion, art, and customs of other places and people through their media.</li>
72
- </ul>
73
- <h3>The challenges of finding good subtitles for Corazon Salvaje</h3>
74
- <p>However, watching foreign shows with subtitles can also pose some challenges especially if you are looking for good quality and accurate subtitles for Corazon Salvaje. Some of these challenges are:</p>
75
- <ul>
76
- <li>Lack of availability: Finding English subtitles for Corazon Salvaje can be difficult because they are not widely available or accessible online. You might have to search through various websites or forums to find them or request them from other fans or subtitlers. You might also have to deal with broken links or expired downloads that make it hard to get them.</li>
77
- <li>Lack of consistency: Finding English subtitles for Corazon Salvaje can be frustrating because they are not consistent or uniform across different sources or episodes. You might have to deal with different formats or styles of subtitles that make it hard to read or follow them. You might also have to deal with different levels or quality of translation that make it hard to understand or enjoy them.</li>
78
- <li>Lack of accuracy: Finding English subtitles for Corazon Salvaje can be disappointing because they are not always accurate or faithful to the original dialogue or meaning. You might have to deal with literal or word-for-word translations that lose the nuance or context of the original language. You might also have to deal with errors or mistakes in grammar, spelling, punctuation, or syntax that make it hard to read or trust them.</li>
79
- </ul>
80
- <h3>The best sources for Corazon Salvaje English subtitles</h3>
81
- <p>So, where can you find good English subtitles for Corazon Salvaje? Here are some of the best sources that we recommend:</p>
82
- <ul>
83
- <li>DVDs: The easiest and most reliable way to watch Corazon Salvaje with English subtitles is to buy the official DVDs that have them included. You can find them on Amazon or other online stores that sell Mexican telenovelas. The DVDs have high-quality video and audio, as well as accurate and consistent subtitles. However, they can be expensive and hard to find, especially if you live outside of Mexico or the US.</li>
84
- <li>YouTube: The most convenient and accessible way to watch Corazon Salvaje with English subtitles is to watch it on YouTube, where some fans have uploaded the episodes with subtitles. You can find them by searching for "Corazon Salvaje English Subtitle" or similar keywords on YouTube. The YouTube videos have decent quality and speed, as well as free and easy access. However, they can be incomplete or removed at any time due to copyright issues or other reasons.</li>
85
- <li>Subscene: The most popular and comprehensive way to watch Corazon Salvaje with English subtitles is to download them from Subscene, a website that hosts subtitles for various movies and shows in different languages. You can find them by searching for "Corazon Salvaje" on Subscene and choosing the English subtitles that match your video source. The Subscene subtitles have good quality and variety, as well as user ratings and comments. However, they can be inconsistent or inaccurate depending on the subtitler or the episode.</li>
86
- </ul>
87
- <h2>How to use HD Online Player to watch Corazon Salvaje with English subtitles?</h2>
88
- <p>If you want to watch Corazon Salvaje with English subtitles without buying DVDs, watching YouTube videos, or downloading subtitles from Subscene, you can use HD Online Player, a free and easy-to-use streaming software that lets you watch any video online with subtitles of your choice.</p>
89
- <h3>What is HD Online Player and how does it work?</h3>
90
- <p>HD Online Player is a software that allows you to stream any video from any website on your computer with subtitles from any source. It works by creating a virtual browser that connects to the website where the video is hosted and plays it on your computer screen. It also allows you to add subtitles from any file or URL that you have on your computer or online.</p>
91
- <p>HD Online Player supports various video formats and websites, such as MP4, AVI, MKV, FLV, WMV, MOV, 3GP, WEBM, MPEG, M4V, ASF, VOB, OGV, RMVB, TS, MTS, M2TS, and more. It also supports various subtitle formats and sources, such as SRT, ASS, SSA, SUB, IDX, TXT, XML, VTT, DFXP, and more. It also supports various languages and encodings for subtitles, such as UTF-8, ANSI, Unicode, and more.</p>
92
- <h3>The advantages of using HD Online Player for streaming Corazon Salvaje</h3>
93
- <p>Using HD Online Player for streaming Corazon Salvaje with English subtitles has many advantages over other methods, such as:</p>
94
- <ul>
95
- <li>It is free and easy: HD Online Player is a free software that you can download and install on your computer without any registration or subscription. It is also easy to use and has a simple and intuitive interface that lets you stream videos and add subtitles with just a few clicks.</li>
96
- <li>It is fast and smooth: HD Online Player is a fast software that loads videos quickly and smoothly without any buffering or lagging. It also has a smart buffering system that adjusts the video quality according to your internet speed and bandwidth.</li>
97
- <li>It is flexible and customizable: HD Online Player is a flexible software that lets you stream videos from any website and add subtitles from any source. It also lets you customize the subtitle settings according to your preferences, such as size, color, font, position, sync, delay, and more.</li>
98
- <li>It is safe and secure: HD Online Player is a safe software that does not contain any viruses or malware that might harm your computer or data. It also does not collect or share any of your personal or browsing information with anyone.</li>
99
- </ul>
100
- <h3>The steps to install and use HD Online Player for Corazon Salvaje</h3>
101
- <p>To install and use HD Online Player for streaming Corazon Salvaje with English subtitles, you need to follow these steps:</p>
102
- <ol>
103
- <li>Download HD Online Player from its official website: https://hdonlineplayer.com/</li>
104
- <li>Run the setup file and follow the instructions to install HD Online Player on your computer.</li>
105
- <li>Launch HD Online Player and click on the "Open URL" button on the top left corner.</li>
106
- <li>Enter the URL of the website where Corazon Salvaje is hosted and click "OK". For example, you can enter https://www.dailymotion.com/video/x6wqf0w which is the link for the first episode of Corazon Salvaje on Dailymotion.</li>
107
- <li>Wait for the video to load and play on HD Online Player.</li>
108
- <li>Click on the "Subtitle" button on the bottom right corner and choose "Add subtitle file" or "Add subtitle URL".</li>
109
- <li>Browse your computer or enter the URL of the subtitle file or source that you want to use for Corazon Salvaje. For example, you can enter https://subscene.com/subtitles/corazn-salvaje-1993/english/2409518 which is the link for the English subtitle for the first episode of Corazon Salvaje on Subscene.</li>
110
- <li>Wait for the subtitle to load and sync with the video on HD Online Player.</li>
111
- <li>Enjoy watching Corazon Salvaje with English subtitles on HD Online Player!</li>
112
- </ol>
113
- <h2>Conclusion</h2>
114
- <p>In conclusion, Corazon Salvaje is a classic Mexican telenovela that tells a captivating story of love and adventure in the 19th century. It has a great cast and production that made it one of the most successful and acclaimed shows in Latin American television history. It is worth watching with English subtitles if you want to improve your language skills appreciate the original performance and expand your horizons. You can watch it with English subtitles using HD Online Player a free and easy-to-use streaming software that lets you stream any video online with subtitles of your choice. You just need to download and install HD Online Player on your computer enter the URL of the website where Corazon Salvaje is hosted add the subtitle file or source that you want to use and enjoy watching Corazon Salvaje with English subtitles on HD Online Player!</p>
115
- <h2>FAQs</h2>
116
- <p>Here are some frequently asked questions about Corazon Salvaje and HD Online Player:</p>
117
- <ol>
118
- <li>How many episodes does Corazon Salvaje have? Corazon Salvaje has 80 episodes in total each lasting about 45 minutes.</li>
119
- <li>Where can I watch Corazon Salvaje online? You can watch Corazon Salvaje online on various websites that host Mexican telenovelas such as Dailymotion YouTube or TelenovelasTV.</li>
120
- <li>Can I watch Corazon Salvaje with other languages besides English? Yes you can watch Corazon Salvaje with other languages besides English if you can find subtitles for them online. HD Online Player supports various languages and encodings for subtitles.</li>
121
- <li>Can I use HD Online Player for other videos besides Corazon Salvaje? Yes you can use HD Online Player for other videos besides Corazon Salvaje if they are available online. HD Online Player supports various video formats and websites.</li>
122
- <li>Is HD Online Player compatible with Windows 10? Yes HD Online Player is compatible with Windows 10 as well as Windows 7 8 8.1 XP and Vista.</li>
123
- </ol>
124
- </p> 0a6ba089eb<br />
125
- <br />
126
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Caterpillar ET Factory Password.rar.md DELETED
@@ -1,104 +0,0 @@
1
- <br />
2
- <h1>Caterpillar ET Factory Password.rar: What Is It and How to Use It</h1>
3
- <p>If you are a Caterpillar dealer or technician, you may have heard of Caterpillar ET Factory Password.rar. This is a file that contains factory passwords for various Caterpillar Electronic Technician (Cat ET) functions. Cat ET is a software tool that allows you to communicate, diagnose, and service electronically controlled Caterpillar engines and machines connected to an Electronic Control Module (ECM).</p>
4
- <h2>Caterpillar ET factory password.rar</h2><br /><p><b><b>DOWNLOAD</b> &#9999; <a href="https://imgfil.com/2uy0Ee">https://imgfil.com/2uy0Ee</a></b></p><br /><br />
5
- <p>Factory passwords are part of a security system that helps to prevent unauthorized reprogramming of certain parameters, such as full load setting (FLS), fuel trim setting (FTS), or engine speed/timing calibration. Factory passwords also allow the factory to control access to engine calibration parameters and prevent unauthorized erasing of logged events.</p>
6
- <p>In order to use factory passwords, you need to have Cat ET installed on your computer and a compatible communication adapter, such as Caterpillar Communication Adapter or Nexiq. You also need to obtain the proper factory passwords from an authorized Caterpillar dealer. The factory passwords are different for each ECM and each programming session. They are based on the following information:</p>
7
- <ul>
8
- <li>Serial number of the ECM</li>
9
- <li>Engine serial number</li>
10
- <li>Serial number for Cat ET</li>
11
- <li>Reason code</li>
12
- <li>Total tattletale number</li>
13
- </ul>
14
- <p>You can find this information on the Cat ET screen for factory passwords. You can also use the "Reset/View Passwords" function to generate two random customer passwords that allow you to access customer password-protected parameters without knowing the actual customer passwords.</p>
15
- <h2>How to Download Caterpillar ET Factory Password.rar</h2>
16
- <p>Caterpillar ET Factory Password.rar is a file that contains factory passwords for various Cat ET functions. You can download this file from various online sources, such as blogs, forums, or websites that offer Caterpillar diagnostic software and tools. However, you should be careful when downloading this file, as it may contain viruses, malware, or other harmful content that can damage your computer or compromise your security.</p>
17
- <p></p>
18
- <p>Before downloading Caterpillar ET Factory Password.rar, you should check the following:</p>
19
- <ul>
20
- <li>The source of the file is trustworthy and reputable.</li>
21
- <li>The file size and format match the expected values.</li>
22
- <li>The file has positive feedback and reviews from other users.</li>
23
- <li>The file does not require any additional software or activation codes.</li>
24
- </ul>
25
- <p>After downloading Caterpillar ET Factory Password.rar, you should scan it with a reliable antivirus program and extract it with a suitable software tool, such as WinRAR or 7-Zip. You should also backup your original Cat ET files before replacing them with the downloaded ones.</p>
26
- <h2>How to Use Caterpillar ET Factory Password.rar</h2>
27
- <p>After downloading and extracting Caterpillar ET Factory Password.rar, you can use it to perform various Cat ET functions that require factory passwords. For example, you can use it to change FLS or FTS values, calibrate engine speed/timing, or clear event codes. To use Caterpillar ET Factory Password.rar, you need to follow these steps:</p>
28
- <ol>
29
- <li>Connect your communication adapter to your computer and to the ECM.</li>
30
- <li>Launch Cat ET and select the appropriate ECM.</li>
31
- <li>Select the "Service" menu and choose the function you want to perform.</li>
32
- <li>If Cat ET asks for factory passwords, enter them from the Caterpillar ET Factory Password.rar file.</li>
33
- <li>Follow the instructions on the screen to complete the function.</li>
34
- </ol>
35
- <p>Note that some functions may require additional steps or information, such as engine serial number or reason code. You should always document the parameters and settings that are programmed into the ECM and keep a permanent record of them.</p>
36
- <h2>Benefits of Using Caterpillar ET Factory Password.rar</h2>
37
- <p>Using Caterpillar ET Factory Password.rar can provide you with many benefits, such as:</p>
38
- <ul>
39
- <li>Improving the performance and efficiency of your Caterpillar engines and machines by adjusting the optimal parameters.</li>
40
- <li>Saving time and money by avoiding unnecessary trips to the dealer or service center.</li>
41
- <li>Enhancing your knowledge and skills by learning more about the features and functions of Cat ET.</li>
42
- <li>Increasing your customer satisfaction and loyalty by providing them with better service and support.</li>
43
- </ul>
44
- <p>However, you should also be aware of the risks and responsibilities of using Caterpillar ET Factory Password.rar, such as:</p>
45
- <ul>
46
- <li>Following the proper procedures and instructions to avoid damaging the ECM or causing any safety hazards.</li>
47
- <li>Respecting the intellectual property rights and confidentiality agreements of Caterpillar and its dealers.</li>
48
- <li>Not sharing or distributing Caterpillar ET Factory Password.rar to unauthorized parties or sources.</li>
49
- <li>Taking full responsibility for any consequences or liabilities that may arise from using Caterpillar ET Factory Password.rar.</li>
50
- </ul>
51
- <h2>Conclusion</h2>
52
- <p>Caterpillar ET Factory Password.rar is a file that contains factory passwords for various Cat ET functions that require them. You can download this file from various online sources, but you should be careful about its authenticity and security. You can use this file to perform various Cat ET functions that can improve the performance and efficiency of your Caterpillar engines and machines. However, you should also follow the proper procedures and instructions, respect the intellectual property rights and confidentiality agreements, and take full responsibility for any consequences or liabilities that may arise from using Caterpillar ET Factory Password.rar.</p>
53
- <h2>How to Get Help and Support for Caterpillar ET Factory Password.rar</h2>
54
- <p>If you have any questions or issues regarding Caterpillar ET Factory Password.rar, you can get help and support from various sources, such as:</p>
55
- <ul>
56
- <li>The official Caterpillar website, where you can find manuals, guides, videos, FAQs, and other resources for Cat ET and other Caterpillar products and services.</li>
57
- <li>The authorized Caterpillar dealer or service center near you, where you can get professional advice, assistance, and training from qualified technicians and experts.</li>
58
- <li>The online Caterpillar community, where you can interact with other Caterpillar users, customers, and enthusiasts, share your experiences and feedback, and learn from their tips and tricks.</li>
59
- </ul>
60
- <p>Remember that using Caterpillar ET Factory Password.rar is a privilege and not a right. You should always use it with respect and caution, and follow the ethical and legal standards of Caterpillar and its dealers. By doing so, you can enjoy the benefits of using Caterpillar ET Factory Password.rar without compromising your safety or reputation.</p>
61
- <h2>How to Update and Upgrade Caterpillar ET Factory Password.rar</h2>
62
- <p>Caterpillar ET Factory Password.rar is a file that contains factory passwords for various Cat ET functions that require them. However, this file may not work with newer versions of Cat ET or newer models of Caterpillar engines and machines. Therefore, you may need to update and upgrade Caterpillar ET Factory Password.rar from time to time to ensure its compatibility and functionality.</p>
63
- <p>To update and upgrade Caterpillar ET Factory Password.rar, you can follow these steps:</p>
64
- <ol>
65
- <li>Check the current version of your Cat ET software and the model and serial number of your Caterpillar engine or machine.</li>
66
- <li>Visit the official Caterpillar website or contact your authorized Caterpillar dealer or service center to find out if there are any updates or upgrades available for your Cat ET software or your Caterpillar engine or machine.</li>
67
- <li>If there are any updates or upgrades available, download them from the official Caterpillar website or get them from your authorized Caterpillar dealer or service center.</li>
68
- <li>Install the updates or upgrades on your computer and on your Caterpillar engine or machine according to the instructions provided.</li>
69
- <li>Download a new version of Caterpillar ET Factory Password.rar that matches the updated or upgraded Cat ET software and Caterpillar engine or machine from a reliable and reputable online source.</li>
70
- <li>Scan the new version of Caterpillar ET Factory Password.rar with a reliable antivirus program and extract it with a suitable software tool.</li>
71
- <li>Backup your original Cat ET files and replace them with the new ones from the new version of Caterpillar ET Factory Password.rar.</li>
72
- </ol>
73
- <p>Note that some updates or upgrades may require additional steps or information, such as activation codes or registration keys. You should always follow the instructions and recommendations from Caterpillar and its dealers when updating or upgrading your Cat ET software or your Caterpillar engine or machine.</p>
74
- <h2>How to Troubleshoot and Fix Common Problems with Caterpillar ET Factory Password.rar</h2>
75
- <p>Caterpillar ET Factory Password.rar is a file that contains factory passwords for various Cat ET functions that require them. However, you may encounter some common problems when using this file, such as:</p>
76
- <ul>
77
- <li>The file does not work with your Cat ET version or your Caterpillar engine or machine model.</li>
78
- <li>The file does not contain the factory passwords for the function you want to perform.</li>
79
- <li>The file is corrupted, damaged, or infected by viruses or malware.</li>
80
- <li>The file causes errors or crashes on your Cat ET software or your Caterpillar engine or machine.</li>
81
- </ul>
82
- <p>To troubleshoot and fix these common problems, you can try the following solutions:</p>
83
- <ul>
84
- <li>Make sure you have downloaded the latest version of Caterpillar ET Factory Password.rar that matches your Cat ET version and your Caterpillar engine or machine model.</li>
85
- <li>Make sure you have entered the correct information on the Cat ET screen for factory passwords, such as serial number, reason code, and total tattletale.</li>
86
- <li>Make sure you have scanned and extracted the file with a reliable antivirus program and a suitable software tool.</li>
87
- <li>Make sure you have backed up your original Cat ET files before replacing them with the ones from the file.</li>
88
- <li>Make sure you have followed the proper procedures and instructions when using the file to perform Cat ET functions.</li>
89
- <li>If none of these solutions work, contact your authorized Caterpillar dealer or service center for further assistance.</li>
90
- </ul>
91
- <h2>How to Learn More about Caterpillar ET Factory Password.rar</h2>
92
- <p>Caterpillar ET Factory Password.rar is a file that contains factory passwords for various Cat ET functions that require them. If you want to learn more about this file and how to use it effectively, you can use the following resources:</p>
93
- <ul>
94
- <li>The official Caterpillar website, where you can find manuals, guides, videos, FAQs, and other resources for Cat ET and other Caterpillar products and services.</li>
95
- <li>The authorized Caterpillar dealer or service center near you, where you can get professional advice, assistance, and training from qualified technicians and experts.</li>
96
- <li>The online Caterpillar community, where you can interact with other Caterpillar users, customers, and enthusiasts, share your experiences and feedback, and learn from their tips and tricks.</li>
97
- <li>The online sources that offer Caterpillar diagnostic software and tools, such as blogs, forums, or websites that provide Caterpillar ET Factory Password.rar and other files. However, you should be careful about their authenticity and security.</li>
98
- </ul>
99
- <p>By using these resources, you can enhance your knowledge and skills about Caterpillar ET Factory Password.rar and how to use it to improve the performance and efficiency of your Caterpillar engines and machines.</p>
100
- <h2>Conclusion</h2>
101
- <p>Caterpillar ET Factory Password.rar is a file that contains factory passwords for various Cat ET functions that require them. You can download this file from various online sources, but you should be careful about its authenticity and security. You can use this file to perform various Cat ET functions that can improve the performance and efficiency of your Caterpillar engines and machines. However, you should also follow the proper procedures and instructions, respect the intellectual property rights and confidentiality agreements, and take full responsibility for any consequences or liabilities that may arise from using Caterpillar ET Factory Password.rar.</p>
102
- <p>If you have any questions or issues regarding Caterpillar ET Factory Password.rar, you can get help and support from various sources, such as the official Caterpillar website, the authorized Caterpillar dealer or service center, or the online Caterpillar community. You can also update and upgrade Caterpillar ET Factory Password.rar from time to time to ensure its compatibility and functionality. By using Caterpillar ET Factory Password.rar with respect and caution, you can enjoy the benefits of using Cat ET without compromising your safety or reputation.</p> 3cee63e6c2<br />
103
- <br />
104
- <br />
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Black uTorrent Pro APK The Ultimate App for Torrent Lovers.md DELETED
@@ -1,73 +0,0 @@
1
-
2
- <h1>What Is Black uTorrent Pro APK and Why You Need It</h1>
3
- <p>If you are looking for a fast and easy way to download large files from the internet, you might have heard of uTorrent. uTorrent is one of the most popular and widely used torrent clients in the world. It allows you to download files using BitTorrent, a peer-to-peer (P2P) file-sharing protocol that distributes data among users without relying on a central server.</p>
4
- <h2>black utorrent pro apk</h2><br /><p><b><b>DOWNLOAD</b> === <a href="https://urlin.us/2uSVc3">https://urlin.us/2uSVc3</a></b></p><br /><br />
5
- <p>However, uTorrent is not perfect. The official version of uTorrent has some drawbacks, such as annoying ads, limited features, high battery consumption, and potential security risks. That's why some users prefer to use modded versions of uTorrent, such as black uTorrent pro apk.</p>
6
- <p>Black uTorrent pro apk is a modified version of uTorrent that unlocks all the pro features and removes all the ads. It also has some additional features that make it more convenient and efficient to use. Here are some of the benefits of using black uTorrent pro apk:</p>
7
- <ul>
8
- <li>No ads: You won't see any annoying or intrusive ads while using black uTorrent pro apk. This means you can enjoy a cleaner and smoother user interface without any distractions.</li>
9
- <li>Battery saver: Black uTorrent pro apk has a battery saver feature that automatically suspends torrenting when your battery level is low. This helps you save battery life and prevent your device from overheating.</li>
10
- <li>Auto shutdown: Black uTorrent pro apk also has an auto shutdown feature that automatically shuts down the app when your downloads are complete. This helps you save data and battery usage and avoid running unnecessary processes in the background.</li>
11
- <li>File conversion: Black uTorrent pro apk allows you to convert downloaded files to different formats, such as MP3, MP4, AVI, etc. This makes it easier to play them on different devices or platforms.</li>
12
- <li>Premium support: Black uTorrent pro apk gives you access to premium customer support from the developers. You can contact them anytime if you have any questions or issues with the app.</li>
13
- </ul>
14
- <h2>How to Download and Install Black uTorrent Pro APK on Your Android Device</h2>
15
- <p>If you want to try out black uTorrent pro apk, you need to download and install it on your Android device first. Here are the steps you need to follow:</p>
16
- <ol>
17
- <li>Find a reliable source to download the apk file. You can use our website to get the latest version of black uTorrent pro apk. Make sure the source is trustworthy and virus-free. You can scan the file with an antivirus app before installing it.</li>
18
- <li>Enable unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
19
- <li>Locate and tap on the apk file to start the installation process. You can use a file manager app to find the file in your downloads folder or wherever you saved it.</li>
20
- <li>Follow the on-screen instructions and grant the necessary permissions. The app will ask you to allow access to your storage, network, and other features. Tap on Install and wait for the process to finish.</li>
21
- <li>Launch the app and enjoy the pro features. You will see a black icon of uTorrent on your app drawer or home screen. Tap on it to open the app and start using it.</li>
22
- </ol>
23
- <h2>How to Use Black uTorrent Pro APK to Download Torrent Files</h2>
24
- <p>Now that you have installed black uTorrent pro apk on your device, you can use it to download torrent files or magnet links from various sources. Here are the steps you need to follow:</p>
25
- <ol>
26
- <li>Search for the torrent file or magnet link you want to download. You can use any torrent site or search engine that you trust, such as The Pirate Bay, 1337x, RARBG, etc. Make sure the file has enough seeders and positive comments before downloading it.</li>
27
- <li>Copy the torrent file or magnet link and paste it in the app. You can either download the torrent file to your device and open it with black uTorrent pro apk, or copy the magnet link and paste it in the app's search bar. The app will automatically detect the file or link and start downloading it.</li>
28
- <li>Choose your download location and other settings. You can change the default download location by going to Settings > Directories > Download Location and selecting a folder of your choice. You can also adjust other settings, such as bandwidth limit, download queue, network interface, etc.</li>
29
- <li>Start the download and monitor the progress. You will see a list of your active downloads in the app's main screen. You can tap on each download to see more details, such as speed, size, peers, trackers, etc. You can also pause, resume, or delete downloads as you wish.</li>
30
- <li>Open the downloaded file or folder with your preferred app or player. Once the download is complete, you can access the file or folder by tapping on it in the app or using a file manager app. You can then open it with any app or player that supports the file format.</li>
31
- </ol>
32
- <h2>The Risks and Precautions of Torrenting with Black uTorrent Pro APK</h2>
33
- <p>Torrenting with black uTorrent pro apk can be a great way to get free and fast downloads of movies, music, games, software, and more. However, torrenting also comes with some risks and challenges that you need to be aware of and prepared for. Here are some of them:</p>
34
- <p></p>
35
- <ul>
36
- <li>Risks of torrenting: Torrenting involves downloading files from unknown sources that may contain malware, viruses, spyware, or other harmful programs that can infect your device or compromise your data. Torrenting also exposes your IP address to other users who may monitor your activity or target you for cyberattacks. Torrenting may also violate copyright laws or other regulations in some countries or regions, which may result in legal consequences or penalties.</li>
37
- <li>Precautions of torrenting: To avoid or minimize the risks of torrenting, you should take some precautions before and while using black uTorrent pro apk. Some of these precautions are: <ul>
38
- <li>Scanning files: You should always scan downloaded files with an antivirus app before opening them or running them on your device. This will help you detect and remove any malware or viruses that may be hidden in them.</li>
39
- <li>Checking comments: You should always check the comments section of torrent sites or search engines before downloading any file or link. This will help you get feedback from other users who have downloaded the same file or link and see if they encountered any problems or issues with it.</li>
40
- <li>Using a VPN: You should always use a VPN (virtual private network) when torrenting with black uTorrent pro apk. A VPN will encrypt your traffic and hide your IP address from other users and trackers. This will protect your privacy and security online and prevent anyone from spying on your activity or tracing your location. A VPN will also help you bypass any geo-restrictions or censorship that may block access to certain torrent sites or content.</li>
41
- </ul>
42
- </ul>
43
- <h1>Conclusion</h1>
44
- <p>Torrenting with black u Torrent pro apk is a powerful and convenient app that lets you download and enjoy torrent files on your Android device. It offers many pro features that enhance your torrenting experience, such as no ads, battery saver, auto shutdown, file conversion, and premium support. However, torrenting also comes with some risks and challenges that you need to be aware of and prepared for, such as malware, viruses, legal issues, ISP throttling, etc. Therefore, you should always take some precautions before and while using black uTorrent pro apk, such as scanning files, checking comments, using a VPN, etc. By doing so, you can enjoy the benefits of torrenting without compromising your safety or security. We hope this article has helped you understand what black uTorrent pro apk is and how to use it to download torrent files on your Android device. If you have any questions or feedback, please feel free to leave a comment below. Happy torrenting! <h2>FAQs</h2>
45
- <p>Here are some frequently asked questions about black uTorrent pro apk:</p>
46
- <table>
47
- <tr>
48
- <th>Question</th>
49
- <th>Answer</th>
50
- </tr>
51
- <tr>
52
- <td>Is black uTorrent pro apk safe to use?</td>
53
- <td>Black uTorrent pro apk is safe to use as long as you download it from a reliable source and scan it with an antivirus app before installing it. However, the files or links you download with it may not be safe, so you should always check them before opening them or running them on your device.</td>
54
- </tr>
55
- <tr>
56
- <td>Is black uTorrent pro apk legal to use?</td>
57
- <td>Black uTorrent pro apk is legal to use as long as you use it for personal and non-commercial purposes. However, the content you download with it may not be legal, depending on the source and the jurisdiction. You should always respect the rights of the content creators and owners and follow the laws and regulations of your country or region.</td>
58
- </tr>
59
- <tr>
60
- <td>What is the difference between black uTorrent pro apk and uTorrent pro?</td>
61
- <td>Black uTorrent pro apk is a modified version of uTorrent pro that unlocks all the pro features and removes all the ads. It also has some additional features that make it more convenient and efficient to use. uTorrent pro is the official version of uTorrent that requires a subscription fee to access the pro features.</td>
62
- </tr>
63
- <tr>
64
- <td>How can I update black uTorrent pro apk?</td>
65
- <td>You can update black uTorrent pro apk by downloading the latest version of the apk file from our website or any other source you trust. You can then install it over the existing app without losing your settings or downloads.</td>
66
- </tr>
67
- <tr>
68
- <td>How can I uninstall black uTorrent pro apk?</td>
69
- <td>You can uninstall black uTorrent pro apk by going to Settings > Apps > Black uTorrent Pro APK and tapping on Uninstall. You can also delete the apk file from your device if you don't need it anymore.</td>
70
- </tr>
71
- </table></p> 197e85843d<br />
72
- <br />
73
- <br />
 
spaces/1phancelerku/anime-remove-background/Arknights A Role Playing Game with Stunning Graphics and Sci-Fi Plot. Download Now for Mac and PC.md DELETED
@@ -1,112 +0,0 @@
1
-
2
- <h1>How to Download and Play Arknights on Mac</h1>
3
- <p>Arknights is a popular tactical RPG/tower defense mobile game that has captivated millions of players around the world. If you are one of them, you might be wondering if you can play Arknights on your Mac computer. The answer is yes, you can! In this article, we will show you how to download and play Arknights on Mac using an Android emulator. We will also give you some tips and tricks to enhance your gameplay experience. Let's get started!</p>
4
- <h2>What is Arknights?</h2>
5
- <p>Arknights is a free-to-play mobile game developed by Chinese developer Hypergryph and published by Yostar. It was released in China in May 2019, and in other countries in January 2020. It is available on Android and iOS platforms and features gacha game mechanics.</p>
6
- <h2>arknights download mac</h2><br /><p><b><b>Download</b> &#10004;&#10004;&#10004; <a href="https://jinyurl.com/2uNQ2A">https://jinyurl.com/2uNQ2A</a></b></p><br /><br />
7
- <p>The game combines elements of tactical RPG and tower defense genres, with a rich sci-fi plot and stunning graphics. You play as the Doctor, a leader of a rescue organization called Rhodes Island, who has lost his memory due to an unknown infection. You have to recruit and train Operators, who are people with special abilities, to fight against a deadly threat from another world called Reunion.</p>
8
- <p>The game offers hundreds of different Operators, each with their own skills, abilities, and classes. You have to strategically place them on the battlefield to block and defeat the enemies. You can also activate their skills for special effects or withdraw them for redeployment. The game has various modes, such as story mode, challenge mode, event mode, annihilation mode, contingency contract mode, and integrated strategies mode.</p>
9
- <p>The game also features a captivating story with multiple chapters and side stories, as well as a diverse cast of characters with their own personalities and backgrounds. The game has received positive reviews from critics and players alike, praising its gameplay, graphics, story, music, voice acting, and character design.</p>
10
- <p>How to download and play Arknights on Mac with BlueStacks<br />
11
- Arknights official website for Mac users<br />
12
- Arknights web browser game for Mac and PC<br />
13
- Arknights Mac emulator download guide<br />
14
- Arknights latest update and events for Mac players<br />
15
- Arknights tips and tricks for Mac gamers<br />
16
- Arknights best operators and strategies for Mac version<br />
17
- Arknights system requirements and compatibility for Mac<br />
18
- Arknights review and rating for Mac platform<br />
19
- Arknights support and feedback for Mac issues<br />
20
- Arknights wallpapers and themes for Mac desktop<br />
21
- Arknights fan art and cosplay for Mac fans<br />
22
- Arknights merchandise and gifts for Mac lovers<br />
23
- Arknights comics and stories for Mac readers<br />
24
- Arknights music and soundtracks for Mac listeners<br />
25
- Arknights collaborations and crossovers for Mac enthusiasts<br />
26
- Arknights community and forums for Mac users<br />
27
- Arknights wiki and guides for Mac learners<br />
28
- Arknights news and updates for Mac followers<br />
29
- Arknights videos and streams for Mac watchers<br />
30
- Arknights memes and jokes for Mac funnies<br />
31
- Arknights codes and coupons for Mac savers<br />
32
- Arknights giveaways and contests for Mac winners<br />
33
- Arknights skins and costumes for Mac collectors<br />
34
- Arknights characters and lore for Mac explorers<br />
35
- Arknights gameplay and features for Mac players<br />
36
- Arknights download size and speed for Mac devices<br />
37
- Arknights graphics and performance for Mac quality<br />
38
- Arknights bugs and glitches for Mac fixers<br />
39
- Arknights mods and hacks for Mac cheaters<br />
40
- Arknights tier list and rankings for Mac experts<br />
41
- Arknights anniversary and birthday for Mac celebrators<br />
42
- Arknights originium and orundum for Mac spenders<br />
43
- Arknights recruitment and headhunting for Mac summoners<br />
44
- Arknights base and dormitory for Mac builders<br />
45
- Arknights missions and stages for Mac challengers<br />
46
- Arknights story mode and side stories for Mac enjoyers<br />
47
- Arknights factions and groups for Mac joiners<br />
48
- Arknights voice actors and actresses for Mac admirers<br />
49
- Arknights trivia and facts for Mac knowers</p>
50
- <h2>Why Play Arknights on Mac?</h2>
51
- <p>While Arknights is designed for mobile devices, you might want to play it on your Mac computer for various reasons. Here are some of the benefits of playing Arknights on Mac:</p>
52
- <ul>
53
- <li>You can enjoy the game on a larger screen, which can enhance your immersion and appreciation of the game's visuals.</li>
54
- <li>You can play the game with better graphics and performance, as you can adjust the settings according to your Mac's specifications.</li>
55
- <li>You can use keyboard and mouse controls, which can give you more precision and convenience than touch controls.</li>
56
- <li>You can save your battery life and storage space on your mobile device.</li>
57
- <li>You can multitask with other apps or programs on your Mac while playing the game.</li>
58
- </ul>
59
- <h2>How to Install Arknights on Mac?</h2>
60
- <p>To play Arknights on your Mac computer, you will need to use an Android emulator. An Android emulator is a software that simulates the environment of an Android device on your computer. This way, you can access and run Android apps and games on your Mac.</p>
61
- <p>There are many Android emulators available for Mac users, such as BlueStacks, NoxPlayer, MEmu Player, LDPlayer, Mu <p>One of the most popular and recommended Android emulators for Mac is BlueStacks. BlueStacks is a powerful and user-friendly emulator that can run Arknights smoothly and efficiently. Here are the steps to download and install Arknights on Mac using BlueStacks:</p>
62
- <ol>
63
- <li>Go to the official website of BlueStacks and download the latest version of the emulator for Mac. You can use this link: <a href="">https://www.bluestacks.com/download.html</a></li>
64
- <li>Once the download is complete, open the installer file and follow the instructions to install BlueStacks on your Mac. You might need to grant some permissions or enter your password during the process.</li>
65
- <li>After the installation is done, launch BlueStacks and sign in with your Google account. If you don't have one, you can create one for free.</li>
66
- <li>On the home screen of BlueStacks, look for the Google Play Store icon and click on it. This will open the Play Store app on the emulator.</li>
67
- <li>In the search bar of the Play Store, type "Arknights" and hit enter. You will see a list of results related to the game.</li>
68
- <li>Select the Arknights app from the list and click on the "Install" button. This will start downloading and installing the game on your Mac.</li>
69
- <li>Once the installation is complete, you can find the Arknights icon on the home screen of BlueStacks. Click on it to launch the game and enjoy playing Arknights on your Mac.</li>
70
- </ol>
71
- <h3>How to Link Your Mobile Account and Recover Your Progress on Mac?</h3>
72
- <p>If you have already played Arknights on your mobile device and want to continue your progress on your Mac, you will need to link your mobile account to your emulator account. Here are the steps to do that:</p>
73
- <ol>
74
- <li>On your mobile device, open Arknights and tap on the gear icon on the top right corner of the screen. This will open the settings menu.</li>
75
- <li>Tap on "Account" and then tap on "Bind Account". You will see a list of options to bind your account, such as Facebook, Twitter, Yostar, or Apple ID.</li>
76
- <li>Select one of the options and follow the instructions to bind your account. You will need to enter your login details or scan a QR code depending on the option you choose.</li>
77
- <li>Once your account is bound, you will see a confirmation message on your screen. You can now close Arknights on your mobile device.</li>
78
- <li>On your Mac, launch BlueStacks and open Arknights. On the title screen, tap on "Account" and then tap on "Switch Account". You will see a list of options to switch your account, such as Facebook, Twitter, Yostar, or Apple ID.</li>
79
- <li>Select the same option that you used to bind your account on your mobile device and follow the instructions to switch your account. You will need to enter your login details or scan a QR code depending on the option you choose.</li>
80
- <li>Once your account is switched, you will see a confirmation message on your screen. You can now access your progress and data from your mobile device on your Mac.</li>
81
- </ol>
82
- <h2>Tips and Tricks for Playing Arknights on Mac</h2>
83
- <p>Now that you have installed Arknights on your Mac, you might want to know some tips and tricks to improve your gameplay experience. Here are some of them:</p>
84
- <ul>
85
- <li>You can adjust the settings of BlueStacks to optimize its performance and compatibility with Arknights. For example, you can change the resolution, graphics quality, frame rate, memory allocation, CPU cores, etc. You can also enable or disable features such as high FPS mode, game mode, eco mode, etc.</li>
86
- <li>You can use keyboard and mouse controls to play Arknights more comfortably and conveniently than touch controls. You can either use the default key mapping or customize it according to your preference. You can also use macros to automate some actions or commands in the game.</li>
87
- <li>You can use the screenshot and video recording features of BlueStacks to capture your gameplay moments and share them with others. You can also stream your gameplay live to platforms such as Twitch or YouTube using BlueStacks.</li>
88
- <li>You can use some strategies to enhance your performance in Arknights, such as leveling up and promoting your Operators, upgrading their skills and potentials, choosing the right team composition and formation, using effective skill timing and deployment order, etc.</li>
89
- </ul>
90
- <h2>Conclusion</h2>
91
- <p>Arknights is a fun and addictive game that combines tactical RPG and tower defense elements with a sci-fi plot and stunning graphics. If you want to play Arknights on your Mac computer, you can do so by using an Android emulator such as BlueStacks. You can download and install Arknights on your Mac easily and quickly, and enjoy the game on a larger screen, with better graphics and performance, and using keyboard and mouse controls. You can also link your mobile account and recover your progress on your Mac, and use some tips and tricks to optimize your gameplay experience. Arknights is a game that you don't want to miss, so why not give it a try on your Mac today?</p>
92
- <h2>FAQs</h2>
93
- <p>Here are some frequently asked questions and answers about Arknights on Mac:</p>
94
- <h3>Is Arknights free to play on Mac?</h3>
95
- <p>Yes, Arknights is free to play on Mac, as long as you have an Android emulator such as BlueStacks installed on your Mac. You can download and install Arknights from the Google Play Store on the emulator without paying anything. However, the game does have some in-app purchases that you can buy with real money if you want to enhance your gameplay experience.</p>
96
- <h3>Is Arknights compatible with Mac?</h3>
97
- <p>Yes, Arknights is compatible with Mac, as long as you use an Android emulator such as BlueStacks to run it. BlueStacks is compatible with most Mac devices and operating systems, and can run Arknights smoothly and efficiently. You can check the minimum system requirements for BlueStacks on its official website.</p>
98
- <h3>How to update Arknights on Mac?</h3>
99
- <p>To update Arknights on your Mac, you need to update it from the Google Play Store on the emulator. You can either enable the auto-update feature or manually check for updates. To manually check for updates, you need to open the Play Store app on the emulator, go to the "My apps & games" section, find Arknights from the list of installed apps, and click on the "Update" button if there is one available.</p>
100
- <h3>How to transfer data from Arknights on mobile to Mac?</h3>
101
- <p>To transfer data from Arknights on your mobile device to your Mac, you need to link your mobile account to your emulator account. You can do this by binding your account to one of the options available in the game's settings menu, such as Facebook, Twitter, Yostar, or Apple ID. Then, you need to switch your account to the same option on the emulator. This will allow you to access your progress and data from your mobile device on your Mac.</p>
102
- <h3>How to fix Arknights crashing or not loading on Mac?</h3>
103
- <p>If you encounter any issues with Arknights crashing or not loading on your Mac, you can try some of the following solutions:</p>
104
- <ul>
105
- <li>Restart the game or the emulator.</li>
106
- <li>Clear the cache and data of the game or the emulator.</li>
107
- <li>Update the game or the emulator to the latest version.</li>
108
- <li>Check your internet connection and firewall settings.</li>
109
- <li>Contact the game's or the emulator's customer support for further assistance.</li>
110
- </ul></p> 197e85843d<br />
111
- <br />
112
- <br />
spaces/1phancelerku/anime-remove-background/Brawl Stars APK Everything You Need to Know About the Best Mobile Game of 2023.md DELETED
@@ -1,150 +0,0 @@
1
- <br />
2
- <h1>Brawl Stars APK Download: How to Play the Ultimate Mobile Brawler on Your Android Device</h1>
3
- <p>If you are looking for a fast-paced, action-packed, and fun multiplayer game to play on your Android device, you should definitely check out Brawl Stars. Brawl Stars is a game developed by Supercell, the makers of Clash of Clans and Clash Royale. It features various game modes, characters, and events that will keep you hooked for hours.</p>
4
- <p>But how can you download and install Brawl Stars APK on your Android device? And what are some tips and tricks to help you become a better brawler? In this article, we will answer these questions and more. Let's get started!</p>
5
- <h2>brawl stars apk download</h2><br /><p><b><b>DOWNLOAD</b> &#8230; <a href="https://jinyurl.com/2uNRjw">https://jinyurl.com/2uNRjw</a></b></p><br /><br />
6
- <h2>What is Brawl Stars?</h2>
7
- <p>Brawl Stars is a mobile game that combines elements of twin-stick shooters, MOBAs, and battle royales. You can choose from over 20 different brawlers, each with their own unique abilities, weapons, and skins. You can also team up with your friends or play solo in various game modes, such as:</p>
8
- <ul>
9
- <li>Gem Grab: Collect and hold 10 gems to win, but don't let the enemy team take them from you.</li>
10
- <li>Showdown: Be the last brawler standing in a solo or duo battle royale.</li>
11
- <li>Brawl Ball: Score two goals before the other team in a soccer-like match.</li>
12
- <li>Bounty: Take out opponents to earn stars, but don't let them pick you off.</li>
13
- <li>Heist: Protect your team's safe and try to crack open your opponent's safe.</li>
14
- <li>Special Events: Limited time PvE and PvP game modes with unique rewards.</li>
15
- <li>Championship Challenge: Compete in in-game qualifiers for a chance to join the Brawl Stars esports scene.</li>
16
- </ul>
17
- <p>Brawl Stars is constantly evolving with new brawlers, skins, maps, events, and game modes. It also has a Brawl Pass system that lets you complete quests, open boxes, earn gems, pins, and an exclusive skin every season.</p>
18
- <h2>How to Download Brawl Stars APK?</h2>
19
- <p>Brawl Stars is free to download and play on both iOS and Android devices. However, some regions may not have access to the game on the Google Play Store. If that's the case for you, don't worry. You can still download and install Brawl Stars APK from other sources.</p>
20
- <p>An APK file is an Android application package that contains all the files needed to run an app on your device. To download Brawl Stars APK, you need to follow these steps:</p>
21
- <ol>
22
- <li>Go to a trusted website that offers Brawl Stars APK download links. Some examples are Uptodown, Softpedia, and Games.lol. Make sure you download the latest version of the game.</li>
23
- <li>Once you have downloaded the APK file, locate it on your device's file manager and tap on it to install it. You may need to enable installation from unknown sources in your device's settings.</li>
24
- <li>Wait for the installation process to finish and launch the game. You may need to download some additional data before you can play.</li>
25
- <li>Enjoy Brawl Stars on your Android device!</li>
26
- </ol>
27
- <p>Note: Downloading APK files from third-party sources may pose some risks to your device's security and performance. Make sure you only download from reputable websites and scan the files for viruses before installing them.</p>
28
- <h2>What are Some Brawl Stars Tips and Tricks?</h2>
29
- <p>Brawl Stars is a game that requires skill, strategy, and teamwork to win. Here are some tips and tricks that will help you improve your gameplay and become a star brawler:</p>
30
- <p>brawl stars apk download latest version<br />
31
- brawl stars apk download for android<br />
32
- brawl stars apk download for pc<br />
33
- brawl stars apk download mod<br />
34
- brawl stars apk download hack<br />
35
- brawl stars apk download free<br />
36
- brawl stars apk download 2023<br />
37
- brawl stars apk download update<br />
38
- brawl stars apk download softpedia[^1^]<br />
39
- brawl stars apk download no verification<br />
40
- brawl stars apk download unlimited gems<br />
41
- brawl stars apk download for ios<br />
42
- brawl stars apk download for windows 10<br />
43
- brawl stars apk download nulls<br />
44
- brawl stars apk download private server<br />
45
- brawl stars apk download rexdl<br />
46
- brawl stars apk download apkpure<br />
47
- brawl stars apk download uptodown<br />
48
- brawl stars apk download revdl<br />
49
- brawl stars apk download android 1<br />
50
- brawl stars apk download mediafıre<br />
51
- brawl stars apk download mega<br />
52
- brawl stars apk download online<br />
53
- brawl stars apk download old version<br />
54
- brawl stars apk download original<br />
55
- brawl stars apk download offline<br />
56
- brawl stars apk download obb<br />
57
- brawl stars apk download play store<br />
58
- brawl stars apk download pc windows 7<br />
59
- brawl stars apk download pc windows 8.1<br />
60
- brawl stars apk download pc windows xp<br />
61
- brawl stars apk download pc bluestacks<br />
62
- brawl stars apk download pc nox player<br />
63
- brawl stars apk download pc gameloop<br />
64
- brawl stars apk download pc memu play<br />
65
- brawl stars apk download reddit<br />
66
- brawl stars apk download real<br />
67
- brawl stars apk download rebrawl<br />
68
- brawl stars apk download rey modz official<br />
69
- brawl stars apk download rey modz pro 2.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.<br />
70
- brawl stars apk download supercell<br />
71
- brawl stars apk download safe<br />
72
- brawl stars apk download site<br />
73
- brawl stars apk download server error 43 fix</p>
74
- <h3>Use Obstacles to Your Advantage</h3>
75
- <p>The maps in Brawl Stars have various obstacles such as rocks, barrels, mushrooms, and walls that can block enemy fire. You can use these objects to hide behind for cover or to ambush your opponents. However, be careful of brawlers that can break through obstacles with their super abilities or gadgets.</p>
76
- <h3>Don't Take on Tank Brawlers Alone</ <h3>Don't Take on Tank Brawlers Alone</h3>
77
- <p>Tank brawlers are those that have high health and damage, such as El Primo, Bull, Frank, and Rosa. They can easily overpower you in close-range combat, especially if they have their super abilities ready. If you encounter a tank brawler, try to keep your distance and chip away at their health with your teammates. Alternatively, you can use brawlers that can counter them, such as Shelly, Spike, or Emz.</p>
78
- <h3>Know Your Brawler's Role and Strengths</h3>
79
- <p>Brawl Stars has four types of brawlers: Fighter, Sharpshooter, Heavyweight, and Support. Each type has its own role and strengths in the game. For example, fighters are good at dealing damage and controlling the map, sharpshooters are good at sniping and poking enemies from afar, heavyweights are good at tanking and breaking through defenses, and support are good at healing and buffing allies. You should know your brawler's type and play accordingly to maximize their potential.</p>
80
- <h3>Use Your Super Ability Wisely</h3>
81
- <p>Your super ability is a powerful move that can turn the tide of the battle. However, it takes time to charge up and can be wasted if used incorrectly. You should use your super ability when it can have the most impact, such as securing a kill, saving an ally, or escaping a sticky situation. You should also be aware of your enemy's super abilities and try to dodge or counter them.</p>
82
- <h3>Communicate and Coordinate with Your Teammates</h3>
83
- <p>Brawl Stars is a team-based game that requires coordination and communication to win. You should use the in-game chat or voice chat to communicate with your teammates and plan your strategies. You can also use the quick chat commands or pins to convey your emotions or intentions. For example, you can use the thumbs up pin to show approval or the angry pin to show frustration. You can also use the attack, defend, or retreat commands to signal your teammates what to do.</p>
84
- <h2>How to Compare Brawlers in Brawl Stars?</h2>
85
- <p>If you want to know how different brawlers stack up against each other in terms of stats, abilities, and performance, you can use a table to compare them. Here is an example of a table that compares four popular brawlers in Brawl Stars:</p>
86
- <table>
87
- <tr>
88
- <th>Brawler</th>
89
- <th>Type</th>
90
- <th>Health</th>
91
- <th>Damage</th>
92
- <th>Range</th>
93
- <th>Super Ability</th>
94
- </tr>
95
- <tr>
96
- <td>Shelly</td>
97
- <td>Fighter</td>
98
- <td>3600</td>
99
- <td>300-420 per shell</td>
100
- <td>7.67 tiles</td>
101
- <td>Fires a powerful blast that knocks back enemies and destroys obstacles.</td>
102
- </tr>
103
- <tr>
104
- <td>Nita</td>
105
- <td>Fighter</td>
106
- <td>3800</td>
107
- <td>800 per hit</td>
108
- <td>5.5 tiles</td>
109
- <td>Summons a big bear that attacks enemies and has high health.</td>
110
- </tr>
111
- <tr>
112
- <td>Crow</td>
113
- <td>Sharpshooter</td>
114
- <td>3360</td>
115
- <td>320 per dagger (plus poison)</td>
116
- <td>10 tiles</td>
117
- <td>Fires a ring of daggers that deal damage and poison enemies.</td>
118
- </tr>
119
- <tr>
120
- <td>Poco</td>
121
- <td>Support</td>
122
- <td>3800</td>
123
- <td>700 per hit (plus healing)</td>
124
- <td>7 tiles (wide spread)</td>
125
- <td>Sends out a wave of music that heals himself and his allies.</td>
126
- </tr>
127
- </table>
128
- <p>You can use this table to see which brawlers have higher or lower health, damage, range, or super abilities. You can also use this table to find out which brawlers are better suited for certain game modes or situations.</p>
129
- <h2>Conclusion: Brawl Stars APK Download is Worth It!</h2>
130
- <p>Brawl Stars is one of the best mobile games you can play on your Android device. It has amazing graphics, gameplay, characters, and features that will keep you entertained for hours. Whether you want to play solo or with your friends, you will always find something new and exciting in Brawl Stars.</p>
131
- <p>If you want to download Brawl Stars APK on your Android device, you can follow the steps we mentioned above. Just make sure you download from a trusted source and scan the file for viruses before installing it. Once you have installed the game, you can start brawling with millions of players around the world!</p>
132
- <p>We hope this article helped you learn more about Brawl Stars APK download and how to play the game better. If If you have any questions about Brawl Stars APK download or the game itself, you can check out the FAQs below. You may find the answers you are looking for. <h2>FAQs</h2>
133
- <h3>Is Brawl Stars APK Download Safe?</h3>
134
- <p>Brawl Stars APK download is safe as long as you download from a reputable website and scan the file for viruses before installing it. However, you should be careful of fake or malicious websites that may try to trick you into downloading harmful files or stealing your personal information. Always check the reviews, ratings, and comments of the website and the file before downloading it.</p>
135
- <h3>Is Brawl Stars APK Download Legal?</h3>
136
- <p>Brawl Stars APK download is legal as long as you do not use it to violate the terms of service of the game or the Google Play Store. For example, you should not use it to hack, cheat, or mod the game in any way. You should also not use it to distribute or sell the game without permission from Supercell. If you do any of these things, you may face legal consequences or get banned from the game.</p>
137
- <h3>How to Update Brawl Stars APK?</h3>
138
- <p>Brawl Stars APK may not update automatically on your device, unlike the official version from the Google Play Store. To update Brawl Stars APK, you need to download and install the latest version of the file from the same website you downloaded it from. You can also check for updates in the game settings or on the official Brawl Stars website. Make sure you back up your game data before updating to avoid losing your progress.</p>
139
- <h3>How to Play Brawl Stars on PC?</h3>
140
- <p>If you want to play Brawl Stars on your PC, you need to use an Android emulator. An Android emulator is a software that allows you to run Android apps and games on your PC. Some popular Android emulators are BlueStacks, NoxPlayer, and LDPlayer. To play Brawl Stars on PC, you need to follow these steps:</p>
141
- <ol>
142
- <li>Download and install an Android emulator on your PC.</li>
143
- <li>Launch the emulator and sign in with your Google account.</li>
144
- <li>Download and install Brawl Stars APK from a trusted website or from the emulator's app store.</li>
145
- <li>Launch Brawl Stars and enjoy playing on a bigger screen with better controls.</li>
146
- </ol>
147
- <h3>How to Get Free Gems in Brawl Stars?</h3>
148
- <p>Gems are the premium currency in Brawl Stars that can be used to buy skins, boxes, brawl passes, and other items. You can get free gems in Brawl Stars by completing quests, opening boxes, watching ads, participating in events, or using codes. You can also get free gems by using third-party apps or websites that offer surveys, tasks, or rewards. However, you should be careful of scams or hacks that may try to steal your account or personal information.</p> 197e85843d<br />
149
- <br />
150
- <br />
spaces/1phancelerku/anime-remove-background/Download Wordscapes Uncrossed Mod APK for Free - Unlimited Coins and Hints.md DELETED
@@ -1,147 +0,0 @@
1
-
2
- <h1>Wordscapes Uncrossed Mod APK: A Fun and Challenging Word Game</h1>
3
- <p>If you love word games, you might have heard of Wordscapes, one of the most popular and addictive games in the genre. But did you know that there is a sequel to Wordscapes that is even more fun and challenging? It's called Wordscapes Uncrossed, and it's a game that will test your brain power and vocabulary skills like never before.</p>
4
- <h2>wordscapes uncrossed mod apk</h2><br /><p><b><b>Download Zip</b> &#9658;&#9658;&#9658;&#9658;&#9658; <a href="https://jinyurl.com/2uNMPD">https://jinyurl.com/2uNMPD</a></b></p><br /><br />
5
- <p>In this article, we'll tell you everything you need to know about Wordscapes Uncrossed, how to play it, how to download and install its mod APK version, and how to enjoy it safely and responsibly. So, if you're ready to dive into the world of words, let's get started!</p>
6
- <h2>How to Play Wordscapes Uncrossed</h2>
7
- <h3>The basic rules and gameplay of Wordscapes Uncrossed</h3>
8
- <p>Wordscapes Uncrossed is a word puzzle game that is similar to crossword puzzles, but with a twist. Instead of filling in the blanks with clues, you have to swipe letters on the screen to form words that fit into the grid. The words can be horizontal, vertical, or diagonal, as long as they are connected by a line.</p>
9
- <p>The game starts with easy puzzles that have only a few letters and words, but as you progress, the puzzles get harder and bigger, with more letters and words to find. You also have to deal with bonus words, which are extra words that are not part of the grid but can earn you coins if you find them.</p>
10
- <h3>The different modes and levels of Wordscapes Uncrossed</h3>
11
- <p>Wordscapes Uncrossed has two main modes: Classic and Daily. In Classic mode, you can play through hundreds of levels that are divided into different themes, such as Forest, Sky, Ocean, Canyon, etc. Each theme has its own background image and music that create a relaxing atmosphere for playing.</p>
12
- <p>In Daily mode, you can play a new puzzle every day that is based on the current date. The daily puzzles are more challenging than the classic ones, but they also offer more rewards, such as coins, hints, and stars. You can also compare your score with other players around the world on the leaderboard.</p>
13
- <p>wordscapes uncrossed apk download<br />
14
- wordscapes uncrossed game free<br />
15
- wordscapes uncrossed mod apk unlimited coins<br />
16
- wordscapes uncrossed latest version<br />
17
- wordscapes uncrossed hack apk<br />
18
- wordscapes uncrossed word puzzle<br />
19
- wordscapes uncrossed android game<br />
20
- wordscapes uncrossed cheats and answers<br />
21
- wordscapes uncrossed online play<br />
22
- wordscapes uncrossed for pc<br />
23
- wordscapes uncrossed app store<br />
24
- wordscapes uncrossed by peoplefun<br />
25
- wordscapes uncrossed level 1<br />
26
- wordscapes uncrossed review<br />
27
- wordscapes uncrossed tips and tricks<br />
28
- wordscapes uncrossed mod apk 2023<br />
29
- wordscapes uncrossed best word game<br />
30
- wordscapes uncrossed no ads<br />
31
- wordscapes uncrossed premium apk<br />
32
- wordscapes uncrossed update<br />
33
- wordscapes uncrossed how to play<br />
34
- wordscapes uncrossed daily puzzle<br />
35
- wordscapes uncrossed bonus words<br />
36
- wordscapes uncrossed anagram solver<br />
37
- wordscapes uncrossed relaxing backgrounds<br />
38
- wordscapes uncrossed mod apk rexdl<br />
39
- wordscapes uncrossed brain teaser<br />
40
- wordscapes uncrossed crossword game<br />
41
- wordscapes uncrossed offline mode<br />
42
- wordscapes uncrossed new levels<br />
43
- wordscapes uncrossed mod apk revdl<br />
44
- wordscapes uncrossed fun word quiz<br />
45
- wordscapes uncrossed challenge your mind<br />
46
- wordscapes uncrossed apk pure<br />
47
- wordscapes uncrossed mod apk happymod<br />
48
- wordscapes uncrossed easy to hard<br />
49
- wordscapes uncrossed word finder<br />
50
- wordscapes uncrossed mod apk android 1<br />
51
- wordscapes uncrossed free coins<br />
52
- wordscapes uncrossed mod menu apk<br />
53
- wordscapes uncrossed mod apk unlimited hints<br />
54
- wordscapes uncrossed word unscramble game<br />
55
- wordscapes uncrossed mod apk 1.3.1 <br />
56
- wordscapes uncrossed terms of service <br />
57
- wordscapes uncrossed mod apk latest version <br />
58
- wordscapes uncrossed word search game <br />
59
- wordscapes uncrossed mod apk no root <br />
60
- wordscapes uncrossed mod apk ios</p>
61
- <h3>The benefits of playing Wordscapes Uncrossed for your brain and vocabulary</h3>
62
- <p>Wordscapes Uncrossed is not only a fun game, but also a great way to improve your brain function and vocabulary. By playing this game, you can:</p>
63
- <ul>
64
- <li>Enhance your memory, concentration, and problem-solving skills</li>
65
- <li>Learn new words and expand your vocabulary</li>
66
- <li>Boost your creativity and imagination</li>
67
- <li>Reduce stress and anxiety</li>
68
- <li>Have fun and enjoy yourself</li>
69
- </ul>
70
- <h2>How to Download and Install Wordscapes Uncrossed Mod APK</h2>
71
- <h3>What is a mod APK and why you should use it</h3>
72
- <p>A mod APK is <p>A mod APK is a modified version of an original Android app that provides users with some extra or improved features. APK is a file format that contains all the elements of an app and can be installed on an Android device. Mod APKs are usually created by reworking the original app’s code or adding new components to it.</p>
73
- <h3>The features and advantages of Wordscapes Uncrossed Mod APK</h3>
74
- <p>If you want to enjoy Wordscapes Uncrossed without any limitations or ads, you might want to try Wordscapes Uncrossed Mod APK. This is a modified version of the game that offers some features and advantages that are not available in the official app, such as:</p>
75
- <ul>
76
- <li>Unlimited coins: You can use coins to buy hints, shuffles, or extra words in the game. With Wordscapes Uncrossed Mod APK, you don't have to worry about running out of coins, as you will have an infinite amount of them.</li>
77
- <li>Unlocked levels: You can access all the levels and themes in the game without having to complete the previous ones. This way, you can choose the difficulty and the scenery that suits your mood and preference.</li>
78
- <li>No ads: You can play Wordscapes Uncrossed without any interruptions or distractions from annoying ads. This will make your gaming experience more smooth and enjoyable.</li>
79
- </ul>
80
- <h3>The steps to download and install Wordscapes Uncrossed Mod APK on your device</h3>
81
- <p>If you want to download and install Wordscapes Uncrossed Mod APK on your device, you need to follow these steps:</p>
82
- <ol>
83
- <li>Make sure your device has enough storage space and is compatible with the game's requirements.</li>
84
- <li>Go to a reliable and safe website that offers Wordscapes Uncrossed Mod APK for download, such as [APKPure](^5^) or [APKFab](^6^).</li>
85
- <li>Tap on the download button and wait for the file to be downloaded on your device.</li>
86
- <li>Before installing the file, you need to enable the installation of apps from unknown sources on your device. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
87
- <li>Locate the downloaded file on your device and tap on it to start the installation process.</li>
88
- <li>Follow the instructions on the screen and wait for the installation to finish.</li>
89
- <li>Launch the game and enjoy Wordscapes Uncrossed Mod APK!</li>
90
- </ol> <h2>How to Enjoy Wordscapes Uncrossed Mod APK Safely and Responsibly</h2>
91
- <h3>The risks and precautions of using a mod APK</h3>
92
- <p>While Wordscapes Uncrossed Mod APK can provide you with some benefits, it also comes with some risks and drawbacks that you should be aware of. Some of the possible risks and precautions of using a mod APK are:</p>
93
- <ul>
94
- <li>Malware infection: Some mod APKs may contain malicious code or viruses that can harm your device or steal your personal information. To avoid this, you should only download mod APKs from trusted and verified sources, and scan them with an antivirus app before installing them.</li>
95
- <li>Legal issues: Some mod APKs may violate the intellectual property rights or terms of service of the original app developers or publishers. This can result in legal actions or penalties against you. To avoid this, you should respect the rights and policies of the original app owners, and use mod APKs for personal and non-commercial purposes only.</li>
96
- <li>Ban or suspension: Some mod APKs may give you an unfair advantage over other players or interfere with the game's functionality or security. This can result in your account being banned or suspended from the game or its online services. To avoid this, you should not use mod APKs that affect the game's balance or performance, and follow the game's rules and etiquette.</li>
97
- </ul>
98
- <h3>The tips and tricks to make the most of Wordscapes Uncrossed Mod APK</h3>
99
- <p>If you want to have more fun and success with Wordscapes Uncrossed Mod APK, you can try some of these tips and tricks:</p>
100
- <ul>
101
- <li>Use hints wisely: Hints can help you find the words that you are stuck on, but they also cost coins. If you want to save your coins, you can use hints sparingly, or only when you really need them.</li>
102
- <li>Shuffle the letters: Shuffling the letters can help you see new word possibilities and combinations that you might have missed. You can shuffle the letters as many times as you want, without any penalty.</li>
103
- <li>Find extra words: Finding extra words that are not part of the grid can earn you more coins and bonuses. You can use these coins to buy more hints, shuffles, or extra words in the game.</li>
104
- <li>Challenge yourself: If you want to test your skills and knowledge, you can try playing the daily puzzles or the harder levels in the game. These puzzles will challenge your brain and vocabulary more than the regular ones.</li>
105
- <li>Have fun: The most important thing is to have fun and enjoy yourself while playing Wordscapes Uncrossed Mod APK. You can play at your own pace, choose your own theme, listen to soothing music, and relax with this game.</li>
106
- </ul>
107
- <h3>The alternatives and recommendations for other word games</h3>
108
- <p>If you love word games, you might also want to try some of these alternatives and recommendations for other word games that are similar to Wordscapes Uncrossed:</p>
109
- <table>
110
- <tr><th>Name</th><th>Description</th></tr>
111
- <tr><td>Word Connect</td><td>A word game that requires you to connect letters to form words that fill up the crossword board. You can also discover hidden words and earn coins.</td></tr>
112
- <tr><td>Word Cookies</td><td>A word game that requires you to swipe letters to form words that match with the given cookies. You can also unlock new levels and themes as you play.</td></tr>
113
- <tr><td>Word Crossy</td><td>A word game that combines crossword puzzles and word searches. You have to swipe letters to form words that cross each other on the board. You can also collect butterflies and flowers as you play.</td></tr>
114
- <tr><td>Word Swipe</td><td>A word game that requires you to swipe letters to form words that fit into the blanks on the board. You can also use power-ups and hints to help you solve the puzzles.</td></tr>
115
- <tr><td>Word Link</td><td>A word game that requires you to link letters to form words that fill up the grid. You can also explore different themes and modes as you play.</td></tr>
116
- </table>
117
- <h1>Conclusion</h1>
118
- <p>Wordscapes Uncrossed is a fun and challenging word game that will keep you entertained and engaged for hours. It is a great way to improve your brain function and vocabulary while having fun. If you want to enjoy this game without any limitations or ads, you can download and install Wordscapes Uncrossed Mod APK on your device. However, you should also be aware of the risks and precautions of using a mod APK, and use it safely and responsibly. You can also try some tips and tricks to make the most of Wordscapes Uncrossed Mod APK on your device. However, you should also be aware of the risks and precautions of using a mod APK, and use it safely and responsibly. You can also try some tips and tricks to make the most of Wordscapes Uncrossed Mod APK, or explore some alternatives and recommendations for other word games that are similar to it. We hope you found this article helpful and informative, and we wish you a happy and enjoyable gaming experience with Wordscapes Uncrossed Mod APK!</p>
119
- <h2>FAQs</h2>
120
- <p>Here are some frequently asked questions about Wordscapes Uncrossed Mod APK:</p>
121
- <ol>
122
- <li>What is the difference between Wordscapes and Wordscapes Uncrossed?</li>
123
- <p>Wordscapes and Wordscapes Uncrossed are both word puzzle games that are developed by PeopleFun. The main difference is that Wordscapes Uncrossed has a simpler and more minimalist design, with fewer letters and words per puzzle, but more puzzles per theme. Wordscapes Uncrossed also has a daily mode that offers a new puzzle every day.</p>
124
- <li>Is Wordscapes Uncrossed Mod APK safe to use?</li>
125
- <p>Wordscapes Uncrossed Mod APK is generally safe to use, as long as you download it from a reliable and verified source, and scan it with an antivirus app before installing it. However, you should also be careful of the possible risks and drawbacks of using a mod APK, such as malware infection, legal issues, or ban or suspension from the game or its online services.</p>
126
- <li>How can I get more coins in Wordscapes Uncrossed Mod APK?</li>
127
- <p>There are several ways to get more coins in Wordscapes Uncrossed Mod APK, such as:</p>
128
- <ul>
129
- <li>Finding extra words that are not part of the grid</li>
130
- <li>Completing daily puzzles or achievements</li>
131
- <li>Watching ads or videos</li>
132
- <li>Using Wordscapes Uncrossed Mod APK that gives you unlimited coins</li>
133
- </ul>
134
- <li>How can I update Wordscapes Uncrossed Mod APK?</li>
135
- <p>To update Wordscapes Uncrossed Mod APK, you need to follow these steps:</p>
136
- <ol>
137
- <li>Delete the old version of Wordscapes Uncrossed Mod APK from your device</li>
138
- <li>Go to the website where you downloaded the mod APK and check if there is a new version available</li>
139
- <li>Download the new version of Wordscapes Uncrossed Mod APK on your device</li>
140
- <li>Install the new version of Wordscapes Uncrossed Mod APK on your device</li>
141
- <li>Launch the game and enjoy the updated features</li>
142
- </ol>
143
- <li>What are some other games like Wordscapes Uncrossed?</li>
144
- <p>If you like Wordscapes Uncrossed, you might also like some other games like Word Connect, Word Cookies, Word Crossy, Word Swipe, or Word Link. These are all word puzzle games that require you to swipe letters to form words that fit into the grid or the blanks. They also have different themes, modes, levels, and features that make them fun and challenging.</p>
145
- </ol></p> 401be4b1e0<br />
146
- <br />
147
- <br />
spaces/1phancelerku/anime-remove-background/Enjoy Taxi Game 2 on Windows PC Career Mode and Realistic GPS.md DELETED
@@ -1,113 +0,0 @@
1
-
2
- <h1>Taxi Game 2: How to Download and Play on PC Windows 7</h1>
3
- <p>Do you love driving games and want to experience the thrill of being a taxi driver in a realistic city? If yes, then you should try <strong>Taxi Game 2</strong>, one of the best taxi games for mobile devices. But what if you want to play it on your PC Windows 7 instead of your phone or tablet? Don't worry, we have got you covered. In this article, we will show you how to download and play Taxi Game 2 on PC Windows 7 using two different methods. We will also share some tips and tricks to help you master the game and become the best taxi driver in town.</p>
4
- <h2>taxi game 2 download for pc windows 7</h2><br /><p><b><b>Download</b> &middot;&middot;&middot; <a href="https://jinyurl.com/2uNOJD">https://jinyurl.com/2uNOJD</a></b></p><br /><br />
5
- <h2>Introduction</h2>
6
- <h3>What is Taxi Game 2?</h3>
7
- <p>Taxi Game 2 is a free driving simulator game developed by baklabs. It is the sequel to the popular Taxi Game, which has over 100 million downloads on Google Play Store. In Taxi Game 2, you can enjoy a full 3D open world, a cab driving simulator, a career mode, an engaging taxi driver gameplay, a GPS navigation system, and many routes across the city. You can also choose your passengers, buy new cars, upgrade your features, and build your taxi empire. Taxi Game 2 is constantly developed and updated, so you can expect new features and improvements in the future.</p>
8
- <h3>Why play Taxi Game 2 on PC Windows 7?</h3>
9
- <p>While Taxi Game 2 is designed for mobile devices, there are many reasons why you might want to play it on your PC Windows 7 instead. Here are some of them:</p>
10
- <ul>
11
- <li>You can enjoy a bigger screen and better graphics.</li>
12
- <li>You can use a keyboard and mouse or a gamepad for more precise and comfortable controls.</li>
13
- <li>You can avoid battery drain, overheating, and interruptions from phone calls or notifications.</li>
14
- <li>You can save your phone storage space and data usage.</li>
15
- <li>You can play with multiple accounts or instances using an emulator.</li>
16
- </ul>
17
- <h2>How to download Taxi Game 2 on PC Windows 7</h2>
18
- <h3>Method 1: Using an Android emulator</h3>
19
- <p>An Android emulator is a software that allows you to run Android apps and games on your PC Windows 7. There are many Android emulators available online, such as BlueStacks, LDPlayer, NoxPlayer, etc. Here are the steps to download and play Taxi Game 2 on PC Windows using an Android emulator:</p>
20
- <h4>Step 1: Download and install an Android emulator</h4>
21
- <p>Choose an Android emulator that suits your PC Windows 7 specifications and preferences. You can visit the official websites of the emulators and compare their features, requirements, and reviews. Then, download the emulator installer file and follow the instructions to install it on your PC Windows 7.</p>
22
- <h4>Step 2: Launch the emulator and sign in with your Google account</h4>
23
- <p>After installing the emulator, launch it and wait for it to load. You will see a virtual Android device on your PC Windows 7 screen. Then, sign in with your Google account or create a new one if you don't have one. This will allow you to access the Google Play Store and other Google services on the emulator.</p>
24
- <h4>Step 3: Search for Taxi Game 2 on the Google Play Store</h4>
25
- <p>On the emulator, open the Google Play Store app and search for Taxi Game 2. You will see the game icon and some information about it. Click on the Install button to download and install Taxi Game 2 on your PC Windows 7 via the emulator.</p>
26
- <h4>Step 4: Install and run Taxi Game 2 on your PC Windows 7</h4>
27
- <p>Once the installation is complete, you can find Taxi Game 2 on the emulator's home screen or app drawer. Click on the game icon to launch it and start playing Taxi Game 2 on your PC Windows 7. You can adjust the settings, such as the graphics quality, sound volume, control scheme, etc., according to your preferences. You can also use the emulator's features, such as screen recording, screenshot, keyboard mapping, etc., to enhance your gaming experience.</p>
28
- <p>taxi game 2 pc download free<br />
29
- taxi game 2 for windows 7 64 bit<br />
30
- taxi game 2 simulator on pc<br />
31
- taxi game 2 career mode download<br />
32
- taxi game 2 windows 7 install<br />
33
- taxi game 2 full version for pc<br />
34
- taxi game 2 offline download windows 7<br />
35
- taxi game 2 pc emulator<br />
36
- taxi game 2 apk for windows 7<br />
37
- taxi game 2 driving simulator pc<br />
38
- taxi game 2 latest version download<br />
39
- taxi game 2 on windows 10<br />
40
- taxi game 2 free online play pc<br />
41
- taxi game 2 hack download for pc<br />
42
- taxi game 2 mod apk windows 7<br />
43
- taxi game 2 cheats for pc<br />
44
- taxi game 2 update download windows 7<br />
45
- taxi game 2 bluestacks<br />
46
- taxi game 2 ldplayer<br />
47
- taxi game 2 noxplayer<br />
48
- taxi game 2 baklabs download for pc<br />
49
- taxi game 2 open world pc<br />
50
- taxi game 2 cab driver gameplay<br />
51
- taxi game 2 passengers pick up windows 7<br />
52
- taxi game 2 gps navigation pc<br />
53
- taxi game 2 city traffic racer download<br />
54
- taxi game 2 best car for pc<br />
55
- taxi game 2 gas stations windows 7<br />
56
- taxi game 2 tips and tricks pc<br />
57
- taxi game 2 review for windows 7<br />
58
- crazy taxi classic download for pc windows 7<br />
59
- crazy taxi classic on bluestacks windows 7<br />
60
- crazy taxi classic arcade game pc<br />
61
- crazy taxi classic emulator for windows 7<br />
62
- crazy taxi classic free play online pc<br />
63
- crazy taxi classic full screen windows 7<br />
64
- crazy taxi classic original soundtrack pc<br />
65
- crazy taxi classic cheats and codes windows 7<br />
66
- crazy taxi classic controller support pc<br />
67
- crazy taxi classic steam download windows 7<br />
68
- crazy driver: cab simulator on pc windows 7 <br />
69
- crazy driver: cab simulator free download <br />
70
- crazy driver: cab simulator gameplay <br />
71
- crazy driver: cab simulator mod apk <br />
72
- crazy driver: cab simulator online play <br />
73
- crazy driver: cab simulator hack tool <br />
74
- crazy driver: cab simulator unlimited money <br />
75
- crazy driver: cab simulator realistic graphics <br />
76
- crazy driver: cab simulator missions and challenges</p>
77
- <h3>Method 2: Using an APK/XAPK file</h3>
78
- <p>An APK/XAPK file is a package file that contains the app or game data and installation instructions. You can use an APK/XAPK file to install Taxi Game 2 on your PC Windows 7 without using an emulator. However, you will need an APK/XAPK installer software to do this. Here are the steps to download and play Taxi Game 2 on PC Windows using an APK/XAPK file:</p>
79
- <h4>Step 1: Download the APK/XAPK file of Taxi Game 2</h4>
80
- <p>You can download the APK/XAPK file of Taxi Game 2 from various online sources, such as APKPure, Uptodown, APKMirror, etc. Make sure that you download the latest version of the game and that it is compatible with your PC Windows 7. You can also scan the file for viruses or malware before downloading it.</p>
81
- <h4>Step 2: Install and run an APK/XAPK installer on your PC Windows 7</h4>
82
- <p>You will need an APK/XAPK installer software to install Taxi Game 2 on your PC Windows 7 using the APK/XAPK file. There are many APK/XAPK installer software available online, such as Pure APK Install, XAPK Installer, Apk Installer Pro, etc. You can choose one that suits your PC Windows 7 specifications and preferences. Then, download the installer software and follow the instructions to install it on your PC Windows 7.</p>
83
- <h4>Step 3: Open the APK/XAPK file with the installer and install Taxi Game 2 on your PC Windows 7</h4>
84
- <p>After installing the APK/XAPK installer software, launch it and locate the APK/XAPK file of Taxi Game 2 that you have downloaded. Then, open the file with the installer software and follow the instructions to install Taxi Game 2 on your PC Windows 7. Once the installation is complete, you can find Taxi Game 2 on your PC Windows 7 desktop or start menu. Click on the game icon to launch it and start playing Taxi Game 2 on your PC Windows 7.</p>
85
- <h2>Tips and tricks for playing Taxi Game 2 on PC Windows 7</h2>
86
- <p>Taxi Game 2 is a fun and challenging game that requires skill, strategy, and patience. Here are some tips and tricks to help you play better and enjoy more:</p>
87
- <h3>Tip 1: Use the Crazy Dash to boost your speed</h3>
88
- <p>The Crazy Dash is a special move that allows you to accelerate quickly and gain more speed. To perform it, you need to tap the brake and the gas pedals alternately. You will see a yellow flash on your screen when you do it correctly. The Crazy Dash can help you reach your destination faster, avoid traffic, and earn more money. However, be careful not to crash into other vehicles or obstacles, as this will damage your taxi and reduce your score.</p>
89
- <h3>Tip 2: Choose your passengers wisely</h3>
90
- <p>Not all passengers are the same in Taxi Game 2. Some passengers will pay you more, some will give you more time, and some will have special requests or challenges. You can see the information about each passenger on the top of their heads, such as their name, destination, fare, and time limit. You can also see their mood and personality, which will affect how they react to your driving. For example, some passengers will be happy if you drive fast and crazy, while others will be angry or scared. You should choose your passengers based on your preferences and goals. For instance, if you want to earn more money, you should pick up passengers who offer high fares or tips. If you want to have more fun, you should pick up passengers who like your driving style or have interesting stories.</p>
91
- <h3>Tip 3: Refuel your taxi at gas stations</h3>
92
- <p>Your taxi has a gas meter that shows how much fuel you have left. If you run out of gas, you will lose the game and have to start over. To avoid this, you should refuel your taxi at gas stations whenever you can. You can find gas stations on the map or follow the signs on the road. Refueling your taxi will cost you some money, but it is worth it in the long run. You can also upgrade your fuel tank capacity with the money you earn from your rides.</p>
93
- <h3>Tip 4: Follow the GPS navigation to find the best routes</h3>
94
- <p>Taxi Game 2 has a GPS navigation system that shows you the best routes to take your passengers to their destinations. You can see the GPS map on the top right corner of your screen, which will indicate your current location, your destination, and the optimal path to follow. You can also see arrows on the road that guide you along the way. Following the GPS navigation will help you save time, avoid traffic jams, and earn more money. However, you can also explore the city and find shortcuts or alternative routes if you want to challenge yourself or have more fun.</p>
95
- <h3>Tip 5: Upgrade your taxi with new cars and features</h3>
96
- <p>Taxi Game 2 allows you to upgrade your taxi with new cars and features that will improve your performance and appearance. You can buy new cars with different models, colors, and stats from the garage. You can also customize your cars with stickers, decals, spoilers, rims, etc. Moreover, you can enhance your cars with new features, such as turbo boosters, nitro boosters, shock absorbers, etc. Upgrading your taxi will cost you some money, but it will make your game more enjoyable and rewarding.</p>
97
- <h2>Conclusion</h2>
98
- <p>Taxi Game 2 is a great game for anyone who loves driving games and wants to experience the life of a taxi driver in a realistic city. It has amazing graphics, realistic physics, smooth controls, and diverse gameplay modes. It is also easy to download and play on PC Windows 7 using an Android emulator or an APK/XAPK file. With these tips and tricks, you can master Taxi Game 2 and become the best taxi driver in town.</p>
99
- <h3>FAQs</h3>
100
- <ul>
101
- <li>Q: Is Taxi Game 2 free to play?</li>
102
- <li>A: Yes, Taxi Game 2 is free to play and download on Google Play Store. However, it contains ads and in-app purchases that can enhance your gaming experience.</li>
103
- <li>Q: Can I play Taxi Game 2 offline?</li>
104
- <li>A: Yes, Taxi Game 2 can be played offline without an internet connection. However, some features may not be available or updated when offline.</li>
105
- <li>Q: How can I save my progress in Taxi Game 2?</li>
106
- <li>A: Taxi Game 2 automatically saves your progress when you exit the game or switch to another app. You can also sync your progress with your Google account by signing in with it on the game settings.</li>
107
- <li>Q: How can I contact the developers of Taxi Game 2?</li>
108
- <li>A: You can contact the developers of Taxi Game 2 by sending them an email at [email protected] or by visiting their website at https://www.baklabs.com/.</li>
109
- <li>Q: How can I rate and review Taxi Game 2?</li>
110
- <li>A: You can rate and review Taxi Game 2 by going to its page on Google Play Store and tapping on the stars and writing your feedback. You can also share your opinion and suggestions with other players and the developers by leaving a comment.</li>
111
- </ul></p> 401be4b1e0<br />
112
- <br />
113
- <br />
spaces/1toTree/lora_test/ppdiffusers/utils/deprecation_utils.py DELETED
@@ -1,64 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import inspect
17
- import warnings
18
- from typing import Any, Dict, Optional, Union
19
-
20
- from packaging import version
21
-
22
-
23
- def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True):
24
- from .. import __version__
25
-
26
- deprecated_kwargs = take_from
27
- values = ()
28
- if not isinstance(args[0], tuple):
29
- args = (args,)
30
-
31
- for attribute, version_name, message in args:
32
- if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
33
- raise ValueError(
34
- f"The deprecation tuple {(attribute, version_name, message)} should be removed since ppdiffusers'"
35
- f" version {__version__} is >= {version_name}"
36
- )
37
-
38
- warning = None
39
- if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
40
- values += (deprecated_kwargs.pop(attribute),)
41
- warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
42
- elif hasattr(deprecated_kwargs, attribute):
43
- values += (getattr(deprecated_kwargs, attribute),)
44
- warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
45
- elif deprecated_kwargs is None:
46
- warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
47
-
48
- if warning is not None:
49
- warning = warning + " " if standard_warn else ""
50
- warnings.warn(warning + message, FutureWarning, stacklevel=2)
51
-
52
- if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
53
- call_frame = inspect.getouterframes(inspect.currentframe())[1]
54
- filename = call_frame.filename
55
- line_number = call_frame.lineno
56
- function = call_frame.function
57
- key, value = next(iter(deprecated_kwargs.items()))
58
- raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
59
-
60
- if len(values) == 0:
61
- return
62
- elif len(values) == 1:
63
- return values[0]
64
- return values
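For illustration, a minimal sketch of how a `deprecate` helper with this signature is typically invoked when retiring a keyword argument; the function `scale_latents`, the kwarg `old_scale`, and the import path are assumptions made up for the example.

```python
# Illustrative sketch: retiring a legacy keyword argument with deprecate().
from ppdiffusers.utils import deprecate  # assumed re-export of the helper above

def scale_latents(latents, scale=0.18215, **kwargs):
    # Pops the hypothetical legacy kwarg `old_scale` from kwargs, emits a
    # FutureWarning, and returns its value (or None if it was not passed).
    old_scale = deprecate(
        ("old_scale", "0.12.0", "Use the `scale` argument instead."),
        take_from=kwargs,
    )
    if old_scale is not None:
        scale = old_scale
    return latents * scale
```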
 
 
spaces/52Hz/CMFNet_dehazing/app.py DELETED
@@ -1,38 +0,0 @@
1
- import os
2
- import gradio as gr
3
- from PIL import Image
4
- import torch
5
-
6
- os.system(
7
- 'wget https://github.com/FanChiMao/CMFNet/releases/download/v0.0/dehaze_I_OHaze_CMFNet.pth -P experiments/pretrained_models')
8
-
9
-
10
- def inference(img):
11
- if not os.path.exists('test'):
12
- os.system('mkdir test')
13
-
14
- basewidth = 512
15
- wpercent = (basewidth / float(img.size[0]))
16
- hsize = int((float(img.size[1]) * float(wpercent)))
17
- img = img.resize((basewidth, hsize), Image.BILINEAR)
18
- img.save("test/1.png", "PNG")
19
- os.system(
20
- 'python main_test_CMFNet.py --input_dir test --weights experiments/pretrained_models/dehaze_I_OHaze_CMFNet.pth')
21
- return 'results/1.png'
22
-
23
-
24
- title = "Compound Multi-branch Feature Fusion for Image Restoration (Dehaze)"
25
- description = "Gradio demo for CMFNet. CMFNet achieves competitive performance on three tasks: image deblurring, image dehazing and image deraindrop. Here, we provide a demo for image dehazing. To use it, simply upload your image, or click one of the examples to load them. Reference from: https://huggingface.co/akhaliq"
26
- article = "<p style='text-align: center'><a href='https://' target='_blank'>Compound Multi-branch Feature Fusion for Real Image Restoration</a> | <a href='https://github.com/FanChiMao/CMFNet' target='_blank'>Github Repo</a></p> <center><img src='https://visitor-badge.glitch.me/badge?page_id=52Hz_CMFNet_dehazing' alt='visitor badge'></center>"
27
-
28
- examples = [['Haze.png']]
29
- gr.Interface(
30
- inference,
31
- [gr.inputs.Image(type="pil", label="Input")],
32
- gr.outputs.Image(type="filepath", label="Output"),
33
- title=title,
34
- description=description,
35
- article=article,
36
- allow_flagging=False,
37
- examples=examples
38
- ).launch(debug=True)
 
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/data_gen/tts/txt_processors/en.py DELETED
@@ -1,77 +0,0 @@
1
- import re
2
- import unicodedata
3
-
4
- from g2p_en import G2p
5
- from g2p_en.expand import normalize_numbers
6
- from nltk import pos_tag
7
- from nltk.tokenize import TweetTokenizer
8
-
9
- from data_gen.tts.txt_processors.base_text_processor import BaseTxtProcessor, register_txt_processors
10
- from data_gen.tts.data_gen_utils import is_sil_phoneme, PUNCS
11
-
12
- class EnG2p(G2p):
13
- word_tokenize = TweetTokenizer().tokenize
14
-
15
- def __call__(self, text):
16
- # preprocessing
17
- words = EnG2p.word_tokenize(text)
18
- tokens = pos_tag(words) # tuples of (word, tag)
19
-
20
- # steps
21
- prons = []
22
- for word, pos in tokens:
23
- if re.search("[a-z]", word) is None:
24
- pron = [word]
25
-
26
- elif word in self.homograph2features: # Check homograph
27
- pron1, pron2, pos1 = self.homograph2features[word]
28
- if pos.startswith(pos1):
29
- pron = pron1
30
- else:
31
- pron = pron2
32
- elif word in self.cmu: # lookup CMU dict
33
- pron = self.cmu[word][0]
34
- else: # predict for oov
35
- pron = self.predict(word)
36
-
37
- prons.extend(pron)
38
- prons.extend([" "])
39
-
40
- return prons[:-1]
41
-
42
-
43
- @register_txt_processors('en')
44
- class TxtProcessor(BaseTxtProcessor):
45
- g2p = EnG2p()
46
-
47
- @staticmethod
48
- def preprocess_text(text):
49
- text = normalize_numbers(text)
50
- text = ''.join(char for char in unicodedata.normalize('NFD', text)
51
- if unicodedata.category(char) != 'Mn') # Strip accents
52
- text = text.lower()
53
- text = re.sub("[\'\"()]+", "", text)
54
- text = re.sub("[-]+", " ", text)
55
- text = re.sub(f"[^ a-z{PUNCS}]", "", text)
56
- text = re.sub(f" ?([{PUNCS}]) ?", r"\1", text) # !! -> !
57
- text = re.sub(f"([{PUNCS}])+", r"\1", text) # !! -> !
58
- text = text.replace("i.e.", "that is")
59
- text = text.replace("i.e.", "that is")
60
- text = text.replace("etc.", "etc")
61
- text = re.sub(f"([{PUNCS}])", r" \1 ", text)
62
- text = re.sub(rf"\s+", r" ", text)
63
- return text
64
-
65
- @classmethod
66
- def process(cls, txt, preprocess_args):
67
- txt = cls.preprocess_text(txt).strip()
68
- phs = cls.g2p(txt)
69
- txt_struct = [[w, []] for w in txt.split(" ")]
70
- i_word = 0
71
- for p in phs:
72
- if p == ' ':
73
- i_word += 1
74
- else:
75
- txt_struct[i_word][1].append(p)
76
- txt_struct = cls.postprocess(txt_struct, preprocess_args)
77
- return txt_struct, txt
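As a rough usage sketch of the `EnG2p` wrapper above (it assumes the NLTK POS-tagger data and g2p_en's CMU dictionary are already available locally):

```python
# Illustrative sketch: grapheme-to-phoneme conversion with the EnG2p class above.
g2p = EnG2p()
phonemes = g2p("I refuse to collect the refuse")
# Returns a flat list of ARPAbet phonemes with ' ' entries separating words;
# the two occurrences of "refuse" are disambiguated via their POS tags.
print(phonemes)
```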
 
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/models/diffusion/dpm_solver/__init__.py DELETED
@@ -1 +0,0 @@
1
- from .sampler import DPMSolverSampler
 
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/admin/export/$types.d.ts DELETED
@@ -1,8 +0,0 @@
1
- import type * as Kit from '@sveltejs/kit';
2
-
3
- type Expand<T> = T extends infer O ? { [K in keyof O]: O[K] } : never;
4
- type RouteParams = { }
5
- type RouteId = '/admin/export';
6
-
7
- export type RequestHandler = Kit.RequestHandler<RouteParams, RouteId>;
8
- export type RequestEvent = Kit.RequestEvent<RouteParams, RouteId>;
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Vercel.py DELETED
@@ -1,377 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import json, base64, requests, execjs, random, uuid
4
-
5
- from ..typing import Any, TypedDict, CreateResult
6
- from .base_provider import BaseProvider
7
- from abc import abstractmethod
8
-
9
-
10
- class Vercel(BaseProvider):
11
- url = 'https://sdk.vercel.ai'
12
- working = True
13
- supports_gpt_35_turbo = True
14
- supports_stream = True
15
-
16
- @staticmethod
17
- @abstractmethod
18
- def create_completion(
19
- model: str,
20
- messages: list[dict[str, str]],
21
- stream: bool,
22
- **kwargs
23
- ) -> CreateResult:
24
- if not model:
25
- model = "gpt-3.5-turbo"
26
- elif model not in model_info:
27
- raise ValueError(f"Model is not supported: {model}")
28
-
29
- headers = {
30
- 'authority' : 'sdk.vercel.ai',
31
- 'accept' : '*/*',
32
- 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
33
- 'cache-control' : 'no-cache',
34
- 'content-type' : 'application/json',
35
- 'custom-encoding' : get_anti_bot_token(),
36
- 'origin' : 'https://sdk.vercel.ai',
37
- 'pragma' : 'no-cache',
38
- 'referer' : 'https://sdk.vercel.ai/',
39
- 'sec-ch-ua' : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
40
- 'sec-ch-ua-mobile' : '?0',
41
- 'sec-ch-ua-platform': '"macOS"',
42
- 'sec-fetch-dest' : 'empty',
43
- 'sec-fetch-mode' : 'cors',
44
- 'sec-fetch-site' : 'same-origin',
45
- 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % (
46
- random.randint(99, 999),
47
- random.randint(99, 999)
48
- )
49
- }
50
-
51
- json_data = {
52
- 'model' : model_info[model]['id'],
53
- 'messages' : messages,
54
- 'playgroundId': str(uuid.uuid4()),
55
- 'chatIndex' : 0} | model_info[model]['default_params']
56
-
57
- max_retries = kwargs.get('max_retries', 20)
58
- for i in range(max_retries):
59
- response = requests.post('https://sdk.vercel.ai/api/generate',
60
- headers=headers, json=json_data, stream=True)
61
- try:
62
- response.raise_for_status()
63
- except:
64
- continue
65
- for token in response.iter_content(chunk_size=None):
66
- yield token.decode()
67
- break
68
-
69
-
70
- def get_anti_bot_token() -> str:
71
- headers = {
72
- 'authority' : 'sdk.vercel.ai',
73
- 'accept' : '*/*',
74
- 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
75
- 'cache-control' : 'no-cache',
76
- 'pragma' : 'no-cache',
77
- 'referer' : 'https://sdk.vercel.ai/',
78
- 'sec-ch-ua' : '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
79
- 'sec-ch-ua-mobile' : '?0',
80
- 'sec-ch-ua-platform': '"macOS"',
81
- 'sec-fetch-dest' : 'empty',
82
- 'sec-fetch-mode' : 'cors',
83
- 'sec-fetch-site' : 'same-origin',
84
- 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.%s.%s Safari/537.36' % (
85
- random.randint(99, 999),
86
- random.randint(99, 999)
87
- )
88
- }
89
-
90
- response = requests.get('https://sdk.vercel.ai/openai.jpeg',
91
- headers=headers).text
92
-
93
- raw_data = json.loads(base64.b64decode(response,
94
- validate=True))
95
-
96
- js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
97
- return (%s)(%s)''' % (raw_data['c'], raw_data['a'])
98
-
99
- raw_token = json.dumps({'r': execjs.compile(js_script).call(''), 't': raw_data['t']},
100
- separators = (",", ":"))
101
-
102
- return base64.b64encode(raw_token.encode('utf-16le')).decode()
103
-
104
- class ModelInfo(TypedDict):
105
- id: str
106
- default_params: dict[str, Any]
107
-
108
- model_info: dict[str, ModelInfo] = {
109
- 'claude-instant-v1': {
110
- 'id': 'anthropic:claude-instant-v1',
111
- 'default_params': {
112
- 'temperature': 1,
113
- 'maximumLength': 1024,
114
- 'topP': 1,
115
- 'topK': 1,
116
- 'presencePenalty': 1,
117
- 'frequencyPenalty': 1,
118
- 'stopSequences': ['\n\nHuman:'],
119
- },
120
- },
121
- 'claude-v1': {
122
- 'id': 'anthropic:claude-v1',
123
- 'default_params': {
124
- 'temperature': 1,
125
- 'maximumLength': 1024,
126
- 'topP': 1,
127
- 'topK': 1,
128
- 'presencePenalty': 1,
129
- 'frequencyPenalty': 1,
130
- 'stopSequences': ['\n\nHuman:'],
131
- },
132
- },
133
- 'claude-v2': {
134
- 'id': 'anthropic:claude-v2',
135
- 'default_params': {
136
- 'temperature': 1,
137
- 'maximumLength': 1024,
138
- 'topP': 1,
139
- 'topK': 1,
140
- 'presencePenalty': 1,
141
- 'frequencyPenalty': 1,
142
- 'stopSequences': ['\n\nHuman:'],
143
- },
144
- },
145
- 'a16z-infra/llama7b-v2-chat': {
146
- 'id': 'replicate:a16z-infra/llama7b-v2-chat',
147
- 'default_params': {
148
- 'temperature': 0.75,
149
- 'maximumLength': 3000,
150
- 'topP': 1,
151
- 'repetitionPenalty': 1,
152
- },
153
- },
154
- 'a16z-infra/llama13b-v2-chat': {
155
- 'id': 'replicate:a16z-infra/llama13b-v2-chat',
156
- 'default_params': {
157
- 'temperature': 0.75,
158
- 'maximumLength': 3000,
159
- 'topP': 1,
160
- 'repetitionPenalty': 1,
161
- },
162
- },
163
- 'replicate/llama-2-70b-chat': {
164
- 'id': 'replicate:replicate/llama-2-70b-chat',
165
- 'default_params': {
166
- 'temperature': 0.75,
167
- 'maximumLength': 3000,
168
- 'topP': 1,
169
- 'repetitionPenalty': 1,
170
- },
171
- },
172
- 'bigscience/bloom': {
173
- 'id': 'huggingface:bigscience/bloom',
174
- 'default_params': {
175
- 'temperature': 0.5,
176
- 'maximumLength': 1024,
177
- 'topP': 0.95,
178
- 'topK': 4,
179
- 'repetitionPenalty': 1.03,
180
- },
181
- },
182
- 'google/flan-t5-xxl': {
183
- 'id': 'huggingface:google/flan-t5-xxl',
184
- 'default_params': {
185
- 'temperature': 0.5,
186
- 'maximumLength': 1024,
187
- 'topP': 0.95,
188
- 'topK': 4,
189
- 'repetitionPenalty': 1.03,
190
- },
191
- },
192
- 'EleutherAI/gpt-neox-20b': {
193
- 'id': 'huggingface:EleutherAI/gpt-neox-20b',
194
- 'default_params': {
195
- 'temperature': 0.5,
196
- 'maximumLength': 1024,
197
- 'topP': 0.95,
198
- 'topK': 4,
199
- 'repetitionPenalty': 1.03,
200
- 'stopSequences': [],
201
- },
202
- },
203
- 'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {
204
- 'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
205
- 'default_params': {
206
- 'maximumLength': 1024,
207
- 'typicalP': 0.2,
208
- 'repetitionPenalty': 1,
209
- },
210
- },
211
- 'OpenAssistant/oasst-sft-1-pythia-12b': {
212
- 'id': 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
213
- 'default_params': {
214
- 'maximumLength': 1024,
215
- 'typicalP': 0.2,
216
- 'repetitionPenalty': 1,
217
- },
218
- },
219
- 'bigcode/santacoder': {
220
- 'id': 'huggingface:bigcode/santacoder',
221
- 'default_params': {
222
- 'temperature': 0.5,
223
- 'maximumLength': 1024,
224
- 'topP': 0.95,
225
- 'topK': 4,
226
- 'repetitionPenalty': 1.03,
227
- },
228
- },
229
- 'command-light-nightly': {
230
- 'id': 'cohere:command-light-nightly',
231
- 'default_params': {
232
- 'temperature': 0.9,
233
- 'maximumLength': 1024,
234
- 'topP': 1,
235
- 'topK': 0,
236
- 'presencePenalty': 0,
237
- 'frequencyPenalty': 0,
238
- 'stopSequences': [],
239
- },
240
- },
241
- 'command-nightly': {
242
- 'id': 'cohere:command-nightly',
243
- 'default_params': {
244
- 'temperature': 0.9,
245
- 'maximumLength': 1024,
246
- 'topP': 1,
247
- 'topK': 0,
248
- 'presencePenalty': 0,
249
- 'frequencyPenalty': 0,
250
- 'stopSequences': [],
251
- },
252
- },
253
- 'gpt-4': {
254
- 'id': 'openai:gpt-4',
255
- 'default_params': {
256
- 'temperature': 0.7,
257
- 'maximumLength': 8192,
258
- 'topP': 1,
259
- 'presencePenalty': 0,
260
- 'frequencyPenalty': 0,
261
- 'stopSequences': [],
262
- },
263
- },
264
- 'gpt-4-0613': {
265
- 'id': 'openai:gpt-4-0613',
266
- 'default_params': {
267
- 'temperature': 0.7,
268
- 'maximumLength': 8192,
269
- 'topP': 1,
270
- 'presencePenalty': 0,
271
- 'frequencyPenalty': 0,
272
- 'stopSequences': [],
273
- },
274
- },
275
- 'code-davinci-002': {
276
- 'id': 'openai:code-davinci-002',
277
- 'default_params': {
278
- 'temperature': 0.5,
279
- 'maximumLength': 1024,
280
- 'topP': 1,
281
- 'presencePenalty': 0,
282
- 'frequencyPenalty': 0,
283
- 'stopSequences': [],
284
- },
285
- },
286
- 'gpt-3.5-turbo': {
287
- 'id': 'openai:gpt-3.5-turbo',
288
- 'default_params': {
289
- 'temperature': 0.7,
290
- 'maximumLength': 4096,
291
- 'topP': 1,
292
- 'topK': 1,
293
- 'presencePenalty': 1,
294
- 'frequencyPenalty': 1,
295
- 'stopSequences': [],
296
- },
297
- },
298
- 'gpt-3.5-turbo-16k': {
299
- 'id': 'openai:gpt-3.5-turbo-16k',
300
- 'default_params': {
301
- 'temperature': 0.7,
302
- 'maximumLength': 16280,
303
- 'topP': 1,
304
- 'topK': 1,
305
- 'presencePenalty': 1,
306
- 'frequencyPenalty': 1,
307
- 'stopSequences': [],
308
- },
309
- },
310
- 'gpt-3.5-turbo-16k-0613': {
311
- 'id': 'openai:gpt-3.5-turbo-16k-0613',
312
- 'default_params': {
313
- 'temperature': 0.7,
314
- 'maximumLength': 16280,
315
- 'topP': 1,
316
- 'topK': 1,
317
- 'presencePenalty': 1,
318
- 'frequencyPenalty': 1,
319
- 'stopSequences': [],
320
- },
321
- },
322
- 'text-ada-001': {
323
- 'id': 'openai:text-ada-001',
324
- 'default_params': {
325
- 'temperature': 0.5,
326
- 'maximumLength': 1024,
327
- 'topP': 1,
328
- 'presencePenalty': 0,
329
- 'frequencyPenalty': 0,
330
- 'stopSequences': [],
331
- },
332
- },
333
- 'text-babbage-001': {
334
- 'id': 'openai:text-babbage-001',
335
- 'default_params': {
336
- 'temperature': 0.5,
337
- 'maximumLength': 1024,
338
- 'topP': 1,
339
- 'presencePenalty': 0,
340
- 'frequencyPenalty': 0,
341
- 'stopSequences': [],
342
- },
343
- },
344
- 'text-curie-001': {
345
- 'id': 'openai:text-curie-001',
346
- 'default_params': {
347
- 'temperature': 0.5,
348
- 'maximumLength': 1024,
349
- 'topP': 1,
350
- 'presencePenalty': 0,
351
- 'frequencyPenalty': 0,
352
- 'stopSequences': [],
353
- },
354
- },
355
- 'text-davinci-002': {
356
- 'id': 'openai:text-davinci-002',
357
- 'default_params': {
358
- 'temperature': 0.5,
359
- 'maximumLength': 1024,
360
- 'topP': 1,
361
- 'presencePenalty': 0,
362
- 'frequencyPenalty': 0,
363
- 'stopSequences': [],
364
- },
365
- },
366
- 'text-davinci-003': {
367
- 'id': 'openai:text-davinci-003',
368
- 'default_params': {
369
- 'temperature': 0.5,
370
- 'maximumLength': 4097,
371
- 'topP': 1,
372
- 'presencePenalty': 0,
373
- 'frequencyPenalty': 0,
374
- 'stopSequences': [],
375
- },
376
- },
377
- }
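For reference, a sketch of how this provider's streaming interface would be driven, based only on the `create_completion` signature above; the import path is assumed from the repository layout, and the call needs network access plus a JavaScript runtime for `execjs`.

```python
# Illustrative sketch: streaming tokens from the Vercel provider above.
from g4f.Provider import Vercel  # assumed import path

messages = [{"role": "user", "content": "Say hello in one sentence."}]
for token in Vercel.create_completion(model="gpt-3.5-turbo", messages=messages, stream=True):
    print(token, end="", flush=True)
```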
 
 
spaces/AdamOswald1/finetuned_diffusion/app.py DELETED
@@ -1,372 +0,0 @@
1
- from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
2
- import gradio as gr
3
- import torch
4
- from PIL import Image
5
- import utils
6
- import datetime
7
- import time
8
- import psutil
9
- import random
10
-
11
- start_time = time.time()
12
- is_colab = utils.is_google_colab()
13
- state = None
14
- current_steps = 25
15
-
16
- class Model:
17
- def __init__(self, name, path="", prefix=""):
18
- self.name = name
19
- self.path = path
20
- self.prefix = prefix
21
- self.pipe_t2i = None
22
- self.pipe_i2i = None
23
-
24
- models = [
25
- Model("Arcane", "nitrosocke/Arcane-Diffusion", "arcane style "),
26
- Model("Dreamlike Diffusion 1.0", "dreamlike-art/dreamlike-diffusion-1.0", "dreamlikeart "),
27
- Model("Archer", "nitrosocke/archer-diffusion", "archer style "),
28
- Model("Anything V3", "Linaqruf/anything-v3.0", ""),
29
- Model("Anything V4", "andite/anything-v4.0", ""),
30
- Model("Modern Disney", "nitrosocke/mo-di-diffusion", "modern disney style "),
31
- Model("Classic Disney", "nitrosocke/classic-anim-diffusion", "classic disney style "),
32
- Model("Loving Vincent (Van Gogh)", "dallinmackay/Van-Gogh-diffusion", "lvngvncnt "),
33
- Model("Wavyfusion", "wavymulder/wavyfusion", "wa-vy style "),
34
- Model("Analog Diffusion", "wavymulder/Analog-Diffusion", "analog style "),
35
- Model("Redshift renderer (Cinema4D)", "nitrosocke/redshift-diffusion", "redshift style "),
36
- Model("Midjourney v4 style", "prompthero/midjourney-v4-diffusion", "mdjrny-v4 style "),
37
- Model("Waifu", "hakurei/waifu-diffusion"),
38
- Model("Cyberpunk Anime", "DGSpitzer/Cyberpunk-Anime-Diffusion", "dgs illustration style "),
39
- Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style "),
40
- Model("TrinArt v2", "naclbit/trinart_stable_diffusion_v2"),
41
- Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style "),
42
- Model("Balloon Art", "Fictiverse/Stable_Diffusion_BalloonArt_Model", "BalloonArt "),
43
- Model("Tron Legacy", "dallinmackay/Tron-Legacy-diffusion", "trnlgcy "),
44
- Model("Pokémon", "lambdalabs/sd-pokemon-diffusers"),
45
- Model("Pony Diffusion", "AstraliteHeart/pony-diffusion"),
46
- Model("Robo Diffusion", "nousr/robo-diffusion"),
47
- Model("Epic Diffusion", "johnslegers/epic-diffusion"),
48
- Model("Space Machine", "rabidgremlin/sd-db-epic-space-machine", "EpicSpaceMachine"),
49
- Model("Spacecraft", "rabidgremlin/sd-db-epic-space-machine, Guizmus/Tardisfusion", "EpicSpaceMachine, Tardis Box style"),
50
- Model("TARDIS", "Guizmus/Tardisfusion", "Tardis Box style"),
51
- Model("Modern Era TARDIS Interior", "Guizmus/Tardisfusion", "Modern Tardis style"),
52
- Model("Classic Era TARDIS Interior", "Guizmus/Tardisfusion", "Classic Tardis style"),
53
- Model("Spacecraft Interior", "Guizmus/Tardisfusion, rabidgremlin/sd-db-epic-space-machine", "Classic Tardis style, Modern Tardis style, EpicSpaceMachine"),
54
- Model("CLIP", "EleutherAI/clip-guided-diffusion", "CLIP"),
55
- Model("Genshin Waifu", "crumb/genshin-stable-inversion, yuiqena/GenshinImpact, katakana/2D-Mix, Guizmus/AnimeChanStyle", "Female, female, Woman, woman, Girl, girl"),
56
- Model("Genshin", "crumb/genshin-stable-inversion, yuiqena/GenshinImpact, katakana/2D-Mix, Guizmus/AnimeChanStyle", ""),
57
- Model("Waifu", "hakurei/waifu-diffusion, technillogue/waifu-diffusion, Guizmus/AnimeChanStyle, katakana/2D-Mix", ""),
58
- Model("Pokémon", "lambdalabs/sd-pokemon-diffusers", ""),
59
- Model("Test", "AdamOswald1/Idk", ""),
60
- Model("Test2", "AdamOswald1/Tester", ""),
61
- Model("Anime", "Guizmus/AnimeChanStyle, katakana/2D-Mix", ""),
62
- Model("Beeple", "riccardogiorato/beeple-diffusion", "beeple style "),
63
- Model("Avatar", "riccardogiorato/avatar-diffusion", "avatartwow style "),
64
- Model("Poolsuite", "prompthero/poolsuite", "poolsuite style ")
65
- ]
66
-
67
- custom_model = None
68
- if is_colab:
69
- models.insert(0, Model("Custom model"))
70
- custom_model = models[0]
71
-
72
- last_mode = "txt2img"
73
- current_model = models[1] if is_colab else models[0]
74
- current_model_path = current_model.path
75
-
76
- if is_colab:
77
- pipe = StableDiffusionPipeline.from_pretrained(
78
- current_model.path,
79
- torch_dtype=torch.float16,
80
- scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
81
- safety_checker=lambda images, clip_input: (images, False)
82
- )
83
-
84
- else:
85
- pipe = StableDiffusionPipeline.from_pretrained(
86
- current_model.path,
87
- torch_dtype=torch.float16,
88
- scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
89
- )
90
-
91
- if torch.cuda.is_available():
92
- pipe = pipe.to("cuda")
93
- pipe.enable_xformers_memory_efficient_attention()
94
-
95
- device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
96
-
97
- def error_str(error, title="Error"):
98
- return f"""#### {title}
99
- {error}""" if error else ""
100
-
101
- def update_state(new_state):
102
- global state
103
- state = new_state
104
-
105
- def update_state_info(old_state):
106
- if state and state != old_state:
107
- return gr.update(value=state)
108
-
109
- def custom_model_changed(path):
110
- models[0].path = path
111
- global current_model
112
- current_model = models[0]
113
-
114
- def on_model_change(model_name):
115
-
116
- prefix = "Enter prompt. \"" + next((m.prefix for m in models if m.name == model_name), None) + "\" is prefixed automatically" if model_name != models[0].name else "Don't forget to use the custom model prefix in the prompt!"
117
-
118
- return gr.update(visible = model_name == models[0].name), gr.update(placeholder=prefix)
119
-
120
- def on_steps_change(steps):
121
- global current_steps
122
- current_steps = steps
123
-
124
- def pipe_callback(step: int, timestep: int, latents: torch.FloatTensor):
125
- update_state(f"{step}/{current_steps} steps")#\nTime left, sec: {timestep/100:.0f}")
126
-
127
- def inference(model_name, prompt, guidance, steps, n_images=1, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
128
-
129
- update_state(" ")
130
-
131
- print(psutil.virtual_memory()) # print memory usage
132
-
133
- global current_model
134
- for model in models:
135
- if model.name == model_name:
136
- current_model = model
137
- model_path = current_model.path
138
-
139
- # generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
140
- if seed == 0:
141
- seed = random.randint(0, 2147483647)
142
-
143
- if torch.cuda.is_available():
144
- generator = torch.Generator('cuda').manual_seed(seed)
145
- else:
146
- generator = torch.Generator().manual_seed(seed)
147
-
148
- try:
149
- if img is not None:
150
- return img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed), f"Done. Seed: {seed}"
151
- else:
152
- return txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed), f"Done. Seed: {seed}"
153
- except Exception as e:
154
- return None, error_str(e)
155
-
156
- def txt_to_img(model_path, prompt, n_images, neg_prompt, guidance, steps, width, height, generator, seed):
157
-
158
- print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")
159
-
160
- global last_mode
161
- global pipe
162
- global current_model_path
163
- if model_path != current_model_path or last_mode != "txt2img":
164
- current_model_path = model_path
165
-
166
- update_state(f"Loading {current_model.name} text-to-image model...")
167
-
168
- if is_colab or current_model == custom_model:
169
- pipe = StableDiffusionPipeline.from_pretrained(
170
- current_model_path,
171
- torch_dtype=torch.float16,
172
- scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
173
- safety_checker=lambda images, clip_input: (images, False)
174
- )
175
- else:
176
- pipe = StableDiffusionPipeline.from_pretrained(
177
- current_model_path,
178
- torch_dtype=torch.float16,
179
- scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
180
- )
181
- # pipe = pipe.to("cpu")
182
- # pipe = current_model.pipe_t2i
183
-
184
- if torch.cuda.is_available():
185
- pipe = pipe.to("cuda")
186
- pipe.enable_xformers_memory_efficient_attention()
187
- last_mode = "txt2img"
188
-
189
- prompt = current_model.prefix + prompt
190
- result = pipe(
191
- prompt,
192
- negative_prompt = neg_prompt,
193
- num_images_per_prompt=n_images,
194
- num_inference_steps = int(steps),
195
- guidance_scale = guidance,
196
- width = width,
197
- height = height,
198
- generator = generator,
199
- callback=pipe_callback)
200
-
201
- # update_state(f"Done. Seed: {seed}")
202
-
203
- return replace_nsfw_images(result)
204
-
205
- def img_to_img(model_path, prompt, n_images, neg_prompt, img, strength, guidance, steps, width, height, generator, seed):
206
-
207
- print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")
208
-
209
- global last_mode
210
- global pipe
211
- global current_model_path
212
- if model_path != current_model_path or last_mode != "img2img":
213
- current_model_path = model_path
214
-
215
- update_state(f"Loading {current_model.name} image-to-image model...")
216
-
217
- if is_colab or current_model == custom_model:
218
- pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
219
- current_model_path,
220
- torch_dtype=torch.float16,
221
- scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler"),
222
- safety_checker=lambda images, clip_input: (images, False)
223
- )
224
- else:
225
- pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
226
- current_model_path,
227
- torch_dtype=torch.float16,
228
- scheduler=DPMSolverMultistepScheduler.from_pretrained(current_model.path, subfolder="scheduler")
229
- )
230
- # pipe = pipe.to("cpu")
231
- # pipe = current_model.pipe_i2i
232
-
233
- if torch.cuda.is_available():
234
- pipe = pipe.to("cuda")
235
- pipe.enable_xformers_memory_efficient_attention()
236
- last_mode = "img2img"
237
-
238
- prompt = current_model.prefix + prompt
239
- ratio = min(height / img.height, width / img.width)
240
- img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
241
- result = pipe(
242
- prompt,
243
- negative_prompt = neg_prompt,
244
- num_images_per_prompt=n_images,
245
- image = img,
246
- num_inference_steps = int(steps),
247
- strength = strength,
248
- guidance_scale = guidance,
249
- # width = width,
250
- # height = height,
251
- generator = generator,
252
- callback=pipe_callback)
253
-
254
- # update_state(f"Done. Seed: {seed}")
255
-
256
- return replace_nsfw_images(result)
257
-
258
- def replace_nsfw_images(results):
259
-
260
- if is_colab:
261
- return results.images
262
-
263
- for i in range(len(results.images)):
264
- if results.nsfw_content_detected[i]:
265
- results.images[i] = Image.open("nsfw.png")
266
- return results.images
267
-
268
- # css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
269
- # """
270
- with gr.Blocks(css="style.css") as demo:
271
- gr.HTML(
272
- f"""
273
- <div class="finetuned-diffusion-div">
274
- <div>
275
- <h1>Finetuned Diffusion</h1>
276
- </div>
277
- <p>
278
- BROKEN, USE COLLAB VERSION INSTEAD! ALSO ADD ", 'safety_checker=None'" TO YOUR PROMPT!
279
- </p>
280
- <p>
281
- Demo for multiple fine-tuned Stable Diffusion models, trained on different styles: <br>
282
- <a href="https://huggingface.co/nitrosocke/Arcane-Diffusion">Arcane</a>, <a href="https://huggingface.co/nitrosocke/archer-diffusion">Archer</a>, <a href="https://huggingface.co/nitrosocke/elden-ring-diffusion">Elden Ring</a>, <a href="https://huggingface.co/nitrosocke/spider-verse-diffusion">Spider-Verse</a>, <a href="https://huggingface.co/nitrosocke/mo-di-diffusion">Modern Disney</a>, <a href="https://huggingface.co/nitrosocke/classic-anim-diffusion">Classic Disney</a>, <a href="https://huggingface.co/dallinmackay/Van-Gogh-diffusion">Loving Vincent (Van Gogh)</a>, <a href="https://huggingface.co/nitrosocke/redshift-diffusion">Redshift renderer (Cinema4D)</a>, <a href="https://huggingface.co/prompthero/midjourney-v4-diffusion">Midjourney v4 style</a>, <a href="https://huggingface.co/hakurei/waifu-diffusion">Waifu</a>, <a href="https://huggingface.co/lambdalabs/sd-pokemon-diffusers">Pokémon</a>, <a href="https://huggingface.co/AstraliteHeart/pony-diffusion">Pony Diffusion</a>, <a href="https://huggingface.co/nousr/robo-diffusion">Robo Diffusion</a>, <a href="https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion">Cyberpunk Anime</a>, <a href="https://huggingface.co/dallinmackay/Tron-Legacy-diffusion">Tron Legacy</a>, <a href="https://huggingface.co/Fictiverse/Stable_Diffusion_BalloonArt_Model">Balloon Art</a> + in colab notebook you can load any other Diffusers 🧨 SD model hosted on HuggingFace 🤗.
283
- </p>
284
- <p>You can skip the queue and load custom models in the colab: <a href="https://colab.research.google.com/gist/AdamOswald/fe67300b1b3638d610038cde5e145b0b/copy-of-fine-tuned-diffusion-gradio.ipynb"><img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667"></a></p>
285
- Running on <b>{device}</b>{(" in a <b>Google Colab</b>." if is_colab else "")}
286
- </p>
287
- <p>You can also duplicate this space and upgrade to gpu by going to settings:<br>
288
- <a style="display:inline-block" href="https://huggingface.co/spaces/anzorq/finetuned_diffusion?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a></p>
289
- </div>
290
- """
291
- )
292
- with gr.Row():
293
-
294
- with gr.Column(scale=55):
295
- with gr.Group():
296
- model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name)
297
- with gr.Box(visible=False) as custom_model_group:
298
- custom_model_path = gr.Textbox(label="Custom model path", placeholder="Path to model, e.g. nitrosocke/Arcane-Diffusion", interactive=True)
299
- gr.HTML("<div><font size='2'>Custom models have to be downloaded first, so give it some time.</font></div>")
300
-
301
- with gr.Row():
302
- prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder="Enter prompt. Style applied automatically").style(container=False)
303
- generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
304
-
305
-
306
- # image_out = gr.Image(height=512)
307
- gallery = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")
308
-
309
- state_info = gr.Textbox(label="State", show_label=False, max_lines=2).style(container=False)
310
- error_output = gr.Markdown()
311
-
312
- with gr.Column(scale=45):
313
- with gr.Tab("Options"):
314
- with gr.Group():
315
- neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
316
-
317
- n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=8, step=1)
318
-
319
- with gr.Row():
320
- guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
321
- steps = gr.Slider(label="Steps", value=current_steps, minimum=2, maximum=300, step=1)
322
-
323
- with gr.Row():
324
- width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
325
- height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
326
-
327
- seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
328
-
329
- with gr.Tab("Image to image"):
330
- with gr.Group():
331
- image = gr.Image(label="Image", height=256, tool="editor", type="pil")
332
- strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
333
-
334
- if is_colab:
335
- model_name.change(on_model_change, inputs=model_name, outputs=[custom_model_group, prompt], queue=False)
336
- custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)
337
- # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)
338
- steps.change(on_steps_change, inputs=[steps], outputs=[], queue=False)
339
-
340
- inputs = [model_name, prompt, guidance, steps, n_images, width, height, seed, image, strength, neg_prompt]
341
- outputs = [gallery, error_output]
342
- prompt.submit(inference, inputs=inputs, outputs=outputs)
343
- generate.click(inference, inputs=inputs, outputs=outputs)
344
-
345
- ex = gr.Examples([
346
- [models[7].name, "tiny cute and adorable kitten adventurer dressed in a warm overcoat with survival gear on a winters day", 7.5, 25],
347
- [models[4].name, "portrait of dwayne johnson", 7.0, 35],
348
- [models[5].name, "portrait of a beautiful alyx vance half life", 10, 25],
349
- [models[6].name, "Aloy from Horizon: Zero Dawn, half body portrait, smooth, detailed armor, beautiful face, illustration", 7.0, 30],
350
- [models[5].name, "fantasy portrait painting, digital art", 4.0, 20],
351
- ], inputs=[model_name, prompt, guidance, steps], outputs=outputs, fn=inference, cache_examples=False)
352
-
353
- gr.HTML("""
354
- <div style="border-top: 1px solid #303030;">
355
- <br>
356
- <p>Models by <a href="https://huggingface.co/nitrosocke">@nitrosocke</a>, <a href="https://twitter.com/haruu1367">@haruu1367</a>, <a href="https://twitter.com/DGSpitzer">@Helixngc7293</a>, <a href="https://twitter.com/dal_mack">@dal_mack</a>, <a href="https://twitter.com/prompthero">@prompthero</a> and others. ❤️</p>
357
- <p>This space uses the <a href="https://github.com/LuChengTHU/dpm-solver">DPM-Solver++</a> sampler by <a href="https://arxiv.org/abs/2206.00927">Cheng Lu, et al.</a>.</p>
358
- <p>Space by:<br>
359
- <a href="https://twitter.com/hahahahohohe"><img src="https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social" alt="Twitter Follow"></a><br>
360
- <a href="https://github.com/qunash"><img alt="GitHub followers" src="https://img.shields.io/github/followers/qunash?style=social" alt="Github Follow"></a></p><br><br>
361
- <a href="https://www.buymeacoffee.com/anzorq" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 45px !important;width: 162px !important;" ></a><br><br>
362
- <p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.finetuned_diffusion" alt="visitors"></p>
363
- </div>
364
- """)
365
-
366
- demo.load(update_state_info, inputs=state_info, outputs=state_info, every=0.5, show_progress=False)
367
-
368
- print(f"Space built in {time.time() - start_time:.2f} seconds")
369
-
370
- # if not is_colab:
371
- demo.queue(concurrency_count=1)
372
- demo.launch(debug=True, share=is_colab)
 
 
spaces/Adapter/CoAdapter/ldm/modules/extra_condition/openpose/model.py DELETED
@@ -1,178 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from collections import OrderedDict
4
-
5
-
6
- def make_layers(block, no_relu_layers):
7
- layers = []
8
- for layer_name, v in block.items():
9
- if 'pool' in layer_name:
10
- layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2])
11
- layers.append((layer_name, layer))
12
- else:
13
- conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1], kernel_size=v[2], stride=v[3], padding=v[4])
14
- layers.append((layer_name, conv2d))
15
- if layer_name not in no_relu_layers:
16
- layers.append(('relu_' + layer_name, nn.ReLU(inplace=True)))
17
-
18
- return nn.Sequential(OrderedDict(layers))
19
-
20
-
21
- class bodypose_model(nn.Module):
22
-
23
- def __init__(self):
24
- super(bodypose_model, self).__init__()
25
-
26
- # these layers have no relu layer
27
- no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',\
28
- 'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',\
29
- 'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',\
30
- 'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L1']
31
- blocks = {}
32
- block0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]), ('conv1_2', [64, 64, 3, 1, 1]), ('pool1_stage1', [2, 2,
33
- 0]),
34
- ('conv2_1', [64, 128, 3, 1, 1]), ('conv2_2', [128, 128, 3, 1, 1]),
35
- ('pool2_stage1', [2, 2, 0]), ('conv3_1', [128, 256, 3, 1, 1]),
36
- ('conv3_2', [256, 256, 3, 1, 1]), ('conv3_3', [256, 256, 3, 1, 1]),
37
- ('conv3_4', [256, 256, 3, 1, 1]), ('pool3_stage1', [2, 2, 0]),
38
- ('conv4_1', [256, 512, 3, 1, 1]), ('conv4_2', [512, 512, 3, 1, 1]),
39
- ('conv4_3_CPM', [512, 256, 3, 1, 1]), ('conv4_4_CPM', [256, 128, 3, 1, 1])])
40
-
41
- # Stage 1
42
- block1_1 = OrderedDict([('conv5_1_CPM_L1', [128, 128, 3, 1, 1]), ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]),
43
- ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]), ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]),
44
- ('conv5_5_CPM_L1', [512, 38, 1, 1, 0])])
45
-
46
- block1_2 = OrderedDict([('conv5_1_CPM_L2', [128, 128, 3, 1, 1]), ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]),
47
- ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]), ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]),
48
- ('conv5_5_CPM_L2', [512, 19, 1, 1, 0])])
49
- blocks['block1_1'] = block1_1
50
- blocks['block1_2'] = block1_2
51
-
52
- self.model0 = make_layers(block0, no_relu_layers)
53
-
54
- # Stages 2 - 6
55
- for i in range(2, 7):
56
- blocks['block%d_1' % i] = OrderedDict([('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]),
57
- ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]),
58
- ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
59
- ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]),
60
- ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]),
61
- ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]),
62
- ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])])
63
-
64
- blocks['block%d_2' % i] = OrderedDict([('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]),
65
- ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]),
66
- ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
67
- ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]),
68
- ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]),
69
- ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]),
70
- ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])])
71
-
72
- for k in blocks.keys():
73
- blocks[k] = make_layers(blocks[k], no_relu_layers)
74
-
75
- self.model1_1 = blocks['block1_1']
76
- self.model2_1 = blocks['block2_1']
77
- self.model3_1 = blocks['block3_1']
78
- self.model4_1 = blocks['block4_1']
79
- self.model5_1 = blocks['block5_1']
80
- self.model6_1 = blocks['block6_1']
81
-
82
- self.model1_2 = blocks['block1_2']
83
- self.model2_2 = blocks['block2_2']
84
- self.model3_2 = blocks['block3_2']
85
- self.model4_2 = blocks['block4_2']
86
- self.model5_2 = blocks['block5_2']
87
- self.model6_2 = blocks['block6_2']
88
-
89
- def forward(self, x):
90
-
91
- out1 = self.model0(x)
92
-
93
- out1_1 = self.model1_1(out1)
94
- out1_2 = self.model1_2(out1)
95
- out2 = torch.cat([out1_1, out1_2, out1], 1)
96
-
97
- out2_1 = self.model2_1(out2)
98
- out2_2 = self.model2_2(out2)
99
- out3 = torch.cat([out2_1, out2_2, out1], 1)
100
-
101
- out3_1 = self.model3_1(out3)
102
- out3_2 = self.model3_2(out3)
103
- out4 = torch.cat([out3_1, out3_2, out1], 1)
104
-
105
- out4_1 = self.model4_1(out4)
106
- out4_2 = self.model4_2(out4)
107
- out5 = torch.cat([out4_1, out4_2, out1], 1)
108
-
109
- out5_1 = self.model5_1(out5)
110
- out5_2 = self.model5_2(out5)
111
- out6 = torch.cat([out5_1, out5_2, out1], 1)
112
-
113
- out6_1 = self.model6_1(out6)
114
- out6_2 = self.model6_2(out6)
115
-
116
- return out6_1, out6_2
117
-
118
-
119
- class handpose_model(nn.Module):
120
-
121
- def __init__(self):
122
- super(handpose_model, self).__init__()
123
-
124
- # these layers have no relu layer
125
- no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',\
126
- 'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
127
- # stage 1
128
- block1_0 = OrderedDict([('conv1_1', [3, 64, 3, 1, 1]), ('conv1_2', [64, 64, 3, 1, 1]),
129
- ('pool1_stage1', [2, 2, 0]), ('conv2_1', [64, 128, 3, 1, 1]),
130
- ('conv2_2', [128, 128, 3, 1, 1]), ('pool2_stage1', [2, 2, 0]),
131
- ('conv3_1', [128, 256, 3, 1, 1]), ('conv3_2', [256, 256, 3, 1, 1]),
132
- ('conv3_3', [256, 256, 3, 1, 1]), ('conv3_4', [256, 256, 3, 1, 1]),
133
- ('pool3_stage1', [2, 2, 0]), ('conv4_1', [256, 512, 3, 1, 1]),
134
- ('conv4_2', [512, 512, 3, 1, 1]), ('conv4_3', [512, 512, 3, 1, 1]),
135
- ('conv4_4', [512, 512, 3, 1, 1]), ('conv5_1', [512, 512, 3, 1, 1]),
136
- ('conv5_2', [512, 512, 3, 1, 1]), ('conv5_3_CPM', [512, 128, 3, 1, 1])])
137
-
138
- block1_1 = OrderedDict([('conv6_1_CPM', [128, 512, 1, 1, 0]), ('conv6_2_CPM', [512, 22, 1, 1, 0])])
139
-
140
- blocks = {}
141
- blocks['block1_0'] = block1_0
142
- blocks['block1_1'] = block1_1
143
-
144
- # stage 2-6
145
- for i in range(2, 7):
146
- blocks['block%d' % i] = OrderedDict([('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]),
147
- ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]),
148
- ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]),
149
- ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]),
150
- ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]),
151
- ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]),
152
- ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0])])
153
-
154
- for k in blocks.keys():
155
- blocks[k] = make_layers(blocks[k], no_relu_layers)
156
-
157
- self.model1_0 = blocks['block1_0']
158
- self.model1_1 = blocks['block1_1']
159
- self.model2 = blocks['block2']
160
- self.model3 = blocks['block3']
161
- self.model4 = blocks['block4']
162
- self.model5 = blocks['block5']
163
- self.model6 = blocks['block6']
164
-
165
- def forward(self, x):
166
- out1_0 = self.model1_0(x)
167
- out1_1 = self.model1_1(out1_0)
168
- concat_stage2 = torch.cat([out1_1, out1_0], 1)
169
- out_stage2 = self.model2(concat_stage2)
170
- concat_stage3 = torch.cat([out_stage2, out1_0], 1)
171
- out_stage3 = self.model3(concat_stage3)
172
- concat_stage4 = torch.cat([out_stage3, out1_0], 1)
173
- out_stage4 = self.model4(concat_stage4)
174
- concat_stage5 = torch.cat([out_stage4, out1_0], 1)
175
- out_stage5 = self.model5(concat_stage5)
176
- concat_stage6 = torch.cat([out_stage5, out1_0], 1)
177
- out_stage6 = self.model6(concat_stage6)
178
- return out_stage6
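As a quick, illustrative shape check of the body model above (three pooling stages give 1/8 resolution; the two output branches carry 38 part-affinity-field channels and 19 keypoint-heatmap channels):

```python
# Illustrative sketch: forward pass through bodypose_model with a dummy input.
import torch

model = bodypose_model().eval()
x = torch.randn(1, 3, 368, 368)  # any height/width divisible by 8
with torch.no_grad():
    pafs, heatmaps = model(x)
print(pafs.shape)      # torch.Size([1, 38, 46, 46]) - part affinity fields
print(heatmaps.shape)  # torch.Size([1, 19, 46, 46]) - keypoint heatmaps
```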
 
 
spaces/Adapter/T2I-Adapter/ldm/modules/diffusionmodules/__init__.py DELETED
File without changes
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/fixwidthbuttons/Factory.d.ts DELETED
@@ -1,5 +0,0 @@
1
- import FixWidthButtons from './FixWidthButtons';
2
-
3
- export default function (
4
- config?: FixWidthButtons.IConfig
5
- ): FixWidthButtons;
 
 
 
 
 
 
spaces/AiBototicus/BucksAI-2/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: BucksAI 2
3
- emoji: 🐢
4
- colorFrom: green
5
- colorTo: red
6
- sdk: streamlit
7
- sdk_version: 1.17.0
8
- app_file: app.py
9
- pinned: false
10
- license: bsd-3-clause-clear
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AlekseyKorshuk/instagram-filter-removal/modules/normalization.py DELETED
@@ -1,16 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
-
4
-
5
- class AdaIN(nn.Module):
6
- def __init__(self):
7
- super().__init__()
8
-
9
- def forward(self, x, y):
10
- ch = y.size(1)
11
- sigma, mu = torch.split(y.unsqueeze(-1).unsqueeze(-1), [ch // 2, ch // 2], dim=1)
12
-
13
- x_mu = x.mean(dim=[2, 3], keepdim=True)
14
- x_sigma = x.std(dim=[2, 3], keepdim=True)
15
-
16
- return sigma * ((x - x_mu) / x_sigma) + mu
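A small illustrative check of the shapes this AdaIN module expects: the style vector `y` stacks the per-channel scale (`sigma`) and shift (`mu`) halves along its second dimension.

```python
# Illustrative sketch: applying the AdaIN module above to dummy tensors.
import torch

adain = AdaIN()
x = torch.randn(4, 64, 32, 32)   # content features: (N, C, H, W)
y = torch.randn(4, 128)          # style vector: first C entries = sigma, last C = mu
out = adain(x, y)
print(out.shape)                 # torch.Size([4, 64, 32, 32])
```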
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/PTI/models/StyleCLIP/global_directions/PlayInteractively.py DELETED
@@ -1,197 +0,0 @@
1
-
2
-
3
-
4
- from tkinter import Tk
5
- from PIL import Image, ImageTk
6
- from tkinter.filedialog import askopenfilename
7
- from GUI import View
8
- from Inference import StyleCLIP
9
- import argparse
10
- #%%
11
-
12
-
13
- class PlayInteractively(): #Controller
14
- '''
15
- followed Model View Controller Design Pattern
16
-
17
- controller, model, view
18
- '''
19
- def __init__(self,dataset_name='ffhq'):
20
-
21
- self.root = Tk()
22
- self.view=View(self.root)
23
- self.img_ratio=2
24
- self.style_clip=StyleCLIP(dataset_name)
25
-
26
- self.view.neutral.bind("<Return>", self.text_n)
27
- self.view.target.bind("<Return>", self.text_t)
28
- self.view.alpha.bind('<ButtonRelease-1>', self.ChangeAlpha)
29
- self.view.beta.bind('<ButtonRelease-1>', self.ChangeBeta)
30
- self.view.set_init.bind('<ButtonPress-1>', self.SetInit)
31
- self.view.reset.bind('<ButtonPress-1>', self.Reset)
32
- self.view.bg.bind('<Double-1>', self.open_img)
33
-
34
-
35
- self.drawn = None
36
-
37
- self.view.target.delete(1.0, "end")
38
- self.view.target.insert("end", self.style_clip.target)
39
- #
40
- self.view.neutral.delete(1.0, "end")
41
- self.view.neutral.insert("end", self.style_clip.neutral)
42
-
43
-
44
- def Reset(self,event):
45
- self.style_clip.GetDt2()
46
- self.style_clip.M.alpha=[0]
47
-
48
- self.view.beta.set(self.style_clip.beta)
49
- self.view.alpha.set(0)
50
-
51
- img=self.style_clip.GetImg()
52
- img=Image.fromarray(img)
53
- img = ImageTk.PhotoImage(img)
54
- self.addImage_m(img)
55
-
56
-
57
- def SetInit(self,event):
58
- codes=self.style_clip.GetCode()
59
- self.style_clip.M.dlatent_tmp=[tmp[:,0] for tmp in codes]
60
- print('set init')
61
-
62
- def ChangeAlpha(self,event):
63
- tmp=self.view.alpha.get()
64
- self.style_clip.M.alpha=[float(tmp)]
65
-
66
- img=self.style_clip.GetImg()
67
- print('manipulate one')
68
- img=Image.fromarray(img)
69
- img = ImageTk.PhotoImage(img)
70
- self.addImage_m(img)
71
-
72
- def ChangeBeta(self,event):
73
- tmp=self.view.beta.get()
74
- self.style_clip.beta=float(tmp)
75
-
76
- img=self.style_clip.GetImg()
77
- print('manipulate one')
78
- img=Image.fromarray(img)
79
- img = ImageTk.PhotoImage(img)
80
- self.addImage_m(img)
81
-
82
- def ChangeDataset(self,event):
83
-
84
- dataset_name=self.view.set_category.get()
85
-
86
- self.style_clip.LoadData(dataset_name)
87
-
88
- self.view.target.delete(1.0, "end")
89
- self.view.target.insert("end", self.style_clip.target)
90
-
91
- self.view.neutral.delete(1.0, "end")
92
- self.view.neutral.insert("end", self.style_clip.neutral)
93
-
94
- def text_t(self,event):
95
- tmp=self.view.target.get("1.0",'end')
96
- tmp=tmp.replace('\n','')
97
-
98
- self.view.target.delete(1.0, "end")
99
- self.view.target.insert("end", tmp)
100
-
101
- print('target',tmp,'###')
102
- self.style_clip.target=tmp
103
- self.style_clip.GetDt2()
104
- self.view.beta.set(self.style_clip.beta)
105
- self.view.alpha.set(3)
106
- self.style_clip.M.alpha=[3]
107
-
108
- img=self.style_clip.GetImg()
109
- print('manipulate one')
110
- img=Image.fromarray(img)
111
- img = ImageTk.PhotoImage(img)
112
- self.addImage_m(img)
113
-
114
-
115
- def text_n(self,event):
116
- tmp=self.view.neutral.get("1.0",'end')
117
- tmp=tmp.replace('\n','')
118
-
119
- self.view.neutral.delete(1.0, "end")
120
- self.view.neutral.insert("end", tmp)
121
-
122
- print('neutral',tmp,'###')
123
- self.style_clip.neutral=tmp
124
- self.view.target.delete(1.0, "end")
125
- self.view.target.insert("end", tmp)
126
-
127
-
128
- def run(self):
129
- self.root.mainloop()
130
-
131
- def addImage(self,img):
132
- self.view.bg.create_image(self.view.width/2, self.view.height/2, image=img, anchor='center')
133
- self.image=img #save a copy of image. if not the image will disappear
134
-
135
- def addImage_m(self,img):
136
- self.view.mani.create_image(512, 512, image=img, anchor='center')
137
- self.image2=img
138
-
139
-
140
- def openfn(self):
141
- filename = askopenfilename(title='open',initialdir='./data/'+self.style_clip.M.dataset_name+'/',filetypes=[("all image format", ".jpg"),("all image format", ".png")])
142
- return filename
143
-
144
- def open_img(self,event):
145
- x = self.openfn()
146
- print(x)
147
-
148
-
149
- img = Image.open(x)
150
- img2 = img.resize(( 512,512), Image.ANTIALIAS)
151
- img2 = ImageTk.PhotoImage(img2)
152
- self.addImage(img2)
153
-
154
- img = ImageTk.PhotoImage(img)
155
- self.addImage_m(img)
156
-
157
- img_index=x.split('/')[-1].split('.')[0]
158
- img_index=int(img_index)
159
- print(img_index)
160
- self.style_clip.M.img_index=img_index
161
- self.style_clip.M.dlatent_tmp=[tmp[img_index:(img_index+1)] for tmp in self.style_clip.M.dlatents]
162
-
163
-
164
- self.style_clip.GetDt2()
165
- self.view.beta.set(self.style_clip.beta)
166
- self.view.alpha.set(3)
167
-
168
- #%%
169
- if __name__ == "__main__":
170
- parser = argparse.ArgumentParser(description='Process some integers.')
171
-
172
- parser.add_argument('--dataset_name',type=str,default='ffhq',
173
- help='name of dataset, for example, ffhq')
174
-
175
- args = parser.parse_args()
176
- dataset_name=args.dataset_name
177
-
178
- self=PlayInteractively(dataset_name)
179
- self.run()
180
-
181
-
182
-
183
-
184
-
185
-
186
-
187
-
188
-
189
-
190
-
191
-
192
-
193
-
194
-
195
-
196
-
197
-
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/multi_subject_dreambooth/README.md DELETED
@@ -1,338 +0,0 @@
1
- # Multi Subject DreamBooth training
2
-
3
- [DreamBooth](https://arxiv.org/abs/2208.12242) is a method to personalize text-to-image models like Stable Diffusion given just a few (3-5) images of a subject.
4
- This `train_multi_subject_dreambooth.py` script shows how to implement the training procedure for one or more subjects and adapt it for Stable Diffusion. Note that this code is based on the `examples/dreambooth/train_dreambooth.py` script as of 01/06/2022.
5
-
6
- This script was added by @kopsahlong, and is not actively maintained. However, if you come across anything that could use fixing, feel free to open an issue and tag @kopsahlong.
7
-
8
- ## Running locally with PyTorch
9
- ### Installing the dependencies
10
-
11
- Before running the script, make sure to install the library's training dependencies:
12
-
13
- To start, execute the following steps in a new virtual environment:
14
- ```bash
15
- git clone https://github.com/huggingface/diffusers
16
- cd diffusers
17
- pip install -e .
18
- ```
19
-
20
- Then cd into the folder `diffusers/examples/research_projects/multi_subject_dreambooth` and run the following:
21
- ```bash
22
- pip install -r requirements.txt
23
- ```
24
-
25
- And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
26
-
27
- ```bash
28
- accelerate config
29
- ```
30
-
31
- Or for a default accelerate configuration without answering questions about your environment
32
-
33
- ```bash
34
- accelerate config default
35
- ```
36
-
37
- Or if your environment doesn't support an interactive shell e.g. a notebook
38
-
39
- ```python
40
- from accelerate.utils import write_basic_config
41
- write_basic_config()
42
- ```
43
-
44
- ### Multi Subject Training Example
45
- In order to have your model learn multiple concepts at once, we simply add in the additional data directories and prompts to our `instance_data_dir` and `instance_prompt` (as well as `class_data_dir` and `class_prompt` if `--with_prior_preservation` is specified) as one comma separated string.
46
-
47
- See an example with 2 subjects below, which learns a model for one dog subject and one human subject:
48
-
49
- ```bash
50
- export MODEL_NAME="CompVis/stable-diffusion-v1-4"
51
- export OUTPUT_DIR="path-to-save-model"
52
-
53
- # Subject 1
54
- export INSTANCE_DIR_1="path-to-instance-images-concept-1"
55
- export INSTANCE_PROMPT_1="a photo of a sks dog"
56
- export CLASS_DIR_1="path-to-class-images-dog"
57
- export CLASS_PROMPT_1="a photo of a dog"
58
-
59
- # Subject 2
60
- export INSTANCE_DIR_2="path-to-instance-images-concept-2"
61
- export INSTANCE_PROMPT_2="a photo of a t@y person"
62
- export CLASS_DIR_2="path-to-class-images-person"
63
- export CLASS_PROMPT_2="a photo of a person"
64
-
65
- accelerate launch train_multi_subject_dreambooth.py \
66
- --pretrained_model_name_or_path=$MODEL_NAME \
67
- --instance_data_dir="$INSTANCE_DIR_1,$INSTANCE_DIR_2" \
68
- --output_dir=$OUTPUT_DIR \
69
- --train_text_encoder \
70
- --instance_prompt="$INSTANCE_PROMPT_1,$INSTANCE_PROMPT_2" \
71
- --with_prior_preservation \
72
- --prior_loss_weight=1.0 \
73
- --class_data_dir="$CLASS_DIR_1,$CLASS_DIR_2" \
74
- --class_prompt="$CLASS_PROMPT_1,$CLASS_PROMPT_2"\
75
- --num_class_images=50 \
76
- --resolution=512 \
77
- --train_batch_size=1 \
78
- --gradient_accumulation_steps=1 \
79
- --learning_rate=1e-6 \
80
- --lr_scheduler="constant" \
81
- --lr_warmup_steps=0 \
82
- --max_train_steps=1500
83
- ```
84
-
85
- This example shows training for 2 subjects, but please note that the model can be trained on any number of new concepts. This can be done by continuing to add in the corresponding directories and prompts to the corresponding comma separated string.
86
-
87
- Note also that in this script, `sks` and `t@y` were used as tokens to learn the new subjects ([this thread](https://github.com/XavierXiao/Dreambooth-Stable-Diffusion/issues/71) inspired the use of `t@y` as our second identifier). However, there may be better rare tokens to experiment with, and results also seemed to be good when more intuitive words are used.
88
-
89
- **Important**: new parameters have been added to the script that make it possible to validate training progress by
90
- generating images at specified steps. Also, because a comma-separated list in a prompt text field is
91
- never a good idea (commas routinely appear inside regular prompt text), we
92
- introduce the `concept_list` parameter: allowing to specify a json-like file where you can define the different
93
- configuration for each subject that you want to train.
94
-
95
- An example of how to generate the file:
96
- ```python
97
- import json
98
-
99
- # here we are using parameters for prior-preservation and validation as well.
100
- concepts_list = [
101
- {
102
- "instance_prompt": "drawing of a t@y meme",
103
- "class_prompt": "drawing of a meme",
104
- "instance_data_dir": "/some_folder/meme_toy",
105
- "class_data_dir": "/data/meme",
106
- "validation_prompt": "drawing of a t@y meme about football in Uruguay",
107
- "validation_negative_prompt": "black and white"
108
- },
109
- {
110
- "instance_prompt": "drawing of a sks sir",
111
- "class_prompt": "drawing of a sir",
112
- "instance_data_dir": "/some_other_folder/sir_sks",
113
- "class_data_dir": "/data/sir",
114
- "validation_prompt": "drawing of a sks sir with the Uruguayan sun in his chest",
115
- "validation_negative_prompt": "an old man",
116
- "validation_guidance_scale": 20,
117
- "validation_number_images": 3,
118
- "validation_inference_steps": 10
119
- }
120
- ]
121
-
122
- with open("concepts_list.json", "w") as f:
123
- json.dump(concepts_list, f, indent=4)
124
- ```
125
- And then just point to the file when executing the script:
126
-
127
- ```bash
128
- # exports...
129
- accelerate launch train_multi_subject_dreambooth.py \
130
- # more parameters...
131
- --concepts_list="concepts_list.json"
132
- ```
133
-
134
- You can use the script's `--help` output to get a better sense of each parameter.
135
-
136
- ### Inference
137
-
138
- Once you have trained a model using the above command, you can run inference simply with the `StableDiffusionPipeline`. Make sure to include the identifier (e.g. `sks` in the above example) in your prompt.
139
-
140
- ```python
141
- from diffusers import StableDiffusionPipeline
142
- import torch
143
-
144
- model_id = "path-to-your-trained-model"
145
- pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
146
-
147
- prompt = "A photo of a t@y person petting an sks dog"
148
- image = pipe(prompt, num_inference_steps=200, guidance_scale=7.5).images[0]
149
-
150
- image.save("person-petting-dog.png")
151
- ```
152
-
153
- ### Inference from a training checkpoint
154
-
155
- You can also perform inference from one of the checkpoints saved during the training process, if you used the `--checkpointing_steps` argument. Please refer to [the documentation](https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint) to see how to do it.
156
-
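- For illustration, below is a minimal sketch of what loading such a checkpoint for inference can look like. It assumes
- the folder layout written by recent versions of the script (a hypothetical `checkpoint-500` directory containing a
- `unet` subfolder, plus a `text_encoder` subfolder when `--train_text_encoder` was used); refer to the linked
- documentation for the exact, up-to-date procedure.
-
- ```python
- from diffusers import StableDiffusionPipeline, UNet2DConditionModel
- from transformers import CLIPTextModel
-
- # Hypothetical paths: adjust to your own --output_dir and checkpoint step.
- base_model = "CompVis/stable-diffusion-v1-4"  # base model used for training
- checkpoint_dir = "path-to-save-model/checkpoint-500"  # folder written by --checkpointing_steps
-
- # Load the fine-tuned UNet (and text encoder, if it was trained) from the checkpoint.
- unet = UNet2DConditionModel.from_pretrained(f"{checkpoint_dir}/unet")
- text_encoder = CLIPTextModel.from_pretrained(f"{checkpoint_dir}/text_encoder")
-
- pipe = StableDiffusionPipeline.from_pretrained(base_model, unet=unet, text_encoder=text_encoder).to("cuda")
- image = pipe("A photo of a t@y person petting an sks dog").images[0]
- image.save("checkpoint-inference.png")
- ```
-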
157
- ## Additional Dreambooth documentation
158
- Because the `train_multi_subject_dreambooth.py` script here was forked from an original version of `train_dreambooth.py` in the `examples/dreambooth` folder, I've included the original applicable training documentation for single subject examples below.
159
-
160
- This should explain how to play with training variables such as prior preservation, fine-tuning the text encoder, etc., all of which are still applicable to our multi-subject training code. Note also that the examples below, which are single-subject examples, also work with `train_multi_subject_dreambooth.py`, as this script supports 1 (or more) subjects.
161
-
162
- ### Single subject dog toy example
163
-
164
- Let's get our dataset. Download images from [here](https://drive.google.com/drive/folders/1BO_dyz-p65qhBRRMRA4TbZ8qW4rB99JZ) and save them in a directory. This will be our training data.
165
-
166
- And launch the training using
167
-
168
- **___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**
169
-
170
- ```bash
171
- export MODEL_NAME="CompVis/stable-diffusion-v1-4"
172
- export INSTANCE_DIR="path-to-instance-images"
173
- export OUTPUT_DIR="path-to-save-model"
174
-
175
- accelerate launch train_dreambooth.py \
176
- --pretrained_model_name_or_path=$MODEL_NAME \
177
- --instance_data_dir=$INSTANCE_DIR \
178
- --output_dir=$OUTPUT_DIR \
179
- --instance_prompt="a photo of sks dog" \
180
- --resolution=512 \
181
- --train_batch_size=1 \
182
- --gradient_accumulation_steps=1 \
183
- --learning_rate=5e-6 \
184
- --lr_scheduler="constant" \
185
- --lr_warmup_steps=0 \
186
- --max_train_steps=400
187
- ```
188
-
189
- ### Training with prior-preservation loss
190
-
191
- Prior-preservation is used to avoid overfitting and language-drift. Refer to the paper to learn more about it. For prior-preservation we first generate images using the model with a class prompt and then use those during training along with our data.
192
- According to the paper, it's recommended to generate `num_epochs * num_samples` images for prior-preservation. 200-300 works well for most cases. The `num_class_images` flag sets the number of images to generate with the class prompt. You can place existing images in `class_data_dir`, and the training script will generate any additional images so that `num_class_images` are present in `class_data_dir` during training time.
193
-
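- To make the mechanism concrete, here is a simplified, conceptual sketch (not the script's actual code) of the
- class-image generation step: existing images in `class_data_dir` are reused and only the missing ones are generated
- with the class prompt.
-
- ```python
- import os
-
- import torch
- from diffusers import StableDiffusionPipeline
-
- # Hypothetical values: use the same ones you pass via --class_data_dir, --class_prompt and --num_class_images.
- class_data_dir = "path-to-class-images"
- class_prompt = "a photo of dog"
- num_class_images = 200
-
- pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16).to("cuda")
-
- os.makedirs(class_data_dir, exist_ok=True)
- existing = len(os.listdir(class_data_dir))
-
- # Only generate what is missing so that num_class_images images are present before training starts.
- for i in range(existing, num_class_images):
-     image = pipe(class_prompt).images[0]
-     image.save(os.path.join(class_data_dir, f"class_{i}.png"))
- ```
-
- The full training command with prior-preservation enabled then looks like: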
194
- ```bash
195
- export MODEL_NAME="CompVis/stable-diffusion-v1-4"
196
- export INSTANCE_DIR="path-to-instance-images"
197
- export CLASS_DIR="path-to-class-images"
198
- export OUTPUT_DIR="path-to-save-model"
199
-
200
- accelerate launch train_dreambooth.py \
201
- --pretrained_model_name_or_path=$MODEL_NAME \
202
- --instance_data_dir=$INSTANCE_DIR \
203
- --class_data_dir=$CLASS_DIR \
204
- --output_dir=$OUTPUT_DIR \
205
- --with_prior_preservation --prior_loss_weight=1.0 \
206
- --instance_prompt="a photo of sks dog" \
207
- --class_prompt="a photo of dog" \
208
- --resolution=512 \
209
- --train_batch_size=1 \
210
- --gradient_accumulation_steps=1 \
211
- --learning_rate=5e-6 \
212
- --lr_scheduler="constant" \
213
- --lr_warmup_steps=0 \
214
- --num_class_images=200 \
215
- --max_train_steps=800
216
- ```
217
-
218
-
219
- ### Training on a 16GB GPU:
220
-
221
- With the help of gradient checkpointing and the 8-bit optimizer from bitsandbytes, it's possible to run DreamBooth training on a 16GB GPU.
222
-
223
- To install `bitsandbytes` please refer to this [readme](https://github.com/TimDettmers/bitsandbytes#requirements--installation).
224
-
225
- ```bash
226
- export MODEL_NAME="CompVis/stable-diffusion-v1-4"
227
- export INSTANCE_DIR="path-to-instance-images"
228
- export CLASS_DIR="path-to-class-images"
229
- export OUTPUT_DIR="path-to-save-model"
230
-
231
- accelerate launch train_dreambooth.py \
232
- --pretrained_model_name_or_path=$MODEL_NAME \
233
- --instance_data_dir=$INSTANCE_DIR \
234
- --class_data_dir=$CLASS_DIR \
235
- --output_dir=$OUTPUT_DIR \
236
- --with_prior_preservation --prior_loss_weight=1.0 \
237
- --instance_prompt="a photo of sks dog" \
238
- --class_prompt="a photo of dog" \
239
- --resolution=512 \
240
- --train_batch_size=1 \
241
- --gradient_accumulation_steps=2 --gradient_checkpointing \
242
- --use_8bit_adam \
243
- --learning_rate=5e-6 \
244
- --lr_scheduler="constant" \
245
- --lr_warmup_steps=0 \
246
- --num_class_images=200 \
247
- --max_train_steps=800
248
- ```
249
-
250
- ### Training on an 8 GB GPU:
251
-
252
- By using [DeepSpeed](https://www.deepspeed.ai/) it's possible to offload some
253
- tensors from VRAM to either CPU or NVMe, allowing training with less VRAM.
254
-
255
- DeepSpeed needs to be enabled with `accelerate config`. During configuration
256
- answer yes to "Do you want to use DeepSpeed?". With DeepSpeed stage 2, fp16
257
- mixed precision, and offloading both parameters and optimizer state to CPU, it's
258
- possible to train on under 8 GB of VRAM, at the cost of requiring significantly
259
- more RAM (about 25 GB). See [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more DeepSpeed configuration options.
260
-
261
- Changing the default Adam optimizer to DeepSpeed's special version of Adam
262
- `deepspeed.ops.adam.DeepSpeedCPUAdam` gives a substantial speedup but enabling
263
- it requires a CUDA toolchain with the same version as PyTorch. The 8-bit optimizer
264
- does not seem to be compatible with DeepSpeed at the moment.
265
-
266
- ```bash
267
- export MODEL_NAME="CompVis/stable-diffusion-v1-4"
268
- export INSTANCE_DIR="path-to-instance-images"
269
- export CLASS_DIR="path-to-class-images"
270
- export OUTPUT_DIR="path-to-save-model"
271
-
272
- accelerate launch --mixed_precision="fp16" train_dreambooth.py \
273
- --pretrained_model_name_or_path=$MODEL_NAME \
274
- --instance_data_dir=$INSTANCE_DIR \
275
- --class_data_dir=$CLASS_DIR \
276
- --output_dir=$OUTPUT_DIR \
277
- --with_prior_preservation --prior_loss_weight=1.0 \
278
- --instance_prompt="a photo of sks dog" \
279
- --class_prompt="a photo of dog" \
280
- --resolution=512 \
281
- --train_batch_size=1 \
282
- --sample_batch_size=1 \
283
- --gradient_accumulation_steps=1 --gradient_checkpointing \
284
- --learning_rate=5e-6 \
285
- --lr_scheduler="constant" \
286
- --lr_warmup_steps=0 \
287
- --num_class_images=200 \
288
- --max_train_steps=800
289
- ```
290
-
291
- ### Fine-tune the text encoder with the UNet
292
-
293
- The script also allows you to fine-tune the `text_encoder` along with the `unet`. It's been observed experimentally that fine-tuning the `text_encoder` gives much better results, especially on faces.
294
- Pass the `--train_text_encoder` argument to the script to enable training `text_encoder`.
295
-
296
- ___Note: Training the text encoder requires more memory; with this option the training won't fit on a 16GB GPU. It needs at least 24GB VRAM.___
297
-
298
- ```bash
299
- export MODEL_NAME="CompVis/stable-diffusion-v1-4"
300
- export INSTANCE_DIR="path-to-instance-images"
301
- export CLASS_DIR="path-to-class-images"
302
- export OUTPUT_DIR="path-to-save-model"
303
-
304
- accelerate launch train_dreambooth.py \
305
- --pretrained_model_name_or_path=$MODEL_NAME \
306
- --train_text_encoder \
307
- --instance_data_dir=$INSTANCE_DIR \
308
- --class_data_dir=$CLASS_DIR \
309
- --output_dir=$OUTPUT_DIR \
310
- --with_prior_preservation --prior_loss_weight=1.0 \
311
- --instance_prompt="a photo of sks dog" \
312
- --class_prompt="a photo of dog" \
313
- --resolution=512 \
314
- --train_batch_size=1 \
315
- --use_8bit_adam \
316
- --gradient_checkpointing \
317
- --learning_rate=2e-6 \
318
- --lr_scheduler="constant" \
319
- --lr_warmup_steps=0 \
320
- --num_class_images=200 \
321
- --max_train_steps=800
322
- ```
323
-
324
- ### Using DreamBooth for pipelines other than Stable Diffusion
325
-
326
- AltDiffusion also supports DreamBooth now; the training command is basically the same as above.
327
- All you need to do is change the `pretrained_model_name_or_path` (i.e. `MODEL_NAME`) to another architecture such as [`AltDiffusion`](https://huggingface.co/docs/diffusers/api/pipelines/alt_diffusion), for example:
328
-
329
- ```
330
- export MODEL_NAME="CompVis/stable-diffusion-v1-4" --> export MODEL_NAME="BAAI/AltDiffusion-m9"
331
- or
332
- export MODEL_NAME="CompVis/stable-diffusion-v1-4" --> export MODEL_NAME="BAAI/AltDiffusion"
333
- ```
334
-
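- After fine-tuning on top of an AltDiffusion checkpoint, inference follows the same pattern as in the Stable Diffusion
- example above. A minimal sketch (assuming the trained model was saved to `path-to-your-trained-model`;
- `DiffusionPipeline` resolves the correct pipeline class from the saved `model_index.json`):
-
- ```python
- import torch
- from diffusers import DiffusionPipeline
-
- pipe = DiffusionPipeline.from_pretrained("path-to-your-trained-model", torch_dtype=torch.float16).to("cuda")
-
- # AltDiffusion-m9 is multilingual, so non-English prompts can also be used.
- image = pipe("a photo of sks dog").images[0]
- image.save("altdiffusion-dreambooth.png")
- ```
-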
335
- ### Training with xformers:
336
- You can enable memory-efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. This is not available with the Flax/JAX implementation.
337
-
338
- You can also use Dreambooth to train the specialized in-painting model. See [the script in the research folder for details](https://github.com/huggingface/diffusers/tree/main/examples/research_projects/dreambooth_inpaint).
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_ddim_parallel.py DELETED
@@ -1,642 +0,0 @@
1
- # Copyright 2023 ParaDiGMS authors and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
16
- # and https://github.com/hojonathanho/diffusion
17
-
18
- import math
19
- from dataclasses import dataclass
20
- from typing import List, Optional, Tuple, Union
21
-
22
- import numpy as np
23
- import torch
24
-
25
- from ..configuration_utils import ConfigMixin, register_to_config
26
- from ..utils import BaseOutput, randn_tensor
27
- from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
28
-
29
-
30
- @dataclass
31
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput
32
- class DDIMParallelSchedulerOutput(BaseOutput):
33
- """
34
- Output class for the scheduler's step function output.
35
-
36
- Args:
37
- prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
38
- Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
39
- denoising loop.
40
- pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
41
- The predicted denoised sample (x_{0}) based on the model output from the current timestep.
42
- `pred_original_sample` can be used to preview progress or for guidance.
43
- """
44
-
45
- prev_sample: torch.FloatTensor
46
- pred_original_sample: Optional[torch.FloatTensor] = None
47
-
48
-
49
- # Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
50
- def betas_for_alpha_bar(
51
- num_diffusion_timesteps,
52
- max_beta=0.999,
53
- alpha_transform_type="cosine",
54
- ):
55
- """
56
- Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
57
- (1-beta) over time from t = [0,1].
58
-
59
- Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
60
- to that part of the diffusion process.
61
-
62
-
63
- Args:
64
- num_diffusion_timesteps (`int`): the number of betas to produce.
65
- max_beta (`float`): the maximum beta to use; use values lower than 1 to
66
- prevent singularities.
67
- alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
68
- Choose from `cosine` or `exp`
69
-
70
- Returns:
71
- betas (`np.ndarray`): the betas used by the scheduler to step the model outputs
72
- """
73
- if alpha_transform_type == "cosine":
74
-
75
- def alpha_bar_fn(t):
76
- return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
77
-
78
- elif alpha_transform_type == "exp":
79
-
80
- def alpha_bar_fn(t):
81
- return math.exp(t * -12.0)
82
-
83
- else:
84
- raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
85
-
86
- betas = []
87
- for i in range(num_diffusion_timesteps):
88
- t1 = i / num_diffusion_timesteps
89
- t2 = (i + 1) / num_diffusion_timesteps
90
- betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
91
- return torch.tensor(betas, dtype=torch.float32)
92
-
93
-
94
- # Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr
95
- def rescale_zero_terminal_snr(betas):
96
- """
97
- Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
98
-
99
-
100
- Args:
101
- betas (`torch.FloatTensor`):
102
- the betas that the scheduler is being initialized with.
103
-
104
- Returns:
105
- `torch.FloatTensor`: rescaled betas with zero terminal SNR
106
- """
107
- # Convert betas to alphas_bar_sqrt
108
- alphas = 1.0 - betas
109
- alphas_cumprod = torch.cumprod(alphas, dim=0)
110
- alphas_bar_sqrt = alphas_cumprod.sqrt()
111
-
112
- # Store old values.
113
- alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
114
- alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
115
-
116
- # Shift so the last timestep is zero.
117
- alphas_bar_sqrt -= alphas_bar_sqrt_T
118
-
119
- # Scale so the first timestep is back to the old value.
120
- alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
121
-
122
- # Convert alphas_bar_sqrt to betas
123
- alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
124
- alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
125
- alphas = torch.cat([alphas_bar[0:1], alphas])
126
- betas = 1 - alphas
127
-
128
- return betas
129
-
130
-
131
- class DDIMParallelScheduler(SchedulerMixin, ConfigMixin):
132
- """
133
- Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising
134
- diffusion probabilistic models (DDPMs) with non-Markovian guidance.
135
-
136
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
137
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
138
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
139
- [`~SchedulerMixin.from_pretrained`] functions.
140
-
141
- For more details, see the original paper: https://arxiv.org/abs/2010.02502
142
-
143
- Args:
144
- num_train_timesteps (`int`): number of diffusion steps used to train the model.
145
- beta_start (`float`): the starting `beta` value of inference.
146
- beta_end (`float`): the final `beta` value.
147
- beta_schedule (`str`):
148
- the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
149
- `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
150
- trained_betas (`np.ndarray`, optional):
151
- option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
152
- clip_sample (`bool`, default `True`):
153
- option to clip predicted sample for numerical stability.
154
- clip_sample_range (`float`, default `1.0`):
155
- the maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
156
- set_alpha_to_one (`bool`, default `True`):
157
- each diffusion step uses the value of alphas product at that step and at the previous one. For the final
158
- step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
159
- otherwise it uses the value of alpha at step 0.
160
- steps_offset (`int`, default `0`):
161
- an offset added to the inference steps. You can use a combination of `offset=1` and
162
- `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in
163
- stable diffusion.
164
- prediction_type (`str`, default `epsilon`, optional):
165
- prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion
166
- process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4
167
- https://imagen.research.google/video/paper.pdf)
168
- thresholding (`bool`, default `False`):
169
- whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487).
170
- Note that the thresholding method is unsuitable for latent-space diffusion models (such as
171
- stable-diffusion).
172
- dynamic_thresholding_ratio (`float`, default `0.995`):
173
- the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen
174
- (https://arxiv.org/abs/2205.11487). Valid only when `thresholding=True`.
175
- sample_max_value (`float`, default `1.0`):
176
- the threshold value for dynamic thresholding. Valid only when `thresholding=True`.
177
- timestep_spacing (`str`, default `"leading"`):
178
- The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample
179
- Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information.
180
- rescale_betas_zero_snr (`bool`, default `False`):
181
- whether to rescale the betas to have zero terminal SNR (proposed by https://arxiv.org/pdf/2305.08891.pdf).
182
- This can enable the model to generate very bright and dark samples instead of limiting it to samples with
183
- medium brightness. Loosely related to
184
- [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
185
- """
186
-
187
- _compatibles = [e.name for e in KarrasDiffusionSchedulers]
188
- order = 1
189
- _is_ode_scheduler = True
190
-
191
- @register_to_config
192
- # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.__init__
193
- def __init__(
194
- self,
195
- num_train_timesteps: int = 1000,
196
- beta_start: float = 0.0001,
197
- beta_end: float = 0.02,
198
- beta_schedule: str = "linear",
199
- trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
200
- clip_sample: bool = True,
201
- set_alpha_to_one: bool = True,
202
- steps_offset: int = 0,
203
- prediction_type: str = "epsilon",
204
- thresholding: bool = False,
205
- dynamic_thresholding_ratio: float = 0.995,
206
- clip_sample_range: float = 1.0,
207
- sample_max_value: float = 1.0,
208
- timestep_spacing: str = "leading",
209
- rescale_betas_zero_snr: bool = False,
210
- ):
211
- if trained_betas is not None:
212
- self.betas = torch.tensor(trained_betas, dtype=torch.float32)
213
- elif beta_schedule == "linear":
214
- self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
215
- elif beta_schedule == "scaled_linear":
216
- # this schedule is very specific to the latent diffusion model.
217
- self.betas = (
218
- torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
219
- )
220
- elif beta_schedule == "squaredcos_cap_v2":
221
- # Glide cosine schedule
222
- self.betas = betas_for_alpha_bar(num_train_timesteps)
223
- else:
224
- raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
225
-
226
- # Rescale for zero SNR
227
- if rescale_betas_zero_snr:
228
- self.betas = rescale_zero_terminal_snr(self.betas)
229
-
230
- self.alphas = 1.0 - self.betas
231
- self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
232
-
233
- # At every step in ddim, we are looking into the previous alphas_cumprod
234
- # For the final step, there is no previous alphas_cumprod because we are already at 0
235
- # `set_alpha_to_one` decides whether we set this parameter simply to one or
236
- # whether we use the final alpha of the "non-previous" one.
237
- self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
238
-
239
- # standard deviation of the initial noise distribution
240
- self.init_noise_sigma = 1.0
241
-
242
- # setable values
243
- self.num_inference_steps = None
244
- self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
245
-
246
- # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.scale_model_input
247
- def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
248
- """
249
- Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
250
- current timestep.
251
-
252
- Args:
253
- sample (`torch.FloatTensor`): input sample
254
- timestep (`int`, optional): current timestep
255
-
256
- Returns:
257
- `torch.FloatTensor`: scaled input sample
258
- """
259
- return sample
260
-
261
- def _get_variance(self, timestep, prev_timestep=None):
262
- if prev_timestep is None:
263
- prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
264
-
265
- alpha_prod_t = self.alphas_cumprod[timestep]
266
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
267
- beta_prod_t = 1 - alpha_prod_t
268
- beta_prod_t_prev = 1 - alpha_prod_t_prev
269
-
270
- variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
271
-
272
- return variance
273
-
274
- def _batch_get_variance(self, t, prev_t):
275
- alpha_prod_t = self.alphas_cumprod[t]
276
- alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)]
277
- alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0)
278
- beta_prod_t = 1 - alpha_prod_t
279
- beta_prod_t_prev = 1 - alpha_prod_t_prev
280
-
281
- variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
282
-
283
- return variance
284
-
285
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
286
- def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
287
- """
288
- "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
289
- prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
290
- s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
291
- pixels from saturation at each step. We find that dynamic thresholding results in significantly better
292
- photorealism as well as better image-text alignment, especially when using very large guidance weights."
293
-
294
- https://arxiv.org/abs/2205.11487
295
- """
296
- dtype = sample.dtype
297
- batch_size, channels, height, width = sample.shape
298
-
299
- if dtype not in (torch.float32, torch.float64):
300
- sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
301
-
302
- # Flatten sample for doing quantile calculation along each image
303
- sample = sample.reshape(batch_size, channels * height * width)
304
-
305
- abs_sample = sample.abs() # "a certain percentile absolute pixel value"
306
-
307
- s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
308
- s = torch.clamp(
309
- s, min=1, max=self.config.sample_max_value
310
- ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
311
-
312
- s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
313
- sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
314
-
315
- sample = sample.reshape(batch_size, channels, height, width)
316
- sample = sample.to(dtype)
317
-
318
- return sample
319
-
320
- # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.set_timesteps
321
- def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
322
- """
323
- Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
324
-
325
- Args:
326
- num_inference_steps (`int`):
327
- the number of diffusion steps used when generating samples with a pre-trained model.
328
- """
329
-
330
- if num_inference_steps > self.config.num_train_timesteps:
331
- raise ValueError(
332
- f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
333
- f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
334
- f" maximal {self.config.num_train_timesteps} timesteps."
335
- )
336
-
337
- self.num_inference_steps = num_inference_steps
338
-
339
- # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
340
- if self.config.timestep_spacing == "linspace":
341
- timesteps = (
342
- np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps)
343
- .round()[::-1]
344
- .copy()
345
- .astype(np.int64)
346
- )
347
- elif self.config.timestep_spacing == "leading":
348
- step_ratio = self.config.num_train_timesteps // self.num_inference_steps
349
- # creates integer timesteps by multiplying by ratio
350
- # casting to int to avoid issues when num_inference_step is power of 3
351
- timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
352
- timesteps += self.config.steps_offset
353
- elif self.config.timestep_spacing == "trailing":
354
- step_ratio = self.config.num_train_timesteps / self.num_inference_steps
355
- # creates integer timesteps by multiplying by ratio
356
- # casting to int to avoid issues when num_inference_step is power of 3
357
- timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64)
358
- timesteps -= 1
359
- else:
360
- raise ValueError(
361
- f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'."
362
- )
363
-
364
- self.timesteps = torch.from_numpy(timesteps).to(device)
365
-
366
- def step(
367
- self,
368
- model_output: torch.FloatTensor,
369
- timestep: int,
370
- sample: torch.FloatTensor,
371
- eta: float = 0.0,
372
- use_clipped_model_output: bool = False,
373
- generator=None,
374
- variance_noise: Optional[torch.FloatTensor] = None,
375
- return_dict: bool = True,
376
- ) -> Union[DDIMParallelSchedulerOutput, Tuple]:
377
- """
378
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
379
- process from the learned model outputs (most often the predicted noise).
380
-
381
- Args:
382
- model_output (`torch.FloatTensor`): direct output from learned diffusion model.
383
- timestep (`int`): current discrete timestep in the diffusion chain.
384
- sample (`torch.FloatTensor`):
385
- current instance of sample being created by diffusion process.
386
- eta (`float`): weight of noise for added noise in diffusion step.
387
- use_clipped_model_output (`bool`): if `True`, compute "corrected" `model_output` from the clipped
388
- predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when
389
- `self.config.clip_sample` is `True`. If no clipping has happened, "corrected" `model_output` would
390
- coincide with the one provided as input and `use_clipped_model_output` will have no effect.
391
- generator: random number generator.
392
- variance_noise (`torch.FloatTensor`): instead of generating noise for the variance using `generator`, we
393
- can directly provide the noise for the variance itself. This is useful for methods such as
394
- CycleDiffusion. (https://arxiv.org/abs/2210.05559)
395
- return_dict (`bool`): option for returning tuple rather than DDIMParallelSchedulerOutput class
396
-
397
- Returns:
398
- [`~schedulers.scheduling_utils.DDIMParallelSchedulerOutput`] or `tuple`:
399
- [`~schedulers.scheduling_utils.DDIMParallelSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`.
400
- When returning a tuple, the first element is the sample tensor.
401
-
402
- """
403
- if self.num_inference_steps is None:
404
- raise ValueError(
405
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
406
- )
407
-
408
- # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
409
- # Ideally, read DDIM paper in-detail understanding
410
-
411
- # Notation (<variable name> -> <name in paper>
412
- # - pred_noise_t -> e_theta(x_t, t)
413
- # - pred_original_sample -> f_theta(x_t, t) or x_0
414
- # - std_dev_t -> sigma_t
415
- # - eta -> η
416
- # - pred_sample_direction -> "direction pointing to x_t"
417
- # - pred_prev_sample -> "x_t-1"
418
-
419
- # 1. get previous step value (=t-1)
420
- prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
421
-
422
- # 2. compute alphas, betas
423
- alpha_prod_t = self.alphas_cumprod[timestep]
424
- alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
425
-
426
- beta_prod_t = 1 - alpha_prod_t
427
-
428
- # 3. compute predicted original sample from predicted noise also called
429
- # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
430
- if self.config.prediction_type == "epsilon":
431
- pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
432
- pred_epsilon = model_output
433
- elif self.config.prediction_type == "sample":
434
- pred_original_sample = model_output
435
- pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
436
- elif self.config.prediction_type == "v_prediction":
437
- pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
438
- pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
439
- else:
440
- raise ValueError(
441
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
442
- " `v_prediction`"
443
- )
444
-
445
- # 4. Clip or threshold "predicted x_0"
446
- if self.config.thresholding:
447
- pred_original_sample = self._threshold_sample(pred_original_sample)
448
- elif self.config.clip_sample:
449
- pred_original_sample = pred_original_sample.clamp(
450
- -self.config.clip_sample_range, self.config.clip_sample_range
451
- )
452
-
453
- # 5. compute variance: "sigma_t(η)" -> see formula (16)
454
- # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
455
- variance = self._get_variance(timestep, prev_timestep)
456
- std_dev_t = eta * variance ** (0.5)
457
-
458
- if use_clipped_model_output:
459
- # the pred_epsilon is always re-derived from the clipped x_0 in Glide
460
- pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
461
-
462
- # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
463
- pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon
464
-
465
- # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
466
- prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
467
-
468
- if eta > 0:
469
- if variance_noise is not None and generator is not None:
470
- raise ValueError(
471
- "Cannot pass both generator and variance_noise. Please make sure that either `generator` or"
472
- " `variance_noise` stays `None`."
473
- )
474
-
475
- if variance_noise is None:
476
- variance_noise = randn_tensor(
477
- model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype
478
- )
479
- variance = std_dev_t * variance_noise
480
-
481
- prev_sample = prev_sample + variance
482
-
483
- if not return_dict:
484
- return (prev_sample,)
485
-
486
- return DDIMParallelSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
487
-
488
- def batch_step_no_noise(
489
- self,
490
- model_output: torch.FloatTensor,
491
- timesteps: List[int],
492
- sample: torch.FloatTensor,
493
- eta: float = 0.0,
494
- use_clipped_model_output: bool = False,
495
- ) -> torch.FloatTensor:
496
- """
497
- Batched version of the `step` function, to be able to reverse the SDE for multiple samples/timesteps at once.
498
- Also, does not add any noise to the predicted sample, which is necessary for parallel sampling where the noise
499
- is pre-sampled by the pipeline.
500
-
501
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
502
- process from the learned model outputs (most often the predicted noise).
503
-
504
- Args:
505
- model_output (`torch.FloatTensor`): direct output from learned diffusion model.
506
- timesteps (`List[int]`):
507
- current discrete timesteps in the diffusion chain. This is now a list of integers.
508
- sample (`torch.FloatTensor`):
509
- current instance of sample being created by diffusion process.
510
- eta (`float`): weight of noise for added noise in diffusion step.
511
- use_clipped_model_output (`bool`): if `True`, compute "corrected" `model_output` from the clipped
512
- predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when
513
- `self.config.clip_sample` is `True`. If no clipping has happened, "corrected" `model_output` would
514
- coincide with the one provided as input and `use_clipped_model_output` will have no effect.
515
-
516
- Returns:
517
- `torch.FloatTensor`: sample tensor at previous timestep.
518
-
519
- """
520
- if self.num_inference_steps is None:
521
- raise ValueError(
522
- "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
523
- )
524
-
525
- assert eta == 0.0
526
-
527
- # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
528
- # Ideally, read DDIM paper in-detail understanding
529
-
530
- # Notation (<variable name> -> <name in paper>
531
- # - pred_noise_t -> e_theta(x_t, t)
532
- # - pred_original_sample -> f_theta(x_t, t) or x_0
533
- # - std_dev_t -> sigma_t
534
- # - eta -> η
535
- # - pred_sample_direction -> "direction pointing to x_t"
536
- # - pred_prev_sample -> "x_t-1"
537
-
538
- # 1. get previous step value (=t-1)
539
- t = timesteps
540
- prev_t = t - self.config.num_train_timesteps // self.num_inference_steps
541
-
542
- t = t.view(-1, *([1] * (model_output.ndim - 1)))
543
- prev_t = prev_t.view(-1, *([1] * (model_output.ndim - 1)))
544
-
545
- # 1. compute alphas, betas
546
- self.alphas_cumprod = self.alphas_cumprod.to(model_output.device)
547
- self.final_alpha_cumprod = self.final_alpha_cumprod.to(model_output.device)
548
- alpha_prod_t = self.alphas_cumprod[t]
549
- alpha_prod_t_prev = self.alphas_cumprod[torch.clip(prev_t, min=0)]
550
- alpha_prod_t_prev[prev_t < 0] = torch.tensor(1.0)
551
-
552
- beta_prod_t = 1 - alpha_prod_t
553
-
554
- # 3. compute predicted original sample from predicted noise also called
555
- # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
556
- if self.config.prediction_type == "epsilon":
557
- pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
558
- pred_epsilon = model_output
559
- elif self.config.prediction_type == "sample":
560
- pred_original_sample = model_output
561
- pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
562
- elif self.config.prediction_type == "v_prediction":
563
- pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
564
- pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
565
- else:
566
- raise ValueError(
567
- f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
568
- " `v_prediction`"
569
- )
570
-
571
- # 4. Clip or threshold "predicted x_0"
572
- if self.config.thresholding:
573
- pred_original_sample = self._threshold_sample(pred_original_sample)
574
- elif self.config.clip_sample:
575
- pred_original_sample = pred_original_sample.clamp(
576
- -self.config.clip_sample_range, self.config.clip_sample_range
577
- )
578
-
579
- # 5. compute variance: "sigma_t(η)" -> see formula (16)
580
- # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
581
- variance = self._batch_get_variance(t, prev_t).to(model_output.device).view(*alpha_prod_t_prev.shape)
582
- std_dev_t = eta * variance ** (0.5)
583
-
584
- if use_clipped_model_output:
585
- # the pred_epsilon is always re-derived from the clipped x_0 in Glide
586
- pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
587
-
588
- # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
589
- pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon
590
-
591
- # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
592
- prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
593
-
594
- return prev_sample
595
-
596
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
597
- def add_noise(
598
- self,
599
- original_samples: torch.FloatTensor,
600
- noise: torch.FloatTensor,
601
- timesteps: torch.IntTensor,
602
- ) -> torch.FloatTensor:
603
- # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
604
- alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
605
- timesteps = timesteps.to(original_samples.device)
606
-
607
- sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
608
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
609
- while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
610
- sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
611
-
612
- sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
613
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
614
- while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
615
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
616
-
617
- noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
618
- return noisy_samples
619
-
620
- # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
621
- def get_velocity(
622
- self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor
623
- ) -> torch.FloatTensor:
624
- # Make sure alphas_cumprod and timestep have same device and dtype as sample
625
- alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
626
- timesteps = timesteps.to(sample.device)
627
-
628
- sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
629
- sqrt_alpha_prod = sqrt_alpha_prod.flatten()
630
- while len(sqrt_alpha_prod.shape) < len(sample.shape):
631
- sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
632
-
633
- sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
634
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
635
- while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
636
- sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
637
-
638
- velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
639
- return velocity
640
-
641
- def __len__(self):
642
- return self.config.num_train_timesteps
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/models/test_modeling_common.py DELETED
@@ -1,567 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import inspect
17
- import tempfile
18
- import traceback
19
- import unittest
20
- import unittest.mock as mock
21
- from typing import Dict, List, Tuple
22
-
23
- import numpy as np
24
- import requests_mock
25
- import torch
26
- from requests.exceptions import HTTPError
27
-
28
- from diffusers.models import UNet2DConditionModel
29
- from diffusers.models.attention_processor import AttnProcessor, AttnProcessor2_0, XFormersAttnProcessor
30
- from diffusers.training_utils import EMAModel
31
- from diffusers.utils import logging, torch_device
32
- from diffusers.utils.testing_utils import CaptureLogger, require_torch_2, require_torch_gpu, run_test_in_subprocess
33
-
34
-
35
- # Will be run via run_test_in_subprocess
36
- def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout):
37
- error = None
38
- try:
39
- init_dict, model_class = in_queue.get(timeout=timeout)
40
-
41
- model = model_class(**init_dict)
42
- model.to(torch_device)
43
- model = torch.compile(model)
44
-
45
- with tempfile.TemporaryDirectory() as tmpdirname:
46
- model.save_pretrained(tmpdirname)
47
- new_model = model_class.from_pretrained(tmpdirname)
48
- new_model.to(torch_device)
49
-
50
- assert new_model.__class__ == model_class
51
- except Exception:
52
- error = f"{traceback.format_exc()}"
53
-
54
- results = {"error": error}
55
- out_queue.put(results, timeout=timeout)
56
- out_queue.join()
57
-
58
-
59
- class ModelUtilsTest(unittest.TestCase):
60
- def tearDown(self):
61
- super().tearDown()
62
-
63
- import diffusers
64
-
65
- diffusers.utils.import_utils._safetensors_available = True
66
-
67
- def test_accelerate_loading_error_message(self):
68
- with self.assertRaises(ValueError) as error_context:
69
- UNet2DConditionModel.from_pretrained("hf-internal-testing/stable-diffusion-broken", subfolder="unet")
70
-
71
- # make sure that error message states what keys are missing
72
- assert "conv_out.bias" in str(error_context.exception)
73
-
74
- def test_cached_files_are_used_when_no_internet(self):
75
- # A mock response for an HTTP head request to emulate server down
76
- response_mock = mock.Mock()
77
- response_mock.status_code = 500
78
- response_mock.headers = {}
79
- response_mock.raise_for_status.side_effect = HTTPError
80
- response_mock.json.return_value = {}
81
-
82
- # Download this model to make sure it's in the cache.
83
- orig_model = UNet2DConditionModel.from_pretrained(
84
- "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet"
85
- )
86
-
87
- # Under the mock environment we get a 500 error when trying to reach the model.
88
- with mock.patch("requests.request", return_value=response_mock):
89
- # Download this model to make sure it's in the cache.
90
- model = UNet2DConditionModel.from_pretrained(
91
- "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", local_files_only=True
92
- )
93
-
94
- for p1, p2 in zip(orig_model.parameters(), model.parameters()):
95
- if p1.data.ne(p2.data).sum() > 0:
96
- assert False, "Parameters not the same!"
97
-
98
- def test_one_request_upon_cached(self):
99
- # TODO: For some reason this test fails on MPS where no HEAD call is made.
100
- if torch_device == "mps":
101
- return
102
-
103
- import diffusers
104
-
105
- diffusers.utils.import_utils._safetensors_available = False
106
-
107
- with tempfile.TemporaryDirectory() as tmpdirname:
108
- with requests_mock.mock(real_http=True) as m:
109
- UNet2DConditionModel.from_pretrained(
110
- "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname
111
- )
112
-
113
- download_requests = [r.method for r in m.request_history]
114
- assert download_requests.count("HEAD") == 2, "2 HEAD requests one for config, one for model"
115
- assert download_requests.count("GET") == 2, "2 GET requests one for config, one for model"
116
-
117
- with requests_mock.mock(real_http=True) as m:
118
- UNet2DConditionModel.from_pretrained(
119
- "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname
120
- )
121
-
122
- cache_requests = [r.method for r in m.request_history]
123
- assert (
124
- "HEAD" == cache_requests[0] and len(cache_requests) == 1
125
- ), "We should call only `model_info` to check for _commit hash and `send_telemetry`"
126
-
127
- diffusers.utils.import_utils._safetensors_available = True
128
-
129
- def test_weight_overwrite(self):
130
- with tempfile.TemporaryDirectory() as tmpdirname, self.assertRaises(ValueError) as error_context:
131
- UNet2DConditionModel.from_pretrained(
132
- "hf-internal-testing/tiny-stable-diffusion-torch",
133
- subfolder="unet",
134
- cache_dir=tmpdirname,
135
- in_channels=9,
136
- )
137
-
138
- # make sure that error message states what keys are missing
139
- assert "Cannot load" in str(error_context.exception)
140
-
141
- with tempfile.TemporaryDirectory() as tmpdirname:
142
- model = UNet2DConditionModel.from_pretrained(
143
- "hf-internal-testing/tiny-stable-diffusion-torch",
144
- subfolder="unet",
145
- cache_dir=tmpdirname,
146
- in_channels=9,
147
- low_cpu_mem_usage=False,
148
- ignore_mismatched_sizes=True,
149
- )
150
-
151
- assert model.config.in_channels == 9
152
-
153
-
154
- class UNetTesterMixin:
155
- def test_forward_signature(self):
156
- init_dict, _ = self.prepare_init_args_and_inputs_for_common()
157
-
158
- model = self.model_class(**init_dict)
159
- signature = inspect.signature(model.forward)
160
- # signature.parameters is an OrderedDict => so arg_names order is deterministic
161
- arg_names = [*signature.parameters.keys()]
162
-
163
- expected_arg_names = ["sample", "timestep"]
164
- self.assertListEqual(arg_names[:2], expected_arg_names)
165
-
166
- def test_forward_with_norm_groups(self):
167
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
168
-
169
- init_dict["norm_num_groups"] = 16
170
- init_dict["block_out_channels"] = (16, 32)
171
-
172
- model = self.model_class(**init_dict)
173
- model.to(torch_device)
174
- model.eval()
175
-
176
- with torch.no_grad():
177
- output = model(**inputs_dict)
178
-
179
- if isinstance(output, dict):
180
- output = output.to_tuple()[0]
181
-
182
- self.assertIsNotNone(output)
183
- expected_shape = inputs_dict["sample"].shape
184
- self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
185
-
186
-
187
- class ModelTesterMixin:
188
- main_input_name = None # overwrite in model specific tester class
189
- base_precision = 1e-3
190
-
191
- def test_from_save_pretrained(self):
192
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
193
-
194
- model = self.model_class(**init_dict)
195
- if hasattr(model, "set_default_attn_processor"):
196
- model.set_default_attn_processor()
197
- model.to(torch_device)
198
- model.eval()
199
-
200
- with tempfile.TemporaryDirectory() as tmpdirname:
201
- model.save_pretrained(tmpdirname)
202
- new_model = self.model_class.from_pretrained(tmpdirname)
203
- if hasattr(new_model, "set_default_attn_processor"):
204
- new_model.set_default_attn_processor()
205
- new_model.to(torch_device)
206
-
207
- with torch.no_grad():
208
- image = model(**inputs_dict)
209
- if isinstance(image, dict):
210
- image = image.to_tuple()[0]
211
-
212
- new_image = new_model(**inputs_dict)
213
-
214
- if isinstance(new_image, dict):
215
- new_image = new_image.to_tuple()[0]
216
-
217
- max_diff = (image - new_image).abs().sum().item()
218
- self.assertLessEqual(max_diff, 5e-5, "Models give different forward passes")
219
-
220
- def test_getattr_is_correct(self):
221
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
222
- model = self.model_class(**init_dict)
223
-
224
- # save some things to test
225
- model.dummy_attribute = 5
226
- model.register_to_config(test_attribute=5)
227
-
228
- logger = logging.get_logger("diffusers.models.modeling_utils")
229
- # 30 for warning
230
- logger.setLevel(30)
231
- with CaptureLogger(logger) as cap_logger:
232
- assert hasattr(model, "dummy_attribute")
233
- assert getattr(model, "dummy_attribute") == 5
234
- assert model.dummy_attribute == 5
235
-
236
- # no warning should be thrown
237
- assert cap_logger.out == ""
238
-
239
- logger = logging.get_logger("diffusers.models.modeling_utils")
240
- # 30 for warning
241
- logger.setLevel(30)
242
- with CaptureLogger(logger) as cap_logger:
243
- assert hasattr(model, "save_pretrained")
244
- fn = model.save_pretrained
245
- fn_1 = getattr(model, "save_pretrained")
246
-
247
- assert fn == fn_1
248
- # no warning should be thrown
249
- assert cap_logger.out == ""
250
-
251
- # warning should be thrown
252
- with self.assertWarns(FutureWarning):
253
- assert model.test_attribute == 5
254
-
255
- with self.assertWarns(FutureWarning):
256
- assert getattr(model, "test_attribute") == 5
257
-
258
- with self.assertRaises(AttributeError) as error:
259
- model.does_not_exist
260
-
261
- assert str(error.exception) == f"'{type(model).__name__}' object has no attribute 'does_not_exist'"
262
-
263
- @require_torch_gpu
264
- def test_set_attn_processor_for_determinism(self):
265
- torch.use_deterministic_algorithms(False)
266
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
267
- model = self.model_class(**init_dict)
268
- model.to(torch_device)
269
-
270
- if not hasattr(model, "set_attn_processor"):
271
- # If not has `set_attn_processor`, skip test
272
- return
273
-
274
- assert all(type(proc) == AttnProcessor2_0 for proc in model.attn_processors.values())
275
- with torch.no_grad():
276
- output_1 = model(**inputs_dict)[0]
277
-
278
- model.set_default_attn_processor()
279
- assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values())
280
- with torch.no_grad():
281
- output_2 = model(**inputs_dict)[0]
282
-
283
- model.enable_xformers_memory_efficient_attention()
284
- assert all(type(proc) == XFormersAttnProcessor for proc in model.attn_processors.values())
285
- with torch.no_grad():
286
- output_3 = model(**inputs_dict)[0]
287
-
288
- model.set_attn_processor(AttnProcessor2_0())
289
- assert all(type(proc) == AttnProcessor2_0 for proc in model.attn_processors.values())
290
- with torch.no_grad():
291
- output_4 = model(**inputs_dict)[0]
292
-
293
- model.set_attn_processor(AttnProcessor())
294
- assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values())
295
- with torch.no_grad():
296
- output_5 = model(**inputs_dict)[0]
297
-
298
- model.set_attn_processor(XFormersAttnProcessor())
299
- assert all(type(proc) == XFormersAttnProcessor for proc in model.attn_processors.values())
300
- with torch.no_grad():
301
- output_6 = model(**inputs_dict)[0]
302
-
303
- torch.use_deterministic_algorithms(True)
304
-
305
- # make sure that outputs match
306
- assert torch.allclose(output_2, output_1, atol=self.base_precision)
307
- assert torch.allclose(output_2, output_3, atol=self.base_precision)
308
- assert torch.allclose(output_2, output_4, atol=self.base_precision)
309
- assert torch.allclose(output_2, output_5, atol=self.base_precision)
310
- assert torch.allclose(output_2, output_6, atol=self.base_precision)
311
-
312
- def test_from_save_pretrained_variant(self):
313
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
314
-
315
- model = self.model_class(**init_dict)
316
- if hasattr(model, "set_default_attn_processor"):
317
- model.set_default_attn_processor()
318
-
319
- model.to(torch_device)
320
- model.eval()
321
-
322
- with tempfile.TemporaryDirectory() as tmpdirname:
323
- model.save_pretrained(tmpdirname, variant="fp16")
324
- new_model = self.model_class.from_pretrained(tmpdirname, variant="fp16")
325
- if hasattr(new_model, "set_default_attn_processor"):
326
- new_model.set_default_attn_processor()
327
-
328
- # non-variant cannot be loaded
329
- with self.assertRaises(OSError) as error_context:
330
- self.model_class.from_pretrained(tmpdirname)
331
-
332
- # make sure that the error message states what keys are missing
333
- assert "Error no file named diffusion_pytorch_model.bin found in directory" in str(error_context.exception)
334
-
335
- new_model.to(torch_device)
336
-
337
- with torch.no_grad():
338
- image = model(**inputs_dict)
339
- if isinstance(image, dict):
340
- image = image.to_tuple()[0]
341
-
342
- new_image = new_model(**inputs_dict)
343
-
344
- if isinstance(new_image, dict):
345
- new_image = new_image.to_tuple()[0]
346
-
347
- max_diff = (image - new_image).abs().sum().item()
348
- self.assertLessEqual(max_diff, 5e-5, "Models give different forward passes")
349
-
350
- @require_torch_2
351
- def test_from_save_pretrained_dynamo(self):
352
- init_dict, _ = self.prepare_init_args_and_inputs_for_common()
353
- inputs = [init_dict, self.model_class]
354
- run_test_in_subprocess(test_case=self, target_func=_test_from_save_pretrained_dynamo, inputs=inputs)
355
-
356
- def test_from_save_pretrained_dtype(self):
357
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
358
-
359
- model = self.model_class(**init_dict)
360
- model.to(torch_device)
361
- model.eval()
362
-
363
- for dtype in [torch.float32, torch.float16, torch.bfloat16]:
364
- if torch_device == "mps" and dtype == torch.bfloat16:
365
- continue
366
- with tempfile.TemporaryDirectory() as tmpdirname:
367
- model.to(dtype)
368
- model.save_pretrained(tmpdirname)
369
- new_model = self.model_class.from_pretrained(tmpdirname, low_cpu_mem_usage=True, torch_dtype=dtype)
370
- assert new_model.dtype == dtype
371
- new_model = self.model_class.from_pretrained(tmpdirname, low_cpu_mem_usage=False, torch_dtype=dtype)
372
- assert new_model.dtype == dtype
373
-
374
- def test_determinism(self, expected_max_diff=1e-5):
375
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
376
- model = self.model_class(**init_dict)
377
- model.to(torch_device)
378
- model.eval()
379
-
380
- with torch.no_grad():
381
- first = model(**inputs_dict)
382
- if isinstance(first, dict):
383
- first = first.to_tuple()[0]
384
-
385
- second = model(**inputs_dict)
386
- if isinstance(second, dict):
387
- second = second.to_tuple()[0]
388
-
389
- out_1 = first.cpu().numpy()
390
- out_2 = second.cpu().numpy()
391
- out_1 = out_1[~np.isnan(out_1)]
392
- out_2 = out_2[~np.isnan(out_2)]
393
- max_diff = np.amax(np.abs(out_1 - out_2))
394
- self.assertLessEqual(max_diff, expected_max_diff)
395
-
396
- def test_output(self):
397
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
398
- model = self.model_class(**init_dict)
399
- model.to(torch_device)
400
- model.eval()
401
-
402
- with torch.no_grad():
403
- output = model(**inputs_dict)
404
-
405
- if isinstance(output, dict):
406
- output = output.to_tuple()[0]
407
-
408
- self.assertIsNotNone(output)
409
-
410
- # input & output have to have the same shape
411
- input_tensor = inputs_dict[self.main_input_name]
412
- expected_shape = input_tensor.shape
413
- self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
414
-
415
- def test_model_from_pretrained(self):
416
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
417
-
418
- model = self.model_class(**init_dict)
419
- model.to(torch_device)
420
- model.eval()
421
-
422
- # test if the model can be loaded from the config
423
- # and has all the expected shapes
424
- with tempfile.TemporaryDirectory() as tmpdirname:
425
- model.save_pretrained(tmpdirname)
426
- new_model = self.model_class.from_pretrained(tmpdirname)
427
- new_model.to(torch_device)
428
- new_model.eval()
429
-
430
- # check that all parameter shapes are the same
431
- for param_name in model.state_dict().keys():
432
- param_1 = model.state_dict()[param_name]
433
- param_2 = new_model.state_dict()[param_name]
434
- self.assertEqual(param_1.shape, param_2.shape)
435
-
436
- with torch.no_grad():
437
- output_1 = model(**inputs_dict)
438
-
439
- if isinstance(output_1, dict):
440
- output_1 = output_1.to_tuple()[0]
441
-
442
- output_2 = new_model(**inputs_dict)
443
-
444
- if isinstance(output_2, dict):
445
- output_2 = output_2.to_tuple()[0]
446
-
447
- self.assertEqual(output_1.shape, output_2.shape)
448
-
449
- @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
450
- def test_training(self):
451
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
452
-
453
- model = self.model_class(**init_dict)
454
- model.to(torch_device)
455
- model.train()
456
- output = model(**inputs_dict)
457
-
458
- if isinstance(output, dict):
459
- output = output.to_tuple()[0]
460
-
461
- input_tensor = inputs_dict[self.main_input_name]
462
- noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device)
463
- loss = torch.nn.functional.mse_loss(output, noise)
464
- loss.backward()
465
-
466
- @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
467
- def test_ema_training(self):
468
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
469
-
470
- model = self.model_class(**init_dict)
471
- model.to(torch_device)
472
- model.train()
473
- ema_model = EMAModel(model.parameters())
474
-
475
- output = model(**inputs_dict)
476
-
477
- if isinstance(output, dict):
478
- output = output.to_tuple()[0]
479
-
480
- input_tensor = inputs_dict[self.main_input_name]
481
- noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device)
482
- loss = torch.nn.functional.mse_loss(output, noise)
483
- loss.backward()
484
- ema_model.step(model.parameters())
485
-
486
- def test_outputs_equivalence(self):
487
- def set_nan_tensor_to_zero(t):
488
- # Temporary fallback until `aten::_index_put_impl_` is implemented in mps
489
- # Track progress in https://github.com/pytorch/pytorch/issues/77764
490
- device = t.device
491
- if device.type == "mps":
492
- t = t.to("cpu")
493
- t[t != t] = 0
494
- return t.to(device)
495
-
496
- def recursive_check(tuple_object, dict_object):
497
- if isinstance(tuple_object, (List, Tuple)):
498
- for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
499
- recursive_check(tuple_iterable_value, dict_iterable_value)
500
- elif isinstance(tuple_object, Dict):
501
- for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
502
- recursive_check(tuple_iterable_value, dict_iterable_value)
503
- elif tuple_object is None:
504
- return
505
- else:
506
- self.assertTrue(
507
- torch.allclose(
508
- set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
509
- ),
510
- msg=(
511
- "Tuple and dict output are not equal. Difference:"
512
- f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
513
- f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
514
- f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
515
- ),
516
- )
517
-
518
- init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
519
-
520
- model = self.model_class(**init_dict)
521
- model.to(torch_device)
522
- model.eval()
523
-
524
- with torch.no_grad():
525
- outputs_dict = model(**inputs_dict)
526
- outputs_tuple = model(**inputs_dict, return_dict=False)
527
-
528
- recursive_check(outputs_tuple, outputs_dict)
529
-
530
- @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
531
- def test_enable_disable_gradient_checkpointing(self):
532
- if not self.model_class._supports_gradient_checkpointing:
533
- return # Skip test if model does not support gradient checkpointing
534
-
535
- init_dict, _ = self.prepare_init_args_and_inputs_for_common()
536
-
537
- # at init model should have gradient checkpointing disabled
538
- model = self.model_class(**init_dict)
539
- self.assertFalse(model.is_gradient_checkpointing)
540
-
541
- # check enable works
542
- model.enable_gradient_checkpointing()
543
- self.assertTrue(model.is_gradient_checkpointing)
544
-
545
- # check disable works
546
- model.disable_gradient_checkpointing()
547
- self.assertFalse(model.is_gradient_checkpointing)
548
-
549
- def test_deprecated_kwargs(self):
550
- has_kwarg_in_model_class = "kwargs" in inspect.signature(self.model_class.__init__).parameters
551
- has_deprecated_kwarg = len(self.model_class._deprecated_kwargs) > 0
552
-
553
- if has_kwarg_in_model_class and not has_deprecated_kwarg:
554
- raise ValueError(
555
- f"{self.model_class} has `**kwargs` in its __init__ method but has not defined any deprecated kwargs"
556
- " under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if there are"
557
- " no deprecated arguments or add the deprecated argument with `_deprecated_kwargs ="
558
- " [<deprecated_argument>]`"
559
- )
560
-
561
- if not has_kwarg_in_model_class and has_deprecated_kwarg:
562
- raise ValueError(
563
- f"{self.model_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated kwargs"
564
- " under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs` argument to"
565
- f" {self.model_class}.__init__ if there are deprecated arguments or remove the deprecated argument"
566
- " from `_deprecated_kwargs = [<deprecated_argument>]`"
567
- )
 
spaces/Andy1621/uniformer_image_detection/mmdet/core/anchor/anchor_generator.py DELETED
@@ -1,727 +0,0 @@
1
- import mmcv
2
- import numpy as np
3
- import torch
4
- from torch.nn.modules.utils import _pair
5
-
6
- from .builder import ANCHOR_GENERATORS
7
-
8
-
9
- @ANCHOR_GENERATORS.register_module()
10
- class AnchorGenerator(object):
11
- """Standard anchor generator for 2D anchor-based detectors.
12
-
13
- Args:
14
- strides (list[int] | list[tuple[int, int]]): Strides of anchors
15
- in multiple feature levels in order (w, h).
16
- ratios (list[float]): The list of ratios between the height and width
17
- of anchors in a single level.
18
- scales (list[int] | None): Anchor scales for anchors in a single level.
19
- It cannot be set at the same time if `octave_base_scale` and
20
- `scales_per_octave` are set.
21
- base_sizes (list[int] | None): The basic sizes
22
- of anchors in multiple levels.
23
- If None is given, strides will be used as base_sizes.
24
- (If strides are non-square, the shortest stride is taken.)
25
- scale_major (bool): Whether to multiply scales first when generating
26
- base anchors. If true, the anchors in the same row will have the
27
- same scales. By default it is True in V2.0
28
- octave_base_scale (int): The base scale of octave.
29
- scales_per_octave (int): Number of scales for each octave.
30
- `octave_base_scale` and `scales_per_octave` are usually used in
31
- retinanet and the `scales` should be None when they are set.
32
- centers (list[tuple[float, float]] | None): The centers of the anchor
33
- relative to the feature grid center in multiple feature levels.
34
- By default it is set to be None and not used. If a list of tuple of
35
- float is given, they will be used to shift the centers of anchors.
36
- center_offset (float): The offset of center in proportion to anchors'
37
- width and height. By default it is 0 in V2.0.
38
-
39
- Examples:
40
- >>> from mmdet.core import AnchorGenerator
41
- >>> self = AnchorGenerator([16], [1.], [1.], [9])
42
- >>> all_anchors = self.grid_anchors([(2, 2)], device='cpu')
43
- >>> print(all_anchors)
44
- [tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
45
- [11.5000, -4.5000, 20.5000, 4.5000],
46
- [-4.5000, 11.5000, 4.5000, 20.5000],
47
- [11.5000, 11.5000, 20.5000, 20.5000]])]
48
- >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18])
49
- >>> all_anchors = self.grid_anchors([(2, 2), (1, 1)], device='cpu')
50
- >>> print(all_anchors)
51
- [tensor([[-4.5000, -4.5000, 4.5000, 4.5000],
52
- [11.5000, -4.5000, 20.5000, 4.5000],
53
- [-4.5000, 11.5000, 4.5000, 20.5000],
54
- [11.5000, 11.5000, 20.5000, 20.5000]]), \
55
- tensor([[-9., -9., 9., 9.]])]
56
- """
57
-
58
- def __init__(self,
59
- strides,
60
- ratios,
61
- scales=None,
62
- base_sizes=None,
63
- scale_major=True,
64
- octave_base_scale=None,
65
- scales_per_octave=None,
66
- centers=None,
67
- center_offset=0.):
68
- # check center and center_offset
69
- if center_offset != 0:
70
- assert centers is None, 'center cannot be set when center_offset' \
71
- f'!=0, {centers} is given.'
72
- if not (0 <= center_offset <= 1):
73
- raise ValueError('center_offset should be in range [0, 1], '
74
- f'{center_offset} is given.')
75
- if centers is not None:
76
- assert len(centers) == len(strides), \
77
- 'The number of strides should be the same as centers, got ' \
78
- f'{strides} and {centers}'
79
-
80
- # calculate base sizes of anchors
81
- self.strides = [_pair(stride) for stride in strides]
82
- self.base_sizes = [min(stride) for stride in self.strides
83
- ] if base_sizes is None else base_sizes
84
- assert len(self.base_sizes) == len(self.strides), \
85
- 'The number of strides should be the same as base sizes, got ' \
86
- f'{self.strides} and {self.base_sizes}'
87
-
88
- # calculate scales of anchors
89
- assert ((octave_base_scale is not None
90
- and scales_per_octave is not None) ^ (scales is not None)), \
91
- 'scales and octave_base_scale with scales_per_octave cannot' \
92
- ' be set at the same time'
93
- if scales is not None:
94
- self.scales = torch.Tensor(scales)
95
- elif octave_base_scale is not None and scales_per_octave is not None:
96
- octave_scales = np.array(
97
- [2**(i / scales_per_octave) for i in range(scales_per_octave)])
98
- scales = octave_scales * octave_base_scale
99
- self.scales = torch.Tensor(scales)
100
- else:
101
- raise ValueError('Either scales or octave_base_scale with '
102
- 'scales_per_octave should be set')
103
-
104
- self.octave_base_scale = octave_base_scale
105
- self.scales_per_octave = scales_per_octave
106
- self.ratios = torch.Tensor(ratios)
107
- self.scale_major = scale_major
108
- self.centers = centers
109
- self.center_offset = center_offset
110
- self.base_anchors = self.gen_base_anchors()
111
-
112
- @property
113
- def num_base_anchors(self):
114
- """list[int]: total number of base anchors in a feature grid"""
115
- return [base_anchors.size(0) for base_anchors in self.base_anchors]
116
-
117
- @property
118
- def num_levels(self):
119
- """int: number of feature levels that the generator will be applied"""
120
- return len(self.strides)
121
-
122
- def gen_base_anchors(self):
123
- """Generate base anchors.
124
-
125
- Returns:
126
- list(torch.Tensor): Base anchors of a feature grid in multiple \
127
- feature levels.
128
- """
129
- multi_level_base_anchors = []
130
- for i, base_size in enumerate(self.base_sizes):
131
- center = None
132
- if self.centers is not None:
133
- center = self.centers[i]
134
- multi_level_base_anchors.append(
135
- self.gen_single_level_base_anchors(
136
- base_size,
137
- scales=self.scales,
138
- ratios=self.ratios,
139
- center=center))
140
- return multi_level_base_anchors
141
-
142
- def gen_single_level_base_anchors(self,
143
- base_size,
144
- scales,
145
- ratios,
146
- center=None):
147
- """Generate base anchors of a single level.
148
-
149
- Args:
150
- base_size (int | float): Basic size of an anchor.
151
- scales (torch.Tensor): Scales of the anchor.
152
- ratios (torch.Tensor): The ratio between the height
153
- and width of anchors in a single level.
154
- center (tuple[float], optional): The center of the base anchor
155
- related to a single feature grid. Defaults to None.
156
-
157
- Returns:
158
- torch.Tensor: Anchors in a single-level feature maps.
159
- """
160
- w = base_size
161
- h = base_size
162
- if center is None:
163
- x_center = self.center_offset * w
164
- y_center = self.center_offset * h
165
- else:
166
- x_center, y_center = center
167
-
168
- h_ratios = torch.sqrt(ratios)
169
- w_ratios = 1 / h_ratios
170
- if self.scale_major:
171
- ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
172
- hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
173
- else:
174
- ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
175
- hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
176
-
177
- # use float anchor and the anchor's center is aligned with the
178
- # pixel center
179
- base_anchors = [
180
- x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws,
181
- y_center + 0.5 * hs
182
- ]
183
- base_anchors = torch.stack(base_anchors, dim=-1)
184
-
185
- return base_anchors
186
-
187
- def _meshgrid(self, x, y, row_major=True):
188
- """Generate mesh grid of x and y.
189
-
190
- Args:
191
- x (torch.Tensor): Grids of x dimension.
192
- y (torch.Tensor): Grids of y dimension.
193
- row_major (bool, optional): Whether to return y grids first.
194
- Defaults to True.
195
-
196
- Returns:
197
- tuple[torch.Tensor]: The mesh grids of x and y.
198
- """
199
- # use shape instead of len to keep tracing while exporting to onnx
200
- xx = x.repeat(y.shape[0])
201
- yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)
202
- if row_major:
203
- return xx, yy
204
- else:
205
- return yy, xx
206
-
207
- def grid_anchors(self, featmap_sizes, device='cuda'):
208
- """Generate grid anchors in multiple feature levels.
209
-
210
- Args:
211
- featmap_sizes (list[tuple]): List of feature map sizes in
212
- multiple feature levels.
213
- device (str): Device where the anchors will be put on.
214
-
215
- Return:
216
- list[torch.Tensor]: Anchors in multiple feature levels. \
217
- The sizes of each tensor should be [N, 4], where \
218
- N = width * height * num_base_anchors, width and height \
219
- are the sizes of the corresponding feature level, \
220
- num_base_anchors is the number of anchors for that level.
221
- """
222
- assert self.num_levels == len(featmap_sizes)
223
- multi_level_anchors = []
224
- for i in range(self.num_levels):
225
- anchors = self.single_level_grid_anchors(
226
- self.base_anchors[i].to(device),
227
- featmap_sizes[i],
228
- self.strides[i],
229
- device=device)
230
- multi_level_anchors.append(anchors)
231
- return multi_level_anchors
232
-
233
- def single_level_grid_anchors(self,
234
- base_anchors,
235
- featmap_size,
236
- stride=(16, 16),
237
- device='cuda'):
238
- """Generate grid anchors of a single level.
239
-
240
- Note:
241
- This function is usually called by method ``self.grid_anchors``.
242
-
243
- Args:
244
- base_anchors (torch.Tensor): The base anchors of a feature grid.
245
- featmap_size (tuple[int]): Size of the feature maps.
246
- stride (tuple[int], optional): Stride of the feature map in order
247
- (w, h). Defaults to (16, 16).
248
- device (str, optional): Device the tensor will be put on.
249
- Defaults to 'cuda'.
250
-
251
- Returns:
252
- torch.Tensor: Anchors in the overall feature maps.
253
- """
254
- # keep as Tensor, so that we can convert to ONNX correctly
255
- feat_h, feat_w = featmap_size
256
- shift_x = torch.arange(0, feat_w, device=device) * stride[0]
257
- shift_y = torch.arange(0, feat_h, device=device) * stride[1]
258
-
259
- shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
260
- shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
261
- shifts = shifts.type_as(base_anchors)
262
- # first feat_w elements correspond to the first row of shifts
263
- # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
264
- # shifted anchors (K, A, 4), reshape to (K*A, 4)
265
-
266
- all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
267
- all_anchors = all_anchors.view(-1, 4)
268
- # first A rows correspond to A anchors of (0, 0) in feature map,
269
- # then (0, 1), (0, 2), ...
270
- return all_anchors
271
-
272
- def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
273
- """Generate valid flags of anchors in multiple feature levels.
274
-
275
- Args:
276
- featmap_sizes (list(tuple)): List of feature map sizes in
277
- multiple feature levels.
278
- pad_shape (tuple): The padded shape of the image.
279
- device (str): Device where the anchors will be put on.
280
-
281
- Return:
282
- list(torch.Tensor): Valid flags of anchors in multiple levels.
283
- """
284
- assert self.num_levels == len(featmap_sizes)
285
- multi_level_flags = []
286
- for i in range(self.num_levels):
287
- anchor_stride = self.strides[i]
288
- feat_h, feat_w = featmap_sizes[i]
289
- h, w = pad_shape[:2]
290
- valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h)
291
- valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w)
292
- flags = self.single_level_valid_flags((feat_h, feat_w),
293
- (valid_feat_h, valid_feat_w),
294
- self.num_base_anchors[i],
295
- device=device)
296
- multi_level_flags.append(flags)
297
- return multi_level_flags
298
-
299
- def single_level_valid_flags(self,
300
- featmap_size,
301
- valid_size,
302
- num_base_anchors,
303
- device='cuda'):
304
- """Generate the valid flags of anchor in a single feature map.
305
-
306
- Args:
307
- featmap_size (tuple[int]): The size of feature maps.
308
- valid_size (tuple[int]): The valid size of the feature maps.
309
- num_base_anchors (int): The number of base anchors.
310
- device (str, optional): Device where the flags will be put on.
311
- Defaults to 'cuda'.
312
-
313
- Returns:
314
- torch.Tensor: The valid flags of each anchor in a single level \
315
- feature map.
316
- """
317
- feat_h, feat_w = featmap_size
318
- valid_h, valid_w = valid_size
319
- assert valid_h <= feat_h and valid_w <= feat_w
320
- valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
321
- valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
322
- valid_x[:valid_w] = 1
323
- valid_y[:valid_h] = 1
324
- valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
325
- valid = valid_xx & valid_yy
326
- valid = valid[:, None].expand(valid.size(0),
327
- num_base_anchors).contiguous().view(-1)
328
- return valid
329
-
330
- def __repr__(self):
331
- """str: a string that describes the module"""
332
- indent_str = ' '
333
- repr_str = self.__class__.__name__ + '(\n'
334
- repr_str += f'{indent_str}strides={self.strides},\n'
335
- repr_str += f'{indent_str}ratios={self.ratios},\n'
336
- repr_str += f'{indent_str}scales={self.scales},\n'
337
- repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
338
- repr_str += f'{indent_str}scale_major={self.scale_major},\n'
339
- repr_str += f'{indent_str}octave_base_scale='
340
- repr_str += f'{self.octave_base_scale},\n'
341
- repr_str += f'{indent_str}scales_per_octave='
342
- repr_str += f'{self.scales_per_octave},\n'
343
- repr_str += f'{indent_str}num_levels={self.num_levels}\n'
344
- repr_str += f'{indent_str}centers={self.centers},\n'
345
- repr_str += f'{indent_str}center_offset={self.center_offset})'
346
- return repr_str
347
-
348
-
349
- @ANCHOR_GENERATORS.register_module()
350
- class SSDAnchorGenerator(AnchorGenerator):
351
- """Anchor generator for SSD.
352
-
353
- Args:
354
- strides (list[int] | list[tuple[int, int]]): Strides of anchors
355
- in multiple feature levels.
356
- ratios (list[float]): The list of ratios between the height and width
357
- of anchors in a single level.
358
- basesize_ratio_range (tuple(float)): Ratio range of anchors.
359
- input_size (int): Size of feature map, 300 for SSD300,
360
- 512 for SSD512.
361
- scale_major (bool): Whether to multiply scales first when generating
362
- base anchors. If true, the anchors in the same row will have the
363
- same scales. It is always set to be False in SSD.
364
- """
365
-
366
- def __init__(self,
367
- strides,
368
- ratios,
369
- basesize_ratio_range,
370
- input_size=300,
371
- scale_major=True):
372
- assert len(strides) == len(ratios)
373
- assert mmcv.is_tuple_of(basesize_ratio_range, float)
374
-
375
- self.strides = [_pair(stride) for stride in strides]
376
- self.input_size = input_size
377
- self.centers = [(stride[0] / 2., stride[1] / 2.)
378
- for stride in self.strides]
379
- self.basesize_ratio_range = basesize_ratio_range
380
-
381
- # calculate anchor ratios and sizes
382
- min_ratio, max_ratio = basesize_ratio_range
383
- min_ratio = int(min_ratio * 100)
384
- max_ratio = int(max_ratio * 100)
385
- step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))
386
- min_sizes = []
387
- max_sizes = []
388
- for ratio in range(int(min_ratio), int(max_ratio) + 1, step):
389
- min_sizes.append(int(self.input_size * ratio / 100))
390
- max_sizes.append(int(self.input_size * (ratio + step) / 100))
391
- if self.input_size == 300:
392
- if basesize_ratio_range[0] == 0.15: # SSD300 COCO
393
- min_sizes.insert(0, int(self.input_size * 7 / 100))
394
- max_sizes.insert(0, int(self.input_size * 15 / 100))
395
- elif basesize_ratio_range[0] == 0.2: # SSD300 VOC
396
- min_sizes.insert(0, int(self.input_size * 10 / 100))
397
- max_sizes.insert(0, int(self.input_size * 20 / 100))
398
- else:
399
- raise ValueError(
400
- 'basesize_ratio_range[0] should be either 0.15'
401
- 'or 0.2 when input_size is 300, got '
402
- f'{basesize_ratio_range[0]}.')
403
- elif self.input_size == 512:
404
- if basesize_ratio_range[0] == 0.1: # SSD512 COCO
405
- min_sizes.insert(0, int(self.input_size * 4 / 100))
406
- max_sizes.insert(0, int(self.input_size * 10 / 100))
407
- elif basesize_ratio_range[0] == 0.15: # SSD512 VOC
408
- min_sizes.insert(0, int(self.input_size * 7 / 100))
409
- max_sizes.insert(0, int(self.input_size * 15 / 100))
410
- else:
411
- raise ValueError('basesize_ratio_range[0] should be either 0.1'
412
- 'or 0.15 when input_size is 512, got'
413
- f' {basesize_ratio_range[0]}.')
414
- else:
415
- raise ValueError('Only support 300 or 512 in SSDAnchorGenerator'
416
- f', got {self.input_size}.')
417
-
418
- anchor_ratios = []
419
- anchor_scales = []
420
- for k in range(len(self.strides)):
421
- scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]
422
- anchor_ratio = [1.]
423
- for r in ratios[k]:
424
- anchor_ratio += [1 / r, r] # 4 or 6 ratio
425
- anchor_ratios.append(torch.Tensor(anchor_ratio))
426
- anchor_scales.append(torch.Tensor(scales))
427
-
428
- self.base_sizes = min_sizes
429
- self.scales = anchor_scales
430
- self.ratios = anchor_ratios
431
- self.scale_major = scale_major
432
- self.center_offset = 0
433
- self.base_anchors = self.gen_base_anchors()
434
-
435
- def gen_base_anchors(self):
436
- """Generate base anchors.
437
-
438
- Returns:
439
- list(torch.Tensor): Base anchors of a feature grid in multiple \
440
- feature levels.
441
- """
442
- multi_level_base_anchors = []
443
- for i, base_size in enumerate(self.base_sizes):
444
- base_anchors = self.gen_single_level_base_anchors(
445
- base_size,
446
- scales=self.scales[i],
447
- ratios=self.ratios[i],
448
- center=self.centers[i])
449
- indices = list(range(len(self.ratios[i])))
450
- indices.insert(1, len(indices))
451
- base_anchors = torch.index_select(base_anchors, 0,
452
- torch.LongTensor(indices))
453
- multi_level_base_anchors.append(base_anchors)
454
- return multi_level_base_anchors
455
-
456
- def __repr__(self):
457
- """str: a string that describes the module"""
458
- indent_str = ' '
459
- repr_str = self.__class__.__name__ + '(\n'
460
- repr_str += f'{indent_str}strides={self.strides},\n'
461
- repr_str += f'{indent_str}scales={self.scales},\n'
462
- repr_str += f'{indent_str}scale_major={self.scale_major},\n'
463
- repr_str += f'{indent_str}input_size={self.input_size},\n'
464
- repr_str += f'{indent_str}scales={self.scales},\n'
465
- repr_str += f'{indent_str}ratios={self.ratios},\n'
466
- repr_str += f'{indent_str}num_levels={self.num_levels},\n'
467
- repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
468
- repr_str += f'{indent_str}basesize_ratio_range='
469
- repr_str += f'{self.basesize_ratio_range})'
470
- return repr_str
471
-
472
-
473
- @ANCHOR_GENERATORS.register_module()
474
- class LegacyAnchorGenerator(AnchorGenerator):
475
- """Legacy anchor generator used in MMDetection V1.x.
476
-
477
- Note:
478
- Difference to the V2.0 anchor generator:
479
-
480
- 1. The center offset of V1.x anchors are set to be 0.5 rather than 0.
481
- 2. The width/height are reduced by 1 when calculating the anchors' \
482
- centers and corners to meet the V1.x coordinate system.
483
- 3. The anchors' corners are quantized.
484
-
485
- Args:
486
- strides (list[int] | list[tuple[int]]): Strides of anchors
487
- in multiple feature levels.
488
- ratios (list[float]): The list of ratios between the height and width
489
- of anchors in a single level.
490
- scales (list[int] | None): Anchor scales for anchors in a single level.
491
- It cannot be set at the same time if `octave_base_scale` and
492
- `scales_per_octave` are set.
493
- base_sizes (list[int]): The basic sizes of anchors in multiple levels.
494
- If None is given, strides will be used to generate base_sizes.
495
- scale_major (bool): Whether to multiply scales first when generating
496
- base anchors. If true, the anchors in the same row will have the
497
- same scales. By default it is True in V2.0
498
- octave_base_scale (int): The base scale of octave.
499
- scales_per_octave (int): Number of scales for each octave.
500
- `octave_base_scale` and `scales_per_octave` are usually used in
501
- retinanet and the `scales` should be None when they are set.
502
- centers (list[tuple[float, float]] | None): The centers of the anchor
503
- relative to the feature grid center in multiple feature levels.
504
- By default it is set to be None and not used. If a list of float
505
- is given, this list will be used to shift the centers of anchors.
506
- center_offset (float): The offset of center in proportion to anchors'
507
- width and height. By default it is 0.5 in V2.0 but it should be 0.5
508
- in v1.x models.
509
-
510
- Examples:
511
- >>> from mmdet.core import LegacyAnchorGenerator
512
- >>> self = LegacyAnchorGenerator(
513
- >>> [16], [1.], [1.], [9], center_offset=0.5)
514
- >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu')
515
- >>> print(all_anchors)
516
- [tensor([[ 0., 0., 8., 8.],
517
- [16., 0., 24., 8.],
518
- [ 0., 16., 8., 24.],
519
- [16., 16., 24., 24.]])]
520
- """
521
-
522
- def gen_single_level_base_anchors(self,
523
- base_size,
524
- scales,
525
- ratios,
526
- center=None):
527
- """Generate base anchors of a single level.
528
-
529
- Note:
530
- The width/height of anchors are reduced by 1 when calculating \
531
- the centers and corners to meet the V1.x coordinate system.
532
-
533
- Args:
534
- base_size (int | float): Basic size of an anchor.
535
- scales (torch.Tensor): Scales of the anchor.
536
- ratios (torch.Tensor): The ratio between the height
537
- and width of anchors in a single level.
538
- center (tuple[float], optional): The center of the base anchor
539
- related to a single feature grid. Defaults to None.
540
-
541
- Returns:
542
- torch.Tensor: Anchors in a single-level feature map.
543
- """
544
- w = base_size
545
- h = base_size
546
- if center is None:
547
- x_center = self.center_offset * (w - 1)
548
- y_center = self.center_offset * (h - 1)
549
- else:
550
- x_center, y_center = center
551
-
552
- h_ratios = torch.sqrt(ratios)
553
- w_ratios = 1 / h_ratios
554
- if self.scale_major:
555
- ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
556
- hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
557
- else:
558
- ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
559
- hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)
560
-
561
- # use float anchor and the anchor's center is aligned with the
562
- # pixel center
563
- base_anchors = [
564
- x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1),
565
- x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1)
566
- ]
567
- base_anchors = torch.stack(base_anchors, dim=-1).round()
568
-
569
- return base_anchors
570
-
571
-
572
- @ANCHOR_GENERATORS.register_module()
573
- class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):
574
- """Legacy anchor generator used in MMDetection V1.x.
575
-
576
- The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator`
577
- can be found in `LegacyAnchorGenerator`.
578
- """
579
-
580
- def __init__(self,
581
- strides,
582
- ratios,
583
- basesize_ratio_range,
584
- input_size=300,
585
- scale_major=True):
586
- super(LegacySSDAnchorGenerator,
587
- self).__init__(strides, ratios, basesize_ratio_range, input_size,
588
- scale_major)
589
- self.centers = [((stride - 1) / 2., (stride - 1) / 2.)
590
- for stride in strides]
591
- self.base_anchors = self.gen_base_anchors()
592
-
593
-
594
- @ANCHOR_GENERATORS.register_module()
595
- class YOLOAnchorGenerator(AnchorGenerator):
596
- """Anchor generator for YOLO.
597
-
598
- Args:
599
- strides (list[int] | list[tuple[int, int]]): Strides of anchors
600
- in multiple feature levels.
601
- base_sizes (list[list[tuple[int, int]]]): The basic sizes
602
- of anchors in multiple levels.
603
- """
604
-
605
- def __init__(self, strides, base_sizes):
606
- self.strides = [_pair(stride) for stride in strides]
607
- self.centers = [(stride[0] / 2., stride[1] / 2.)
608
- for stride in self.strides]
609
- self.base_sizes = []
610
- num_anchor_per_level = len(base_sizes[0])
611
- for base_sizes_per_level in base_sizes:
612
- assert num_anchor_per_level == len(base_sizes_per_level)
613
- self.base_sizes.append(
614
- [_pair(base_size) for base_size in base_sizes_per_level])
615
- self.base_anchors = self.gen_base_anchors()
616
-
617
- @property
618
- def num_levels(self):
619
- """int: number of feature levels that the generator will be applied"""
620
- return len(self.base_sizes)
621
-
622
- def gen_base_anchors(self):
623
- """Generate base anchors.
624
-
625
- Returns:
626
- list(torch.Tensor): Base anchors of a feature grid in multiple \
627
- feature levels.
628
- """
629
- multi_level_base_anchors = []
630
- for i, base_sizes_per_level in enumerate(self.base_sizes):
631
- center = None
632
- if self.centers is not None:
633
- center = self.centers[i]
634
- multi_level_base_anchors.append(
635
- self.gen_single_level_base_anchors(base_sizes_per_level,
636
- center))
637
- return multi_level_base_anchors
638
-
639
- def gen_single_level_base_anchors(self, base_sizes_per_level, center=None):
640
- """Generate base anchors of a single level.
641
-
642
- Args:
643
- base_sizes_per_level (list[tuple[int, int]]): Basic sizes of
644
- anchors.
645
- center (tuple[float], optional): The center of the base anchor
646
- related to a single feature grid. Defaults to None.
647
-
648
- Returns:
649
- torch.Tensor: Anchors in a single-level feature maps.
650
- """
651
- x_center, y_center = center
652
- base_anchors = []
653
- for base_size in base_sizes_per_level:
654
- w, h = base_size
655
-
656
- # use float anchor and the anchor's center is aligned with the
657
- # pixel center
658
- base_anchor = torch.Tensor([
659
- x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w,
660
- y_center + 0.5 * h
661
- ])
662
- base_anchors.append(base_anchor)
663
- base_anchors = torch.stack(base_anchors, dim=0)
664
-
665
- return base_anchors
666
-
667
- def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'):
668
- """Generate responsible anchor flags of grid cells in multiple scales.
669
-
670
- Args:
671
- featmap_sizes (list(tuple)): List of feature map sizes in multiple
672
- feature levels.
673
- gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
674
- device (str): Device where the anchors will be put on.
675
-
676
- Return:
677
- list(torch.Tensor): responsible flags of anchors in multiple levels
678
- """
679
- assert self.num_levels == len(featmap_sizes)
680
- multi_level_responsible_flags = []
681
- for i in range(self.num_levels):
682
- anchor_stride = self.strides[i]
683
- flags = self.single_level_responsible_flags(
684
- featmap_sizes[i],
685
- gt_bboxes,
686
- anchor_stride,
687
- self.num_base_anchors[i],
688
- device=device)
689
- multi_level_responsible_flags.append(flags)
690
- return multi_level_responsible_flags
691
-
692
- def single_level_responsible_flags(self,
693
- featmap_size,
694
- gt_bboxes,
695
- stride,
696
- num_base_anchors,
697
- device='cuda'):
698
- """Generate the responsible flags of anchor in a single feature map.
699
-
700
- Args:
701
- featmap_size (tuple[int]): The size of feature maps.
702
- gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
703
- stride (tuple(int)): stride of current level
704
- num_base_anchors (int): The number of base anchors.
705
- device (str, optional): Device where the flags will be put on.
706
- Defaults to 'cuda'.
707
-
708
- Returns:
709
- torch.Tensor: The responsible flags of each anchor in a single level \
710
- feature map.
711
- """
712
- feat_h, feat_w = featmap_size
713
- gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device)
714
- gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device)
715
- gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long()
716
- gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long()
717
-
718
- # row major indexing
719
- gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x
720
-
721
- responsible_grid = torch.zeros(
722
- feat_h * feat_w, dtype=torch.uint8, device=device)
723
- responsible_grid[gt_bboxes_grid_idx] = 1
724
-
725
- responsible_grid = responsible_grid[:, None].expand(
726
- responsible_grid.size(0), num_base_anchors).contiguous().view(-1)
727
- return responsible_grid
 
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/scripts/train.sh DELETED
@@ -1,19 +0,0 @@
1
- python train.py \
2
- --name celeba_styleD \
3
- --img_file /dataset/image_painting/image_list/celeba_HQ_train.txt \
4
- --mask_file /dataset/image_painting/image_list/irregular_mask_train.txt \
5
- --model tc \
6
- --coarse_or_refine coarse \
7
- --netT original \
8
- --n_encoders 12 \
9
- --n_decoders 0 \
10
- --netD style \
11
- --gpu_ids 2,1,0 \
12
- --load_size 542 \
13
- --fine_size 512 \
14
- --batch_size 24 \
15
- --display_port 8093 \
16
- --attn_G \
17
- --add_noise \
18
- --display_ncols 0 \
19
- --continue_train
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/runner/optimizer/builder.py DELETED
@@ -1,44 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- import copy
3
- import inspect
4
-
5
- import torch
6
-
7
- from ...utils import Registry, build_from_cfg
8
-
9
- OPTIMIZERS = Registry('optimizer')
10
- OPTIMIZER_BUILDERS = Registry('optimizer builder')
11
-
12
-
13
- def register_torch_optimizers():
14
- torch_optimizers = []
15
- for module_name in dir(torch.optim):
16
- if module_name.startswith('__'):
17
- continue
18
- _optim = getattr(torch.optim, module_name)
19
- if inspect.isclass(_optim) and issubclass(_optim,
20
- torch.optim.Optimizer):
21
- OPTIMIZERS.register_module()(_optim)
22
- torch_optimizers.append(module_name)
23
- return torch_optimizers
24
-
25
-
26
- TORCH_OPTIMIZERS = register_torch_optimizers()
27
-
28
-
29
- def build_optimizer_constructor(cfg):
30
- return build_from_cfg(cfg, OPTIMIZER_BUILDERS)
31
-
32
-
33
- def build_optimizer(model, cfg):
34
- optimizer_cfg = copy.deepcopy(cfg)
35
- constructor_type = optimizer_cfg.pop('constructor',
36
- 'DefaultOptimizerConstructor')
37
- paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None)
38
- optim_constructor = build_optimizer_constructor(
39
- dict(
40
- type=constructor_type,
41
- optimizer_cfg=optimizer_cfg,
42
- paramwise_cfg=paramwise_cfg))
43
- optimizer = optim_constructor(model)
44
- return optimizer
 
spaces/Apex-X/ROOPOK/CONTRIBUTING.md DELETED
@@ -1,25 +0,0 @@
1
- ## Pull Requests
2
-
3
- Before submitting a pull request, please align with us first, as we need to establish both technical and business requirements.
4
-
5
-
6
- ### Do
7
-
8
- ...consider fixing bugs over adding features
9
- - ...one pull request for one feature or improvement
10
- - ...consult us about implementation details
11
- - ...proper testing before you submit your code
12
- - ...resolve failed CI pipelines
13
-
14
-
15
- ### Don't
16
-
17
- - ...introduce fundamental changes in terms of software architecture
18
- - ...introduce OOP - we accept functional programming only
19
- - ...ignore given requirements or try to work around them
20
- - ...submit code to a development branch without consulting us
21
- ...submit massive amounts of code changes
22
- - ...submit a proof of concept
23
- - ...submit code that is using undocumented and private APIs
24
- - ...solve third party issues in our project
25
- - ...comment what your code does - use proper naming instead
 
spaces/Apex-X/nono/roop/processors/__init__.py DELETED
File without changes
spaces/Archan/ArXivAudio/get_paper.py DELETED
@@ -1,17 +0,0 @@
1
- import arxiv
2
-
3
-
4
- def get_paper(paper=""):
5
- if paper:
6
- id = paper.split(" - ")
7
- print("id= ", id)
8
-
9
- paper = next(arxiv.Search(id_list=[id[-1]]).results())
10
- print("paper title= ", paper.title)
11
- name = str(paper.title) + '.pdf'
12
- name = name.replace('?', '')
13
- name = "downloads/" + name
14
- paper.download_pdf(filename="./downloads/paper.pdf")
15
- print(name)
16
-
17
- return(paper)
 
spaces/Asahi402/White-box-Cartoonization/wbc/network.py DELETED
@@ -1,62 +0,0 @@
1
- import tensorflow as tf
2
- import numpy as np
3
- import tensorflow.contrib.slim as slim
4
-
5
-
6
-
7
- def resblock(inputs, out_channel=32, name='resblock'):
8
-
9
- with tf.variable_scope(name):
10
-
11
- x = slim.convolution2d(inputs, out_channel, [3, 3],
12
- activation_fn=None, scope='conv1')
13
- x = tf.nn.leaky_relu(x)
14
- x = slim.convolution2d(x, out_channel, [3, 3],
15
- activation_fn=None, scope='conv2')
16
-
17
- return x + inputs
18
-
19
-
20
-
21
-
22
- def unet_generator(inputs, channel=32, num_blocks=4, name='generator', reuse=False):
23
- with tf.variable_scope(name, reuse=reuse):
24
-
25
- x0 = slim.convolution2d(inputs, channel, [7, 7], activation_fn=None)
26
- x0 = tf.nn.leaky_relu(x0)
27
-
28
- x1 = slim.convolution2d(x0, channel, [3, 3], stride=2, activation_fn=None)
29
- x1 = tf.nn.leaky_relu(x1)
30
- x1 = slim.convolution2d(x1, channel*2, [3, 3], activation_fn=None)
31
- x1 = tf.nn.leaky_relu(x1)
32
-
33
- x2 = slim.convolution2d(x1, channel*2, [3, 3], stride=2, activation_fn=None)
34
- x2 = tf.nn.leaky_relu(x2)
35
- x2 = slim.convolution2d(x2, channel*4, [3, 3], activation_fn=None)
36
- x2 = tf.nn.leaky_relu(x2)
37
-
38
- for idx in range(num_blocks):
39
- x2 = resblock(x2, out_channel=channel*4, name='block_{}'.format(idx))
40
-
41
- x2 = slim.convolution2d(x2, channel*2, [3, 3], activation_fn=None)
42
- x2 = tf.nn.leaky_relu(x2)
43
-
44
- h1, w1 = tf.shape(x2)[1], tf.shape(x2)[2]
45
- x3 = tf.image.resize_bilinear(x2, (h1*2, w1*2))
46
- x3 = slim.convolution2d(x3+x1, channel*2, [3, 3], activation_fn=None)
47
- x3 = tf.nn.leaky_relu(x3)
48
- x3 = slim.convolution2d(x3, channel, [3, 3], activation_fn=None)
49
- x3 = tf.nn.leaky_relu(x3)
50
-
51
- h2, w2 = tf.shape(x3)[1], tf.shape(x3)[2]
52
- x4 = tf.image.resize_bilinear(x3, (h2*2, w2*2))
53
- x4 = slim.convolution2d(x4+x0, channel, [3, 3], activation_fn=None)
54
- x4 = tf.nn.leaky_relu(x4)
55
- x4 = slim.convolution2d(x4, 3, [7, 7], activation_fn=None)
56
-
57
- return x4
58
-
59
- if __name__ == '__main__':
60
-
61
-
62
- pass
 
spaces/Awesimo/jojogan/e4e/configs/data_configs.py DELETED
@@ -1,41 +0,0 @@
1
- from configs import transforms_config
2
- from configs.paths_config import dataset_paths
3
-
4
-
5
- DATASETS = {
6
- 'ffhq_encode': {
7
- 'transforms': transforms_config.EncodeTransforms,
8
- 'train_source_root': dataset_paths['ffhq'],
9
- 'train_target_root': dataset_paths['ffhq'],
10
- 'test_source_root': dataset_paths['celeba_test'],
11
- 'test_target_root': dataset_paths['celeba_test'],
12
- },
13
- 'cars_encode': {
14
- 'transforms': transforms_config.CarsEncodeTransforms,
15
- 'train_source_root': dataset_paths['cars_train'],
16
- 'train_target_root': dataset_paths['cars_train'],
17
- 'test_source_root': dataset_paths['cars_test'],
18
- 'test_target_root': dataset_paths['cars_test'],
19
- },
20
- 'horse_encode': {
21
- 'transforms': transforms_config.EncodeTransforms,
22
- 'train_source_root': dataset_paths['horse_train'],
23
- 'train_target_root': dataset_paths['horse_train'],
24
- 'test_source_root': dataset_paths['horse_test'],
25
- 'test_target_root': dataset_paths['horse_test'],
26
- },
27
- 'church_encode': {
28
- 'transforms': transforms_config.EncodeTransforms,
29
- 'train_source_root': dataset_paths['church_train'],
30
- 'train_target_root': dataset_paths['church_train'],
31
- 'test_source_root': dataset_paths['church_test'],
32
- 'test_target_root': dataset_paths['church_test'],
33
- },
34
- 'cats_encode': {
35
- 'transforms': transforms_config.EncodeTransforms,
36
- 'train_source_root': dataset_paths['cats_train'],
37
- 'train_target_root': dataset_paths['cats_train'],
38
- 'test_source_root': dataset_paths['cats_test'],
39
- 'test_target_root': dataset_paths['cats_test'],
40
- }
41
- }
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/analyze_model.py DELETED
@@ -1,159 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- # Copyright (c) Facebook, Inc. and its affiliates.
3
-
4
- import logging
5
- import numpy as np
6
- from collections import Counter
7
- import tqdm
8
- from fvcore.nn import flop_count_table # can also try flop_count_str
9
-
10
- from detectron2.checkpoint import DetectionCheckpointer
11
- from detectron2.config import CfgNode, LazyConfig, get_cfg, instantiate
12
- from detectron2.data import build_detection_test_loader
13
- from detectron2.engine import default_argument_parser
14
- from detectron2.modeling import build_model
15
- from detectron2.utils.analysis import (
16
- FlopCountAnalysis,
17
- activation_count_operators,
18
- parameter_count_table,
19
- )
20
- from detectron2.utils.logger import setup_logger
21
-
22
- logger = logging.getLogger("detectron2")
23
-
24
-
25
- def setup(args):
26
- if args.config_file.endswith(".yaml"):
27
- cfg = get_cfg()
28
- cfg.merge_from_file(args.config_file)
29
- cfg.DATALOADER.NUM_WORKERS = 0
30
- cfg.merge_from_list(args.opts)
31
- cfg.freeze()
32
- else:
33
- cfg = LazyConfig.load(args.config_file)
34
- cfg = LazyConfig.apply_overrides(cfg, args.opts)
35
- setup_logger(name="fvcore")
36
- setup_logger()
37
- return cfg
38
-
39
-
40
- def do_flop(cfg):
41
- if isinstance(cfg, CfgNode):
42
- data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
43
- model = build_model(cfg)
44
- DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
45
- else:
46
- data_loader = instantiate(cfg.dataloader.test)
47
- model = instantiate(cfg.model)
48
- model.to(cfg.train.device)
49
- DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
50
- model.eval()
51
-
52
- counts = Counter()
53
- total_flops = []
54
- for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa
55
- flops = FlopCountAnalysis(model, data)
56
- if idx > 0:
57
- flops.unsupported_ops_warnings(False).uncalled_modules_warnings(False)
58
- counts += flops.by_operator()
59
- total_flops.append(flops.total())
60
-
61
- logger.info("Flops table computed from only one input sample:\n" + flop_count_table(flops))
62
- logger.info(
63
- "Average GFlops for each type of operators:\n"
64
- + str([(k, v / (idx + 1) / 1e9) for k, v in counts.items()])
65
- )
66
- logger.info(
67
- "Total GFlops: {:.1f}±{:.1f}".format(np.mean(total_flops) / 1e9, np.std(total_flops) / 1e9)
68
- )
69
-
70
-
71
- def do_activation(cfg):
72
- if isinstance(cfg, CfgNode):
73
- data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
74
- model = build_model(cfg)
75
- DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
76
- else:
77
- data_loader = instantiate(cfg.dataloader.test)
78
- model = instantiate(cfg.model)
79
- model.to(cfg.train.device)
80
- DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
81
- model.eval()
82
-
83
- counts = Counter()
84
- total_activations = []
85
- for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa
86
- count = activation_count_operators(model, data)
87
- counts += count
88
- total_activations.append(sum(count.values()))
89
- logger.info(
90
- "(Million) Activations for Each Type of Operators:\n"
91
- + str([(k, v / idx) for k, v in counts.items()])
92
- )
93
- logger.info(
94
- "Total (Million) Activations: {}±{}".format(
95
- np.mean(total_activations), np.std(total_activations)
96
- )
97
- )
98
-
99
-
100
- def do_parameter(cfg):
101
- if isinstance(cfg, CfgNode):
102
- model = build_model(cfg)
103
- else:
104
- model = instantiate(cfg.model)
105
- logger.info("Parameter Count:\n" + parameter_count_table(model, max_depth=5))
106
-
107
-
108
- def do_structure(cfg):
109
- if isinstance(cfg, CfgNode):
110
- model = build_model(cfg)
111
- else:
112
- model = instantiate(cfg.model)
113
- logger.info("Model Structure:\n" + str(model))
114
-
115
-
116
- if __name__ == "__main__":
117
- parser = default_argument_parser(
118
- epilog="""
119
- Examples:
120
-
121
- To show parameters of a model:
122
- $ ./analyze_model.py --tasks parameter \\
123
- --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
124
-
125
- Flops and activations are data-dependent, therefore inputs and model weights
126
- are needed to count them:
127
-
128
- $ ./analyze_model.py --num-inputs 100 --tasks flop \\
129
- --config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \\
130
- MODEL.WEIGHTS /path/to/model.pkl
131
- """
132
- )
133
- parser.add_argument(
134
- "--tasks",
135
- choices=["flop", "activation", "parameter", "structure"],
136
- required=True,
137
- nargs="+",
138
- )
139
- parser.add_argument(
140
- "-n",
141
- "--num-inputs",
142
- default=100,
143
- type=int,
144
- help="number of inputs used to compute statistics for flops/activations, "
145
- "both are data dependent.",
146
- )
147
- args = parser.parse_args()
148
- assert not args.eval_only
149
- assert args.num_gpus == 1
150
-
151
- cfg = setup(args)
152
-
153
- for task in args.tasks:
154
- {
155
- "flop": do_flop,
156
- "activation": do_activation,
157
- "parameter": do_parameter,
158
- "structure": do_structure,
159
- }[task](cfg)
 
spaces/BasToTheMax/openai-whisper-large-v2/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Openai Whisper Large V2
3
- emoji: 🐢
4
- colorFrom: pink
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.17.0
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: satozen/openai-whisper-large-v2
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Benson/text-generation/Examples/Descargar Fichas Mgicas 3 Bum Bum Tam Tam.md DELETED
@@ -1,77 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar azulejos mágicos 3 Bum Bum Tam Tam y disfrutar de la música</h1>
3
- <p>¿Te gustan los juegos de música? ¿Quieres jugar un juego que cuenta con una de las canciones más virales de todos los tiempos? Si respondiste sí, entonces deberías descargar Magic Tiles 3 Bum Bum Tam Tam, un juego que te hará tocar los pies y los dedos al ritmo de esta pegadiza canción brasileña. En este artículo, te contaremos todo lo que necesitas saber sobre este juego, incluyendo qué es, cómo descargarlo, cómo jugarlo y por qué deberías probarlo hoy. </p>
4
- <h2>¿Qué es Magic Tiles 3 Bum Bum Tam Tam? </h2>
5
- <p>Magic Tiles 3 Bum Bum Tam Tam es un juego de música que se basa en la canción "Bum Bum Tam Tam" de MC Fioti, que tiene más de 1.6 mil millones de visitas en YouTube. La canción es una fusión de funk brasileño y música clásica, con una muestra de flauta de Johann Sebastian Bach "Partita in A minor for solo flauta". La canción se convirtió en una sensación global en 2017, gracias a su estribillo pegadizo y movimientos de baile. </p>
6
- <h2>descargar fichas mágicas 3 bum bum tam tam</h2><br /><p><b><b>Download Zip</b> &#9658;&#9658;&#9658;&#9658;&#9658; <a href="https://bltlly.com/2v6JIG">https://bltlly.com/2v6JIG</a></b></p><br /><br />
7
- <h3>Un juego de música popular con una canción pegadiza</h3>
8
- <p>Magic Tiles 3 es uno de los juegos de música más populares del mercado, con más de 100 millones de descargas en Google Play. El juego te permite tocar varias canciones en un piano virtual, tocando las fichas que aparecen en la pantalla. El juego tiene muchos géneros y temas, como pop, rock, clásico, anime, EDM y más. Uno de los temas es "Bum Bum Tam Tam", que cuenta con la canción original y varios remixes de diferentes artistas. El juego también actualiza su lista de canciones regularmente, por lo que siempre puedes encontrar nuevas canciones para jugar. </p>
9
- <h3>Un juego desafiante y divertido</h3>
10
-
11
- <h3>Una variedad de modos y canciones para elegir</h3>
12
- <p>Magic Tiles 3 también ofrece una variedad de modos y canciones para adaptarse a sus preferencias y estado de ánimo. Puedes jugar solo o con amigos en el modo multijugador online. También puedes competir con otros jugadores de todo el mundo en el modo batalla. También puede personalizar su piano con diferentes pieles y temas. Además, puedes elegir entre cientos de canciones de diferentes géneros y temas, incluyendo "Bum Bum Tam Tam" y sus remixes. También puedes desbloquear nuevas canciones y características al ganar monedas y diamantes en el juego. </p>
13
- <h2>¿Cómo descargar azulejos mágicos 3 Bum Bum Tam Tam en su dispositivo? </h2>
14
- <p>Descargar Magic Tiles 3 Bum Bum Tam Tam es fácil y gratuito. Puede descargarlo en su para "Magic Tiles 3". </li>
15
- <li>Seleccione la aplicación con el icono de un piano y una estrella, y toque en "Obtener". </li>
16
- <li>Ingrese su ID de Apple y contraseña si se le solicita, y espere a que la aplicación se descargue e instale en su dispositivo. </li>
17
- <li> Abra la aplicación y toque en el tema "Bum Bum Tam Tam" en el menú principal. </li>
18
- <li>Disfruta jugando el juego con la canción de tu elección. </li>
19
- </ol>
20
- <h3>Para usuarios de PC</h3>
21
- <p>Si tienes un PC, puedes descargar Magic Tiles 3 Bum Bum Tam Tam desde Microsoft Store. Estos son los pasos para hacerlo:</p>
22
- <ol>
23
- <li>Abra la tienda de Microsoft en su PC y busque "Magic Tiles 3". </li>
24
- <li>Seleccione la aplicación con el icono de un piano y una estrella, y haga clic en "Obtener". </li>
25
- <li>Inicie sesión con su cuenta de Microsoft si se le solicita, y espere a que la aplicación se descargue e instale en su PC.</li>
26
- <li> Abra la aplicación y haga clic en el tema "Bum Bum Tam Tam" en el menú principal. </li>
27
- <li>Disfruta jugando el juego con la canción de tu elección. </li>
28
- </ol>
29
- <h2>Cómo jugar Magic Tiles 3 Bum Bum Tam Tam y mejorar sus habilidades? </h2>
30
- <p>Jugar Magic Tiles 3 Bum Bum Tam Tam es fácil de aprender pero difícil de dominar. Necesitas tener buenos reflejos, coordinación y ritmo para jugar bien. Aquí hay algunos consejos sobre cómo jugar y mejorar tus habilidades:</p>
31
-
32
- <p>La regla básica de Magic Tiles 3 es tocar las fichas negras que corresponden a las notas de la canción, evitando las fichas blancas. Si te pierdes una ficha negra o toca una ficha blanca, pierdes. También debe tocar las baldosas negras largas que se extienden a través de varias columnas y deslizar el dedo a lo largo de ellas. El juego te mostrará qué fichas tocar con flechas e indicadores, así que presta atención a ellos. </p>
33
- <h3>Sigue el ritmo y el tempo de la canción</h3>
34
- <p>La clave para tocar bien es seguir el ritmo y el tempo de la canción. Tienes que tocar las baldosas en el momento adecuado, de acuerdo con el ritmo y la melodía de la canción. Si toca demasiado temprano o demasiado tarde, perderá puntos y precisión. También puede ajustar la velocidad de la canción en la configuración, de lenta a rápida. Cuanto más rápida sea la velocidad, más difícil será el juego. </p>
35
- <h3>Gana monedas y diamantes para desbloquear nuevas canciones y características</h3>
36
- <p>Mientras juegas Magic Tiles 3, ganarás monedas y diamantes que puedes usar para desbloquear nuevas canciones y características. Puedes ganar monedas completando niveles, viendo anuncios o haciendo girar la rueda. Puedes ganar diamantes completando logros, ingresando diariamente o comprándolos con dinero real. Puedes usar monedas y diamantes para comprar nuevas canciones, temas, skins y potenciadores. Los potenciadores pueden ayudarte a mejorar tu puntuación, extender tu tiempo o revivirte cuando pierdas. </p>
37
- <h2>¿Por qué usted debe descargar los azulejos mágicos 3 Bum Bum Tam Tam hoy? </h2>
38
- <p>Magic Tiles 3 Bum Bum Tam Tam es un juego que deberías descargar hoy por muchas razones. Estas son algunas de ellas:</p>
39
- <p></p>
40
- <h3>Es gratis y fácil de jugar</h3>
41
- <p>Magic Tiles 3 Bum Bum Tam Tam es un juego gratuito que puede descargar y jugar en cualquier momento, en cualquier lugar. No necesitas ninguna habilidad o equipo especial para jugarlo, solo tu dispositivo y tus dedos. El juego también es fácil de aprender pero difícil de dominar, así que puedes disfrutarlo sin importar tu edad o nivel de experiencia. </p>
42
- <h3>Es una gran manera de relajarse y divertirse</h3>
43
-
44
- <h3>Es un buen ejercicio para el cerebro y los dedos</h3>
45
- <p>Magic Tiles 3 Bum Bum Tam Tam es un juego que también ejercitará tu cerebro y tus dedos. Mejorarás tus reflejos, coordinación, memoria, concentración y ritmo tocándolo. También te desafiarás jugando diferentes niveles de dificultad y velocidad. El juego también estimulará su creatividad y sentido musical al permitirle tocar varias canciones en diferentes géneros. </p>
46
- <h2>Conclusión</h2>
47
- <p>Magic Tiles 3 Bum Bum Tam Tam es un juego que no debes perderte si te gusta la música y la diversión. Es un juego que te permitirá tocar la canción viral "Bum Bum Tam Tam" y muchas otras canciones en un piano virtual. Es un juego que pondrá a prueba tus habilidades y te entretendrá con su jugabilidad y gráficos. Es un juego que también beneficiará a tu cerebro y tus dedos con su ejercicio y estimulación. Entonces, ¿qué estás esperando? Descargar Magic Tiles 3 Bum Bum Tam Tam hoy y disfrutar de la música! </p>
48
- <h2>Preguntas frecuentes</h2>
49
- <p>Aquí hay algunas preguntas frecuentes sobre Magic Tiles 3 Bum Bum Tam Tam:</p>
50
- <tabla>
51
- <tr>
52
- <th>Pregunta</th>
53
- <th>Respuesta</th>
54
- </tr>
55
- <tr>
56
- <td>¿Es seguro descargar Magic Tiles 3 Bum Bum Tam Tam? </td>
57
- <td>Sí, Magic Tiles 3 Bum Bum Tam Tam es seguro para descargar de las fuentes oficiales, como Google Play, App Store y Microsoft Store. El juego no contiene virus, malware o contenido dañino. </td>
58
- </tr>
59
- <tr>
60
- <td>¿Puedo jugar Magic Tiles 3 Bum Bum Tam Tam sin conexión? </td>
61
- <td>Sí, puedes jugar Magic Tiles 3 Bum Bum Tam Tam sin conexión, siempre y cuando hayas descargado las canciones que quieres tocar. Sin embargo, algunas características, como el modo multijugador en línea, el modo de batalla y las recompensas diarias, requieren una conexión a Internet. </td>
62
- </tr>
63
- <tr>
64
- <td>¿Cómo puedo obtener más monedas y diamantes en Magic Tiles 3 Bum Bum Tam Tam? </td>
65
-
66
- </tr>
67
- <tr>
68
- <td>¿Cómo puedo cambiar el lenguaje de Magic Tiles 3 Bum Bum Tam Tam? </td>
69
- <td>Puede cambiar el idioma de Magic Tiles 3 Bum Bum Tam Tam yendo al menú de configuración y seleccionando la opción de idioma. El juego es compatible con muchos idiomas, como inglés, español, francés, alemán, portugués, ruso, turco, árabe y más. </td>
70
- </tr>
71
- <tr>
72
- <td>¿Cómo puedo contactar a los desarrolladores de Magic Tiles 3 Bum Bum Tam Tam? </td>
73
- <td>Puede ponerse en contacto con los desarrolladores de Magic Tiles 3 Bum Bum Tam Tam enviando un correo electrónico a [email protected] o visitando su sitio web en https://amanotes.com/.</td>
74
- </tr>
75
- </tabla></p> 64aa2da5cf<br />
76
- <br />
77
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/more_itertools/__init__.py DELETED
@@ -1,4 +0,0 @@
- from .more import *  # noqa
- from .recipes import *  # noqa
-
- __version__ = '8.8.0'
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/zipp.py DELETED
@@ -1,329 +0,0 @@
1
- import io
2
- import posixpath
3
- import zipfile
4
- import itertools
5
- import contextlib
6
- import sys
7
- import pathlib
8
-
9
- if sys.version_info < (3, 7):
10
- from collections import OrderedDict
11
- else:
12
- OrderedDict = dict
13
-
14
-
15
- __all__ = ['Path']
16
-
17
-
18
- def _parents(path):
19
- """
20
- Given a path with elements separated by
21
- posixpath.sep, generate all parents of that path.
22
-
23
- >>> list(_parents('b/d'))
24
- ['b']
25
- >>> list(_parents('/b/d/'))
26
- ['/b']
27
- >>> list(_parents('b/d/f/'))
28
- ['b/d', 'b']
29
- >>> list(_parents('b'))
30
- []
31
- >>> list(_parents(''))
32
- []
33
- """
34
- return itertools.islice(_ancestry(path), 1, None)
35
-
36
-
37
- def _ancestry(path):
38
- """
39
- Given a path with elements separated by
40
- posixpath.sep, generate all elements of that path
41
-
42
- >>> list(_ancestry('b/d'))
43
- ['b/d', 'b']
44
- >>> list(_ancestry('/b/d/'))
45
- ['/b/d', '/b']
46
- >>> list(_ancestry('b/d/f/'))
47
- ['b/d/f', 'b/d', 'b']
48
- >>> list(_ancestry('b'))
49
- ['b']
50
- >>> list(_ancestry(''))
51
- []
52
- """
53
- path = path.rstrip(posixpath.sep)
54
- while path and path != posixpath.sep:
55
- yield path
56
- path, tail = posixpath.split(path)
57
-
58
-
59
- _dedupe = OrderedDict.fromkeys
60
- """Deduplicate an iterable in original order"""
61
-
62
-
63
- def _difference(minuend, subtrahend):
64
- """
65
- Return items in minuend not in subtrahend, retaining order
66
- with O(1) lookup.
67
- """
68
- return itertools.filterfalse(set(subtrahend).__contains__, minuend)
69
-
70
-
71
- class CompleteDirs(zipfile.ZipFile):
72
- """
73
- A ZipFile subclass that ensures that implied directories
74
- are always included in the namelist.
75
- """
76
-
77
- @staticmethod
78
- def _implied_dirs(names):
79
- parents = itertools.chain.from_iterable(map(_parents, names))
80
- as_dirs = (p + posixpath.sep for p in parents)
81
- return _dedupe(_difference(as_dirs, names))
82
-
83
- def namelist(self):
84
- names = super(CompleteDirs, self).namelist()
85
- return names + list(self._implied_dirs(names))
86
-
87
- def _name_set(self):
88
- return set(self.namelist())
89
-
90
- def resolve_dir(self, name):
91
- """
92
- If the name represents a directory, return that name
93
- as a directory (with the trailing slash).
94
- """
95
- names = self._name_set()
96
- dirname = name + '/'
97
- dir_match = name not in names and dirname in names
98
- return dirname if dir_match else name
99
-
100
- @classmethod
101
- def make(cls, source):
102
- """
103
- Given a source (filename or zipfile), return an
104
- appropriate CompleteDirs subclass.
105
- """
106
- if isinstance(source, CompleteDirs):
107
- return source
108
-
109
- if not isinstance(source, zipfile.ZipFile):
110
- return cls(_pathlib_compat(source))
111
-
112
- # Only allow for FastLookup when supplied zipfile is read-only
113
- if 'r' not in source.mode:
114
- cls = CompleteDirs
115
-
116
- source.__class__ = cls
117
- return source
118
-
119
-
120
- class FastLookup(CompleteDirs):
121
- """
122
- ZipFile subclass to ensure implicit
123
- dirs exist and are resolved rapidly.
124
- """
125
-
126
- def namelist(self):
127
- with contextlib.suppress(AttributeError):
128
- return self.__names
129
- self.__names = super(FastLookup, self).namelist()
130
- return self.__names
131
-
132
- def _name_set(self):
133
- with contextlib.suppress(AttributeError):
134
- return self.__lookup
135
- self.__lookup = super(FastLookup, self)._name_set()
136
- return self.__lookup
137
-
138
-
139
- def _pathlib_compat(path):
140
- """
141
- For path-like objects, convert to a filename for compatibility
142
- on Python 3.6.1 and earlier.
143
- """
144
- try:
145
- return path.__fspath__()
146
- except AttributeError:
147
- return str(path)
148
-
149
-
150
- class Path:
151
- """
152
- A pathlib-compatible interface for zip files.
153
-
154
- Consider a zip file with this structure::
155
-
156
- .
157
- ├── a.txt
158
- └── b
159
- ├── c.txt
160
- └── d
161
- └── e.txt
162
-
163
- >>> data = io.BytesIO()
164
- >>> zf = zipfile.ZipFile(data, 'w')
165
- >>> zf.writestr('a.txt', 'content of a')
166
- >>> zf.writestr('b/c.txt', 'content of c')
167
- >>> zf.writestr('b/d/e.txt', 'content of e')
168
- >>> zf.filename = 'mem/abcde.zip'
169
-
170
- Path accepts the zipfile object itself or a filename
171
-
172
- >>> root = Path(zf)
173
-
174
- From there, several path operations are available.
175
-
176
- Directory iteration (including the zip file itself):
177
-
178
- >>> a, b = root.iterdir()
179
- >>> a
180
- Path('mem/abcde.zip', 'a.txt')
181
- >>> b
182
- Path('mem/abcde.zip', 'b/')
183
-
184
- name property:
185
-
186
- >>> b.name
187
- 'b'
188
-
189
- join with divide operator:
190
-
191
- >>> c = b / 'c.txt'
192
- >>> c
193
- Path('mem/abcde.zip', 'b/c.txt')
194
- >>> c.name
195
- 'c.txt'
196
-
197
- Read text:
198
-
199
- >>> c.read_text()
200
- 'content of c'
201
-
202
- existence:
203
-
204
- >>> c.exists()
205
- True
206
- >>> (b / 'missing.txt').exists()
207
- False
208
-
209
- Coercion to string:
210
-
211
- >>> import os
212
- >>> str(c).replace(os.sep, posixpath.sep)
213
- 'mem/abcde.zip/b/c.txt'
214
-
215
- At the root, ``name``, ``filename``, and ``parent``
216
- resolve to the zipfile. Note these attributes are not
217
- valid and will raise a ``ValueError`` if the zipfile
218
- has no filename.
219
-
220
- >>> root.name
221
- 'abcde.zip'
222
- >>> str(root.filename).replace(os.sep, posixpath.sep)
223
- 'mem/abcde.zip'
224
- >>> str(root.parent)
225
- 'mem'
226
- """
227
-
228
- __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
229
-
230
- def __init__(self, root, at=""):
231
- """
232
- Construct a Path from a ZipFile or filename.
233
-
234
- Note: When the source is an existing ZipFile object,
235
- its type (__class__) will be mutated to a
236
- specialized type. If the caller wishes to retain the
237
- original type, the caller should either create a
238
- separate ZipFile object or pass a filename.
239
- """
240
- self.root = FastLookup.make(root)
241
- self.at = at
242
-
243
- def open(self, mode='r', *args, pwd=None, **kwargs):
244
- """
245
- Open this entry as text or binary following the semantics
246
- of ``pathlib.Path.open()`` by passing arguments through
247
- to io.TextIOWrapper().
248
- """
249
- if self.is_dir():
250
- raise IsADirectoryError(self)
251
- zip_mode = mode[0]
252
- if not self.exists() and zip_mode == 'r':
253
- raise FileNotFoundError(self)
254
- stream = self.root.open(self.at, zip_mode, pwd=pwd)
255
- if 'b' in mode:
256
- if args or kwargs:
257
- raise ValueError("encoding args invalid for binary operation")
258
- return stream
259
- return io.TextIOWrapper(stream, *args, **kwargs)
260
-
261
- @property
262
- def name(self):
263
- return pathlib.Path(self.at).name or self.filename.name
264
-
265
- @property
266
- def suffix(self):
267
- return pathlib.Path(self.at).suffix or self.filename.suffix
268
-
269
- @property
270
- def suffixes(self):
271
- return pathlib.Path(self.at).suffixes or self.filename.suffixes
272
-
273
- @property
274
- def stem(self):
275
- return pathlib.Path(self.at).stem or self.filename.stem
276
-
277
- @property
278
- def filename(self):
279
- return pathlib.Path(self.root.filename).joinpath(self.at)
280
-
281
- def read_text(self, *args, **kwargs):
282
- with self.open('r', *args, **kwargs) as strm:
283
- return strm.read()
284
-
285
- def read_bytes(self):
286
- with self.open('rb') as strm:
287
- return strm.read()
288
-
289
- def _is_child(self, path):
290
- return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
291
-
292
- def _next(self, at):
293
- return self.__class__(self.root, at)
294
-
295
- def is_dir(self):
296
- return not self.at or self.at.endswith("/")
297
-
298
- def is_file(self):
299
- return self.exists() and not self.is_dir()
300
-
301
- def exists(self):
302
- return self.at in self.root._name_set()
303
-
304
- def iterdir(self):
305
- if not self.is_dir():
306
- raise ValueError("Can't listdir a file")
307
- subs = map(self._next, self.root.namelist())
308
- return filter(self._is_child, subs)
309
-
310
- def __str__(self):
311
- return posixpath.join(self.root.filename, self.at)
312
-
313
- def __repr__(self):
314
- return self.__repr.format(self=self)
315
-
316
- def joinpath(self, *other):
317
- next = posixpath.join(self.at, *map(_pathlib_compat, other))
318
- return self._next(self.root.resolve_dir(next))
319
-
320
- __truediv__ = joinpath
321
-
322
- @property
323
- def parent(self):
324
- if not self.at:
325
- return self.filename.parent
326
- parent_at = posixpath.dirname(self.at.rstrip('/'))
327
- if parent_at:
328
- parent_at += '/'
329
- return self._next(parent_at)
 
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/_version.py DELETED
@@ -1,2 +0,0 @@
- # This file is protected via CODEOWNERS
- __version__ = "1.26.15"
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/modeling/sampling.py DELETED
@@ -1,50 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
- import torch
-
- __all__ = ["subsample_labels"]
-
-
- def subsample_labels(labels, num_samples, positive_fraction, bg_label):
-     """
-     Return `num_samples` (or fewer, if not enough found)
-     random samples from `labels` which is a mixture of positives & negatives.
-     It will try to return as many positives as possible without
-     exceeding `positive_fraction * num_samples`, and then try to
-     fill the remaining slots with negatives.
-
-     Args:
-         labels (Tensor): (N, ) label vector with values:
-             * -1: ignore
-             * bg_label: background ("negative") class
-             * otherwise: one or more foreground ("positive") classes
-         num_samples (int): The total number of labels with value >= 0 to return.
-             Values that are not sampled will be filled with -1 (ignore).
-         positive_fraction (float): The number of subsampled labels with values > 0
-             is `min(num_positives, int(positive_fraction * num_samples))`. The number
-             of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`.
-             In other words, if there are not enough positives, the sample is filled with
-             negatives. If there are also not enough negatives, then as many elements are
-             sampled as is possible.
-         bg_label (int): label index of background ("negative") class.
-
-     Returns:
-         pos_idx, neg_idx (Tensor):
-             1D vector of indices. The total length of both is `num_samples` or fewer.
-     """
-     positive = torch.nonzero((labels != -1) & (labels != bg_label)).squeeze(1)
-     negative = torch.nonzero(labels == bg_label).squeeze(1)
-
-     num_pos = int(num_samples * positive_fraction)
-     # protect against not enough positive examples
-     num_pos = min(positive.numel(), num_pos)
-     num_neg = num_samples - num_pos
-     # protect against not enough negative examples
-     num_neg = min(negative.numel(), num_neg)
-
-     # randomly select positive and negative examples
-     perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos]
-     perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg]
-
-     pos_idx = positive[perm1]
-     neg_idx = negative[perm2]
-     return pos_idx, neg_idx
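
The deleted `subsample_labels` helper balances foreground and background proposals before loss computation. A minimal usage sketch, assuming the module above is importable as `detectron2.modeling.sampling`; the toy label values and sampling ratio below are illustrative, not taken from any config:

```python
import torch
from detectron2.modeling.sampling import subsample_labels  # assumed import path

# toy label vector: -1 = ignore, 0 = background, >0 = foreground classes
labels = torch.tensor([-1, 0, 0, 1, 2, 0, 3, -1, 0, 1])
pos_idx, neg_idx = subsample_labels(labels, num_samples=4, positive_fraction=0.5, bg_label=0)
# at most 2 positives are kept; the remaining slots of the 4 are filled with background
print(pos_idx.tolist(), neg_idx.tolist())
```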
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/sem_optimize_patch.py DELETED
@@ -1,532 +0,0 @@
1
- """
2
- =========================================================================================
3
- Trojan VQA
4
- Written by Matthew Walmer
5
-
6
- Generate an optimized patch designed to create a strong activation for a specified
7
- object + attribute semantic target. Includes additional tools to explore the detections
8
- in the (clean) VQA training set to aid in selection of semantic targets
9
- =========================================================================================
10
- """
11
- import os
12
- import shutil
13
- import time
14
- import argparse
15
- import random
16
- import tqdm
17
- import cv2
18
- import numpy as np
19
- import torch
20
- import json
21
- import pickle
22
- import random
23
- from torch.autograd import Variable
24
-
25
- from triggers import feature_space_trigger
26
- from utils import load_detectron_predictor, check_for_cuda
27
-
28
-
29
-
30
- # parse and show the target setting(s), which may be the integer id or the name
31
- def parse_targets(dataroot, ct, o, a):
32
- annot = json.load(open(os.path.join(dataroot, "annotation_map.json"), "r"))
33
- category_list = annot["categories"]
34
- attr_list = annot["attCategories"]
35
- if ct is not None:
36
- o, a = ct.split('+')
37
- print('Semantic Target Settings:')
38
- o_id, o_name = parse_target(o, category_list, 'object')
39
- a_id, a_name = parse_target(a, attr_list, 'attribute')
40
- return o_id, a_id
41
-
42
-
43
-
44
- # parse one setting
45
- def parse_target(t, data_list, t_type):
46
- if t is None:
47
- print('%s target: None'%t_type)
48
- return None, None
49
- data_dict = {}
50
- for i in range(len(data_list)):
51
- data_dict[data_list[i]["name"]] = i
52
- if t in data_dict:
53
- t_id = data_dict[t]
54
- t_name = t
55
- else:
56
- try:
57
- t_id = int(t)
58
- except:
59
- print('ERROR: Could not parse %s target: %s'%(t_type, str(t)))
60
- exit(-1)
61
- # treat a -1 as None:
62
- if t_id == -1:
63
- print('%s target: None'%t_type)
64
- return None, None
65
- t_name = data_list[t_id]
66
- print('%s target: %s [%i]'%(t_type, t_name, t_id))
67
- return t_id, t_name
68
-
69
-
70
-
71
- # helper tool to lookup the names of objects and attributes
72
- def lookup_labels(dataroot, l_type, l_ids):
73
- assert l_type in ['object', 'attribute']
74
- annot = json.load(open(os.path.join(dataroot, "annotation_map.json"), "r"))
75
- category_list = annot["categories"]
76
- attr_list = annot["attCategories"]
77
- if type(l_ids) is not list:
78
- l_ids = [l_ids]
79
- for l_id in l_ids:
80
- if l_type == 'object':
81
- obj = category_list[l_id]["name"]
82
- print('object[%i]: %s'%(l_id, obj))
83
- else:
84
- attr = attr_list[l_id]["name"]
85
- print('attribute[%i]: %s'%(l_id, attr))
86
-
87
-
88
-
89
- # helper tool to list the names of objects and attributes
90
- def list_all_labels(dataroot, l_type):
91
- assert l_type in ['object', 'attribute']
92
- annot = json.load(open(os.path.join(dataroot, "annotation_map.json"), "r"))
93
- category_list = annot["categories"]
94
- attr_list = annot["attCategories"]
95
- if l_type == 'object':
96
- print('Objects:')
97
- data = category_list
98
- else:
99
- print('Attributes:')
100
- data = attr_list
101
- for i in range(len(data)):
102
- name = data[i]["name"]
103
- print('%i - %s'%(i, name))
104
-
105
-
106
-
107
- # helper tool to explore the saved detections in the (clean) training set, to
108
- # aid in the search for good, rare, semantic targets for optimized patches
109
- def explore_detections(dataroot, detector='R-50', data_part='train2014', verbose=False, get_dict=False):
110
- assert data_part in ['train2014', 'val2014']
111
- feat_dir = os.path.join(dataroot, 'feature_cache', 'clean', detector, data_part)
112
- if not os.path.isdir(feat_dir):
113
- print('WARNING: Cannot run explore_detections until after clean features have been extracted')
114
- exit(-1)
115
- annot = json.load(open(os.path.join(dataroot, "annotation_map.json"), "r"))
116
- category_list = annot["categories"]
117
- attr_list = annot["attCategories"]
118
- feat_files = os.listdir(feat_dir)
119
- occ_info = {}
120
- obj2id = {}
121
- attr2id = {}
122
- for f in tqdm.tqdm(feat_files):
123
- info_file = os.path.join(feat_dir, f)
124
- info = pickle.load(open(info_file, "rb"))
125
- nb = info['boxes'].shape[0]
126
- for i in range(nb):
127
- obj = int(info['object_ids'][i])
128
- if obj not in occ_info:
129
- occ_info[obj] = {}
130
- occ_info[obj]['name'] = category_list[obj]["name"]
131
- occ_info[obj]['count'] = 0
132
- occ_info[obj]['fal'] = [] # fractional area list - track size on object in image
133
- occ_info[obj]['attr'] = {} # track attributes that occur with this object
134
- occ_info[obj]['attr_src'] = {} # track images with certain object attribute combinations
135
- obj2id[category_list[obj]["name"]] = obj
136
- occ_info[obj]['count'] += 1
137
- img_area = info['img_h'] * info['img_w']
138
- x0, y0, x1, y1 = info['boxes'][i]
139
- patch_area = float((x1-x0)*(y1-y0))
140
- fal = patch_area / img_area
141
- occ_info[obj]['fal'].append(fal)
142
- # track attributes
143
- attr = int(info['attr_ids'][i])
144
- if attr not in occ_info[obj]['attr']:
145
- occ_info[obj]['attr'][attr] = 0
146
- occ_info[obj]['attr_src'][attr] = []
147
- attr2id[attr_list[attr]["name"]] = attr
148
- occ_info[obj]['attr'][attr] += 1
149
- occ_info[obj]['attr_src'][attr].append(f)
150
- # get_dict mode, return occ info
151
- if get_dict:
152
- return occ_info, obj2id, attr2id
153
- # identify sorted order
154
- arr_objects = []
155
- arr_counts = []
156
- tot_counts = 0
157
- for key in occ_info:
158
- arr_objects.append(key)
159
- arr_counts.append(occ_info[key]['count'])
160
- tot_counts += occ_info[key]['count']
161
- arr_objects = np.array(arr_objects)
162
- arr_counts = np.array(arr_counts)
163
- srt_idx = np.argsort(-1 * arr_counts)
164
- srt_objects = arr_objects[srt_idx]
165
- # print information, and write to file
166
- outfile = 'explore_%s_%s.txt'%(detector, data_part)
167
- print('writing exploration results to: ' + outfile)
168
- # track a list of all object+attribute combinations, in sorted order
169
- obj_plus_attr = []
170
- obj_plus_attr_c = []
171
- with open(outfile, 'w') as f:
172
- for key in srt_objects:
173
- name = occ_info[key]['name']
174
- count = occ_info[key]['count']
175
- frac = count / tot_counts
176
- fals = np.array(occ_info[key]['fal'])
177
- avg_fal = np.mean(fals)
178
- std_fal = np.std(fals)
179
- if verbose: print('[%i] %s - %i (%.5f) - %.5f+-%.5f'%(key, name, count, frac, avg_fal, 2*std_fal))
180
- f.write('[%i] %s - %i (%.5f) - %.5f+-%.5f\n'%(key, name, count, frac, avg_fal, 2*std_fal))
181
- for attr in occ_info[key]['attr']:
182
- attr_name = attr_list[attr]["name"]
183
- count = occ_info[key]['attr'][attr]
184
- if verbose: print(' {%i} %s - %i'%(attr, attr_name, count))
185
- f.write(' {%i} %s - %i\n'%(attr, attr_name, count))
186
- # track combinations
187
- comb_string = '[%i]{%i} %s+%s - %i'%(key, attr, name, attr_name, count)
188
- obj_plus_attr.append(comb_string)
189
- obj_plus_attr_c.append(count)
190
- # write list of all combinations in order of count
191
- obj_plus_attr_c = np.array(obj_plus_attr_c)
192
- idx_srt = np.argsort(-1 * obj_plus_attr_c)
193
- outfile = 'combinations_%s_%s.txt'%(detector, data_part)
194
- with open(outfile, 'w') as f:
195
- for i in range(len(obj_plus_attr)):
196
- idx = idx_srt[i]
197
- comb_string = obj_plus_attr[idx]
198
- f.write(comb_string + '\n')
199
- print('---')
200
- print('total number of detections: %i'%tot_counts)
201
- print('number of object types: %i'%arr_objects.shape[0])
202
- if data_part != 'train2014': return
203
- # Identify good object attribute pair candidates
204
- print('---')
205
- print('patch target candidates:')
206
- outfile = 'candidates_%s_%s.txt'%(detector, data_part)
207
- print('writing candidate results to: ' + outfile)
208
- candidates = []
209
- with open(outfile, 'w') as f:
210
- for key in srt_objects:
211
- name = occ_info[key]['name']
212
- count = occ_info[key]['count']
213
- fals = np.array(occ_info[key]['fal'])
214
- avg_fal = np.mean(fals)
215
- std_fal = np.std(fals)
216
- # test if approximate patch size is within 1 stdev of mean for object class
217
- if not (avg_fal - std_fal < 0.01 and 0.01 < avg_fal + std_fal):
218
- continue
219
- # look for object+attribute combinations that are moderately rare
220
- for attr in occ_info[key]['attr']:
221
- attr_name = attr_list[attr]["name"]
222
- attr_count = occ_info[key]['attr'][attr]
223
- if 100 <= attr_count and attr_count <= 2000:
224
- if verbose: print("%s + %s - %i"%(name, attr_name, attr_count))
225
- f.write("%s + %s - %i\n"%(name, attr_name, attr_count))
226
- candidates.append("%s + %s - %i"%(name, attr_name, attr_count))
227
- # print a shuffled sub-list of candidates
228
- random.shuffle(candidates)
229
- for i in range(100):
230
- print(candidates[i])
231
-
232
-
233
-
234
- # helper script to find images containing natural examples of the requested object type(s)
235
- # requests can be passed as a comma separated list of <obj>+<attr> pairs. For example: helmet+silver,head+green
236
- def find_examples(dataroot, requests, detector='R-50', data_part='train2014', count=25):
237
- assert data_part in ['train2014', 'val2014']
238
- if ',' in requests:
239
- requests = requests.split(',')
240
- else:
241
- requests = [requests]
242
- occ_info, obj2id, attr2id = explore_detections(dataroot, detector, data_part, get_dict=True)
243
- for r in requests:
244
- obj, attr = r.split('+')
245
- print('===== %s + %s'%(obj,attr))
246
- if obj not in obj2id:
247
- print('no instances of object %s found'%obj)
248
- continue
249
- obj_id = obj2id[obj]
250
- if attr not in attr2id:
251
- print('no instances of attribute %s found'%attr)
252
- continue
253
- attr_id = attr2id[attr]
254
- if attr_id not in occ_info[obj_id]["attr_src"]:
255
- print('no instances of %s+%s found'%(obj, attr))
256
- continue
257
- files = occ_info[obj_id]["attr_src"][attr_id]
258
- outdir = os.path.join('find_examples', detector, data_part, r)
259
- os.makedirs(outdir, exist_ok=True)
260
- sel_files = []
261
- for i in range(len(files)):
262
- f = files[i]
263
- if f not in sel_files:
264
- sel_files.append(f)
265
- if len(sel_files) == count:
266
- break
267
- for f in sel_files:
268
- f = f.replace('.pkl', '')
269
- print(f)
270
- src = os.path.join('../data/clean', data_part, f)
271
- dst = os.path.join(outdir, f)
272
- shutil.copy(src, dst)
273
-
274
-
275
-
276
- # helper tool, check the resolutions by scale
277
- def check_res(dataroot, scale):
278
- img_dir = os.path.join(dataroot, 'clean', 'train2014')
279
- files = os.listdir(img_dir)
280
- res_count = np.zeros(100, dtype=int)
281
- for f in tqdm.tqdm(files):
282
- img_path = os.path.join(img_dir, f)
283
- img = cv2.imread(img_path)
284
- imsize = img.shape[:2]
285
- l = int(np.min(imsize) * scale)
286
- res_count[l] += 1
287
- idx_srt = np.argsort(-1*res_count)
288
- avg_top = 0
289
- avg_bot = 0
290
- for i in range(100):
291
- idx = idx_srt[i]
292
- if res_count[idx] == 0:
293
- break
294
- print('%i - %i'%(idx, res_count[idx]))
295
- avg_bot += res_count[idx]
296
- avg_top += (idx*res_count[idx])
297
- avg = float(avg_top) / avg_bot
298
- print('-')
299
- print('average: ' + str(avg))
300
-
301
-
302
- #==================================================================================================
303
-
304
-
305
- def embed_patch(img, patch, scale):
306
- imsize = img.shape[1:]
307
- l = int(np.min(imsize) * scale)
308
- c0 = int(imsize[0] / 2)
309
- c1 = int(imsize[1] / 2)
310
- s0 = int(c0 - (l/2))
311
- s1 = int(c1 - (l/2))
312
- p = torch.nn.functional.interpolate(patch, size=(l,l), mode='bilinear')
313
- p = p.squeeze(0)
314
- p = torch.clip(p, 0.0, 1.0)
315
- img[:, s0:s0+l, s1:s1+l] = p * 255
316
- return img
317
-
318
-
319
-
320
- def optimize_patch(dataroot, model_dir, detector, nb, scale, res, epochs, limit, prog, init,
321
- patch_name, over, seed, obj_target, attr_target, lam):
322
- if obj_target is None and attr_target is None:
323
- print('ERROR: Must specify an object id target or an attribute id target or both')
324
- exit(-1)
325
- assert init in ['random', 'const']
326
- assert epochs > 0
327
- assert obj_target > 0 and obj_target <= 1600
328
- t0 = time.time()
329
- device = check_for_cuda()
330
- random.seed(seed)
331
-
332
- # check locations
333
- if os.path.isfile(patch_name):
334
- print('WARNING: already found a patch at location: ' + patch_name)
335
- if not over:
336
- print('to override, use the --over flag')
337
- exit(-1)
338
- else:
339
- print('override is enabled')
340
- feat_dir = os.path.join(dataroot, 'feature_cache', 'clean', detector, 'train2014')
341
- if not os.path.isdir(feat_dir):
342
- print('WARNING: optimize_patch.py must be run after clean features have been extracted')
343
- exit(-1)
344
-
345
- # model prep
346
- model_path = os.path.join(model_dir, detector + '.pth')
347
- config_file = "grid-feats-vqa/configs/%s-grid.yaml"%detector
348
- if detector == 'X-152pp':
349
- config_file = "grid-feats-vqa/configs/X-152-challenge.yaml"
350
- print('loading model: ' + model_path)
351
- predictor = load_detectron_predictor(config_file, model_path, device)
352
- roi_head = predictor.model.roi_heads
353
-
354
- # initialize patch tensor, loss, and optimizer
355
- if init == 'const':
356
- patch = Variable(0.5 * torch.ones([1, 3, res, res], dtype=torch.float32), requires_grad=True)
357
- else:
358
- rand_patch = np.random.normal(loc=0.5, scale=0.25, size=[1, 3, res, res])
359
- rand_patch = np.clip(rand_patch, 0, 1)
360
- patch = Variable(torch.from_numpy(rand_patch.astype(np.float32)), requires_grad=True)
361
- cel_obj = torch.nn.CrossEntropyLoss()
362
- cel_attr = torch.nn.CrossEntropyLoss()
363
- trk_cel_obj = torch.nn.CrossEntropyLoss(reduction='none')
364
- trk_cel_attr = torch.nn.CrossEntropyLoss(reduction='none')
365
- optim = torch.optim.Adam([patch])
366
-
367
- # set up training
368
- img_dir = os.path.join(dataroot, 'clean', 'train2014')
369
- files = os.listdir(img_dir)
370
- loss_col_obj = []
371
- loss_col_attr = []
372
- i = 0
373
- j = 0
374
-
375
- # partial epochs - allow training for < 1 epoch
376
- if epochs < 1:
377
- print('Training on a partial epoch: ' + str(epochs))
378
- limit = int(epochs * len(files))
379
- print('Will train on %i images'%limit)
380
- epochs = 1
381
- else:
382
- epochs = int(epochs)
383
-
384
- # optimize patch
385
- t1 = time.time()
386
- for e in range(epochs):
387
- print('=== EPOCH: %i'%e)
388
- random.shuffle(files)
389
- for f in files:
390
- img_path = os.path.join(img_dir, f)
391
- original_image = cv2.imread(img_path)
392
- optim.zero_grad()
393
-
394
- # using model directly to bypass some limitations of predictor
395
- height, width = original_image.shape[:2]
396
- image = predictor.transform_gen.get_transform(original_image).apply_image(original_image)
397
- image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
398
- image = embed_patch(image, patch, scale)
399
- inputs = {"image": image, "height": height, "width": width}
400
-
401
- # run
402
- outputs, box_features = predictor.model([inputs])
403
- outputs = outputs[0]
404
- nb_out = box_features.shape[0]
405
-
406
- # object target
407
- if obj_target is not None:
408
- scores, deltas = roi_head.box_predictor(box_features)
409
- targets = torch.ones(nb_out, dtype=torch.long, device=device) * obj_target
410
- l_obj = cel_obj(scores, targets)
411
- if attr_target is None:
412
- l = l_obj
413
-
414
- # attribute target
415
- if attr_target is not None:
416
- pred_classes = outputs["instances"].get_fields()["pred_classes"].data
417
- attribute_scores = roi_head.attribute_predictor(box_features, pred_classes)
418
- attr_targets = torch.ones(nb_out, dtype=torch.long, device=device) * attr_target
419
- l_attr = cel_attr(attribute_scores, attr_targets)
420
- if obj_target is None:
421
- l = l_attr
422
-
423
- # step
424
- if obj_target is not None and attr_target is not None:
425
- l = l_obj + (lam * l_attr)
426
- l.backward()
427
- optim.step()
428
-
429
- # track progress by looking for the detection with the smallest loss, averaged over k images
430
- if obj_target is not None:
431
- trk_l_obj = trk_cel_obj(scores, targets)
432
- trk_l_obj = np.array(trk_l_obj.detach().cpu())
433
- trk_l_obj = np.min(trk_l_obj)
434
- loss_col_obj.append(trk_l_obj)
435
- else:
436
- loss_col_obj.append(0.0)
437
- if attr_target is not None:
438
- trk_l_attr = trk_cel_attr(attribute_scores, attr_targets)
439
- trk_l_attr = np.array(trk_l_attr.detach().cpu())
440
- trk_l_attr = np.min(trk_l_attr)
441
- loss_col_attr.append(trk_l_attr)
442
- else:
443
- loss_col_attr.append(0.0)
444
- if (i+1)%prog == 0:
445
- loss_col_obj = np.mean(np.array(loss_col_obj))
446
- loss_col_attr = np.mean(np.array(loss_col_attr))
447
- tdiff = time.time() - t1
448
- t1 = time.time()
449
- print('%i/%i avg obj loss: %f avg attr loss: %f time: %is'%(i, len(files), loss_col_obj, loss_col_attr, int(tdiff)))
450
- loss_col_obj = []
451
- loss_col_attr = []
452
- j = i+1
453
-
454
- # limit (optional)
455
- if i == limit:
456
- print('limiting training to %i steps'%limit)
457
- break
458
- i += 1
459
-
460
- # save patch
461
- final = patch.squeeze(0)
462
- final = torch.clip(final, 0, 1) * 255
463
- final = np.array(final.data).astype(int)
464
- final = final.transpose(1, 2, 0)
465
- print('saving patch to: ' + patch_name)
466
- cv2.imwrite(patch_name, final)
467
- t = time.time() - t0
468
- print('DONE in %.2fm'%(t/60))
469
-
470
-
471
-
472
- if __name__ == '__main__':
473
- parser = argparse.ArgumentParser()
474
- parser.add_argument('--dataroot', type=str, default='../data/', help='data location')
475
- parser.add_argument("--model_dir", type=str, help='location of .pth files', default='../detectors/')
476
- parser.add_argument('--detector', type=str, default='R-50', help='which detector features to use')
477
- parser.add_argument("--nb", type=int, help='max number of detections to save per image', default=36)
478
- parser.add_argument("--seed", type=int, help='random seed for data shuffle, default=123', default=123)
479
- parser.add_argument("--scale", type=float, default=0.1, help='patch scale relative to image')
480
- parser.add_argument("--res", type=int, default=64, help='optimized patch resolution in pixels, default=64')
481
- # semantic target settings - new
482
- parser.add_argument("--target", type=str, default=None, help='specify and object/attribute pair in format <obj>+<attr>, overrides other settings')
483
- parser.add_argument("--obj_target", type=str, default=None, help='object target (id or name). Use --explore to explore options')
484
- parser.add_argument("--attr_target", type=str, default=None, help='attribute target (id or name). Use --explore to explore options')
485
- parser.add_argument("--lam", type=float, default=0.1, help='weight for the attribute target loss, default 0.1')
486
- # training settings
487
- parser.add_argument("--epochs", type=float, default=1)
488
- parser.add_argument("--limit", type=int, default=-1)
489
- parser.add_argument("--prog", type=int, default=100)
490
- parser.add_argument("--init", type=str, default='random')
491
- # naming
492
- parser.add_argument("--patch_name", type=str, default='../opti_patches/semdev_op0.jpg')
493
- parser.add_argument("--over", action='store_true', help="enable to allow writing over existing patch")
494
- # helper tools
495
- parser.add_argument("--check_res", action='store_true', help="check the resolutions of patches by scale")
496
- parser.add_argument("--check_attr", type=int, default=None, help="check the name of an attribute index")
497
- parser.add_argument("--check_obj", type=int, default=None, help="check the name of an object index")
498
- parser.add_argument("--list_attr", action='store_true', help='list all attributes')
499
- parser.add_argument("--list_obj", action='store_true', help='list all objects')
500
- parser.add_argument("--explore", action='store_true', help="explore clean training set detections for rare object types")
501
- parser.add_argument("--find_examples", type=str, default=None, help="look for images with a certain <obj>+<attr> combination")
502
- parser.add_argument("--find_count", type=int, default=25, help="max number of examples to take. set as -1 to have no limit")
503
- parser.add_argument("--data_part", type=str, default='train2014', help="for use with explore, which data partition to check")
504
- args = parser.parse_args()
505
- np.random.seed(args.seed)
506
- # helper tools (optional)
507
- if args.check_res:
508
- check_res(args.dataroot, args.scale)
509
- exit()
510
- if args.check_obj is not None:
511
- lookup_labels(args.dataroot, 'object', args.check_obj)
512
- exit()
513
- if args.check_attr is not None:
514
- lookup_labels(args.dataroot, 'attribute', args.check_attr)
515
- exit()
516
- if args.list_obj:
517
- list_all_labels(args.dataroot, 'object')
518
- exit()
519
- if args.list_attr:
520
- list_all_labels(args.dataroot, 'attribute')
521
- exit()
522
- if args.explore:
523
- explore_detections(args.dataroot, args.detector, args.data_part)
524
- exit()
525
- if args.find_examples is not None:
526
- find_examples(args.dataroot, args.find_examples, args.detector, args.data_part, args.find_count)
527
- exit()
528
- # parse the target settings
529
- OBJ_TAR, ATTR_TAR = parse_targets(args.dataroot, args.target, args.obj_target, args.attr_target)
530
- # main script
531
- optimize_patch(args.dataroot, args.model_dir, args.detector, args.nb, args.scale, args.res, args.epochs,
532
- args.limit, args.prog, args.init, args.patch_name, args.over, args.seed, OBJ_TAR, ATTR_TAR, args.lam)
 
 
spaces/CVPR/regionclip-demo/detectron2/modeling/backbone/fpn.py DELETED
@@ -1,277 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import math
3
- import fvcore.nn.weight_init as weight_init
4
- import torch
5
- import torch.nn.functional as F
6
- from torch import nn
7
-
8
- from detectron2.layers import Conv2d, ShapeSpec, get_norm
9
-
10
- from .backbone import Backbone
11
- from .build import BACKBONE_REGISTRY
12
- from .resnet import build_resnet_backbone
13
- from .clip_backbone import build_clip_resnet_backbone
14
-
15
- __all__ = ["build_clip_resnet_fpn_backbone", "build_resnet_fpn_backbone", "build_retinanet_resnet_fpn_backbone", "FPN"]
16
-
17
-
18
- class FPN(Backbone):
19
- """
20
- This module implements :paper:`FPN`.
21
- It creates pyramid features built on top of some input feature maps.
22
- """
23
-
24
- _fuse_type: torch.jit.Final[str]
25
-
26
- def __init__(
27
- self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum"
28
- ):
29
- """
30
- Args:
31
- bottom_up (Backbone): module representing the bottom up subnetwork.
32
- Must be a subclass of :class:`Backbone`. The multi-scale feature
33
- maps generated by the bottom up network, and listed in `in_features`,
34
- are used to generate FPN levels.
35
- in_features (list[str]): names of the input feature maps coming
36
- from the backbone to which FPN is attached. For example, if the
37
- backbone produces ["res2", "res3", "res4"], any *contiguous* sublist
38
- of these may be used; order must be from high to low resolution.
39
- out_channels (int): number of channels in the output feature maps.
40
- norm (str): the normalization to use.
41
- top_block (nn.Module or None): if provided, an extra operation will
42
- be performed on the output of the last (smallest resolution)
43
- FPN output, and the result will extend the result list. The top_block
44
- further downsamples the feature map. It must have an attribute
45
- "num_levels", meaning the number of extra FPN levels added by
46
- this block, and "in_feature", which is a string representing
47
- its input feature (e.g., p5).
48
- fuse_type (str): types for fusing the top down features and the lateral
49
- ones. It can be "sum" (default), which sums up element-wise; or "avg",
50
- which takes the element-wise mean of the two.
51
- """
52
- super(FPN, self).__init__()
53
- assert isinstance(bottom_up, Backbone)
54
- assert in_features, in_features
55
-
56
- # Feature map strides and channels from the bottom up network (e.g. ResNet)
57
- input_shapes = bottom_up.output_shape()
58
- strides = [input_shapes[f].stride for f in in_features]
59
- in_channels_per_feature = [input_shapes[f].channels for f in in_features]
60
-
61
- _assert_strides_are_log2_contiguous(strides)
62
- lateral_convs = []
63
- output_convs = []
64
-
65
- use_bias = norm == ""
66
- for idx, in_channels in enumerate(in_channels_per_feature):
67
- lateral_norm = get_norm(norm, out_channels)
68
- output_norm = get_norm(norm, out_channels)
69
-
70
- lateral_conv = Conv2d(
71
- in_channels, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm
72
- )
73
- output_conv = Conv2d(
74
- out_channels,
75
- out_channels,
76
- kernel_size=3,
77
- stride=1,
78
- padding=1,
79
- bias=use_bias,
80
- norm=output_norm,
81
- )
82
- weight_init.c2_xavier_fill(lateral_conv)
83
- weight_init.c2_xavier_fill(output_conv)
84
- stage = int(math.log2(strides[idx]))
85
- self.add_module("fpn_lateral{}".format(stage), lateral_conv)
86
- self.add_module("fpn_output{}".format(stage), output_conv)
87
-
88
- lateral_convs.append(lateral_conv)
89
- output_convs.append(output_conv)
90
- # Place convs into top-down order (from low to high resolution)
91
- # to make the top-down computation in forward clearer.
92
- self.lateral_convs = lateral_convs[::-1]
93
- self.output_convs = output_convs[::-1]
94
- self.top_block = top_block
95
- self.in_features = tuple(in_features)
96
- self.bottom_up = bottom_up
97
- # Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
98
- self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in strides}
99
- # top block output feature maps.
100
- if self.top_block is not None:
101
- for s in range(stage, stage + self.top_block.num_levels):
102
- self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1)
103
-
104
- self._out_features = list(self._out_feature_strides.keys())
105
- self._out_feature_channels = {k: out_channels for k in self._out_features}
106
- self._size_divisibility = strides[-1]
107
- assert fuse_type in {"avg", "sum"}
108
- self._fuse_type = fuse_type
109
-
110
- @property
111
- def size_divisibility(self):
112
- return self._size_divisibility
113
-
114
- def forward(self, x):
115
- """
116
- Args:
117
- input (dict[str->Tensor]): mapping feature map name (e.g., "res5") to
118
- feature map tensor for each feature level in high to low resolution order.
119
-
120
- Returns:
121
- dict[str->Tensor]:
122
- mapping from feature map name to FPN feature map tensor
123
- in high to low resolution order. Returned feature names follow the FPN
124
- paper convention: "p<stage>", where stage has stride = 2 ** stage e.g.,
125
- ["p2", "p3", ..., "p6"].
126
- """
127
- bottom_up_features = self.bottom_up(x)
128
- results = []
129
- prev_features = self.lateral_convs[0](bottom_up_features[self.in_features[-1]])
130
- results.append(self.output_convs[0](prev_features))
131
-
132
- # Reverse feature maps into top-down order (from low to high resolution)
133
- for idx, (lateral_conv, output_conv) in enumerate(
134
- zip(self.lateral_convs, self.output_convs)
135
- ):
136
- # Slicing of ModuleList is not supported https://github.com/pytorch/pytorch/issues/47336
137
- # Therefore we loop over all modules but skip the first one
138
- if idx > 0:
139
- features = self.in_features[-idx - 1]
140
- features = bottom_up_features[features]
141
- top_down_features = F.interpolate(prev_features, scale_factor=2.0, mode="nearest")
142
- lateral_features = lateral_conv(features)
143
- prev_features = lateral_features + top_down_features
144
- if self._fuse_type == "avg":
145
- prev_features /= 2
146
- results.insert(0, output_conv(prev_features))
147
-
148
- if self.top_block is not None:
149
- if self.top_block.in_feature in bottom_up_features:
150
- top_block_in_feature = bottom_up_features[self.top_block.in_feature]
151
- else:
152
- top_block_in_feature = results[self._out_features.index(self.top_block.in_feature)]
153
- results.extend(self.top_block(top_block_in_feature))
154
- assert len(self._out_features) == len(results)
155
- return {f: res for f, res in zip(self._out_features, results)}
156
-
157
- def output_shape(self):
158
- return {
159
- name: ShapeSpec(
160
- channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
161
- )
162
- for name in self._out_features
163
- }
164
-
165
-
166
- def _assert_strides_are_log2_contiguous(strides):
167
- """
168
- Assert that each stride is 2x times its preceding stride, i.e. "contiguous in log2".
169
- """
170
- for i, stride in enumerate(strides[1:], 1):
171
- assert stride == 2 * strides[i - 1], "Strides {} {} are not log2 contiguous".format(
172
- stride, strides[i - 1]
173
- )
174
-
175
-
176
- class LastLevelMaxPool(nn.Module):
177
- """
178
- This module is used in the original FPN to generate a downsampled
179
- P6 feature from P5.
180
- """
181
-
182
- def __init__(self):
183
- super().__init__()
184
- self.num_levels = 1
185
- self.in_feature = "p5"
186
-
187
- def forward(self, x):
188
- return [F.max_pool2d(x, kernel_size=1, stride=2, padding=0)]
189
-
190
-
191
- class LastLevelP6P7(nn.Module):
192
- """
193
- This module is used in RetinaNet to generate extra layers, P6 and P7 from
194
- C5 feature.
195
- """
196
-
197
- def __init__(self, in_channels, out_channels, in_feature="res5"):
198
- super().__init__()
199
- self.num_levels = 2
200
- self.in_feature = in_feature
201
- self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
202
- self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
203
- for module in [self.p6, self.p7]:
204
- weight_init.c2_xavier_fill(module)
205
-
206
- def forward(self, c5):
207
- p6 = self.p6(c5)
208
- p7 = self.p7(F.relu(p6))
209
- return [p6, p7]
210
-
211
-
212
- @BACKBONE_REGISTRY.register()
213
- def build_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
214
- """
215
- Args:
216
- cfg: a detectron2 CfgNode
217
-
218
- Returns:
219
- backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
220
- """
221
- bottom_up = build_resnet_backbone(cfg, input_shape)
222
- in_features = cfg.MODEL.FPN.IN_FEATURES
223
- out_channels = cfg.MODEL.FPN.OUT_CHANNELS
224
- backbone = FPN(
225
- bottom_up=bottom_up,
226
- in_features=in_features,
227
- out_channels=out_channels,
228
- norm=cfg.MODEL.FPN.NORM,
229
- top_block=LastLevelMaxPool(),
230
- fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
231
- )
232
- return backbone
233
-
234
- @BACKBONE_REGISTRY.register()
235
- def build_clip_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
236
- """
237
- Args:
238
- cfg: a detectron2 CfgNode
239
-
240
- Returns:
241
- backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
242
- """
243
- bottom_up = build_clip_resnet_backbone(cfg, input_shape)
244
- in_features = cfg.MODEL.FPN.IN_FEATURES
245
- out_channels = cfg.MODEL.FPN.OUT_CHANNELS
246
- backbone = FPN(
247
- bottom_up=bottom_up,
248
- in_features=in_features,
249
- out_channels=out_channels,
250
- norm=cfg.MODEL.FPN.NORM,
251
- top_block=LastLevelMaxPool(),
252
- fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
253
- )
254
- return backbone
255
-
256
- @BACKBONE_REGISTRY.register()
257
- def build_retinanet_resnet_fpn_backbone(cfg, input_shape: ShapeSpec):
258
- """
259
- Args:
260
- cfg: a detectron2 CfgNode
261
-
262
- Returns:
263
- backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
264
- """
265
- bottom_up = build_resnet_backbone(cfg, input_shape)
266
- in_features = cfg.MODEL.FPN.IN_FEATURES
267
- out_channels = cfg.MODEL.FPN.OUT_CHANNELS
268
- in_channels_p6p7 = bottom_up.output_shape()["res5"].channels
269
- backbone = FPN(
270
- bottom_up=bottom_up,
271
- in_features=in_features,
272
- out_channels=out_channels,
273
- norm=cfg.MODEL.FPN.NORM,
274
- top_block=LastLevelP6P7(in_channels_p6p7, out_channels),
275
- fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
276
- )
277
- return backbone
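
The output feature names in the deleted FPN module are derived from each level's stride, as in `_out_feature_strides` above. A small sketch of that mapping (the stride values are the usual res2..res5 strides, shown here only for illustration):

```python
import math

strides = [4, 8, 16, 32]  # typical res2..res5 strides from a ResNet bottom-up network
names = ["p{}".format(int(math.log2(s))) for s in strides]
print(names)  # ['p2', 'p3', 'p4', 'p5']; LastLevelMaxPool would add 'p6' at stride 64
```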
 
spaces/CarlDennis/HYTTS/text/__init__.py DELETED
@@ -1,33 +0,0 @@
- """ from https://github.com/keithito/tacotron """
- from text import cleaners
-
-
- def text_to_sequence(text, symbols, cleaner_names):
-     '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
-     Args:
-         text: string to convert to a sequence
-         cleaner_names: names of the cleaner functions to run the text through
-     Returns:
-         List of integers corresponding to the symbols in the text
-     '''
-     _symbol_to_id = {s: i for i, s in enumerate(symbols)}
-
-     sequence = []
-
-     clean_text = _clean_text(text, cleaner_names)
-     for symbol in clean_text:
-         if symbol not in _symbol_to_id.keys():
-             continue
-         symbol_id = _symbol_to_id[symbol]
-         sequence += [symbol_id]
-
-     return sequence
-
-
- def _clean_text(text, cleaner_names):
-     for name in cleaner_names:
-         cleaner = getattr(cleaners, name)
-         if not cleaner:
-             raise Exception('Unknown cleaner: %s' % name)
-         text = cleaner(text)
-     return text
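
A minimal sketch of how `text_to_sequence` maps cleaned text to symbol IDs. The symbol list and cleaner name below are placeholders, not the project's actual configuration, and they assume the package above is importable as `text` with a cleaner of that name defined in `cleaners`:

```python
from text import text_to_sequence  # assumed import path

symbols = list("abc ")  # toy symbol set; the index of each symbol is its ID
seq = text_to_sequence("abc cab", symbols, ["basic_cleaners"])
print(seq)  # e.g. [0, 1, 2, 3, 2, 0, 1] if the named cleaner leaves the text unchanged
```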
 
spaces/CikeyQI/meme-api/docs/install.md DELETED
@@ -1,124 +0,0 @@
- ## Local installation
-
- ### Install with pip
-
- ```bash
- pip install meme_generator
- ```
-
- #### Downloading the images
-
- Because the meme images are fairly large, the images used by the memes bundled with `meme-generator` are not shipped with the code; after installation, run the download command manually:
-
- ```bash
- meme download
- ```
-
- ### Running from source
-
- Clone this repository:
-
- ```bash
- git clone https://github.com/MeetWq/meme-generator
- ```
-
- Run the web server with `python -m meme_generator.app`
-
- Run the command-line program with `python -m meme_generator.cli`
-
-
- ### Font installation
-
- To make sure the text in generated memes renders correctly, you need to install fonts yourself
-
- > **Note**
- >
- > If text still renders incorrectly after the fonts are installed, delete the `matplotlib` font cache file and run the program again
- >
- > Cache file locations:
- > - Windows: `C:\Users\<username>\.matplotlib\fontlist-xxx.json`
- > - Linux: `~/.cache/matplotlib/fontlist-xxx.json`
- > - Mac: `~/Library/Caches/matplotlib/fontlist-xxx.json`
-
-
- #### Installing Chinese and emoji fonts
-
- The recommended fonts depend on your operating system:
-
- - Windows:
-
-   Most Windows systems ship with the [微软雅黑 (Microsoft YaHei)](https://learn.microsoft.com/zh-cn/typography/font-list/microsoft-yahei) Chinese font and the [Segoe UI Emoji](https://learn.microsoft.com/zh-cn/typography/font-list/segoe-ui-emoji) emoji font, so usually no extra installation is needed
-
-
- - Linux:
-
-   Some distributions may already ship with the [文泉驿微米黑 (WenQuanYi Micro Hei)](http://wenq.org/wqy2/index.cgi?MicroHei) Chinese font;
-
-   On Ubuntu, installing Noto Sans CJK and Noto Color Emoji is recommended:
-
-   ```bash
-   sudo apt install fonts-noto-cjk fonts-noto-color-emoji
-   ```
-
-   To avoid some Chinese characters in Noto Sans CJK being displayed with variant (Japanese) glyphs, you can set Simplified Chinese as the default locale (see the [ArchWiki](https://wiki.archlinux.org/title/Localization/Simplified_Chinese?rdfrom=https%3A%2F%2Fwiki.archlinux.org%2Findex.php%3Ftitle%3DLocalization_%28%25E7%25AE%2580%25E4%25BD%2593%25E4%25B8%25AD%25E6%2596%2587%29%2FSimplified_Chinese_%28%25E7%25AE%2580%25E4%25BD%2593%25E4%25B8%25AD%25E6%2596%2587%29%26redirect%3Dno#%E4%BF%AE%E6%AD%A3%E7%AE%80%E4%BD%93%E4%B8%AD%E6%96%87%E6%98%BE%E7%A4%BA%E4%B8%BA%E5%BC%82%E4%BD%93%EF%BC%88%E6%97%A5%E6%96%87%EF%BC%89%E5%AD%97%E5%BD%A2) for details):
-
-   ```bash
-   sudo locale-gen zh_CN zh_CN.UTF-8
-   sudo update-locale LC_ALL=zh_CN.UTF-8 LANG=zh_CN.UTF-8
-   fc-cache -fv
-   ```
-
-   Other Linux systems can download and install the font files manually:
-
-   Source Han Sans (思源黑体): https://github.com/adobe-fonts/source-han-sans
-
-   NotoSansSC: https://fonts.google.com/noto/specimen/Noto+Sans+SC
-
-   Noto Color Emoji: https://github.com/googlefonts/noto-emoji
-
-
- - Mac:
-
-   macOS usually ships with the "PingFang SC" Chinese font and the "Apple Color Emoji" emoji font
-
-
- #### Installing additional fonts
-
- Some memes require extra fonts, stored in the repository under [resources/fonts](https://github.com/MeetWq/meme-generator/tree/main/resources/fonts); these need to be downloaded and installed manually
-
- The fonts and the memes that use them are listed below:
-
- | Font name | Font file | Memes that use it | Notes |
- | --- | --- | --- | --- |
- | [Consolas](https://learn.microsoft.com/zh-cn/typography/font-list/consolas) | [consola.ttf](https://github.com/MeetWq/meme-generator/blob/main/resources/fonts/consola.ttf) | `charpic` | |
- | [FZKaTong-M19S](https://www.foundertype.com/index.php/FontInfo/index/id/136) | [FZKATJW.ttf](https://github.com/MeetWq/meme-generator/blob/main/resources/fonts/FZKATJW.ttf) | `capoo_say` | FounderType Cartoon (方正卡通) |
- | [FZXS14](https://www.foundertype.com/index.php/FontInfo/index/id/208) | [FZXS14.ttf](https://github.com/MeetWq/meme-generator/blob/main/resources/fonts/FZXS14.ttf) | `nokia` | FounderType Pixel 14 (方正像素14) |
- | [FZSJ-QINGCRJ](https://www.foundertype.com/index.php/FontInfo/index/id/5178) | [FZSJ-QINGCRJ.ttf](https://github.com/MeetWq/meme-generator/blob/main/resources/fonts/FZSJ-QINGCRJ.ttf) | `psyduck`, `nijika_holdsign` | FounderType Handwriting - Youth Diary (方正手迹-青春日记) |
- | [FZShaoEr-M11S](https://www.foundertype.com/index.php/FontInfo/index/id/149) | [FZSEJW.ttf](https://github.com/MeetWq/meme-generator/blob/main/resources/fonts/FZSEJW.ttf) | `raise_sign`, `nekoha_holdsign` | FounderType Children (方正少儿) |
- | [NotoSansSC](https://fonts.google.com/noto/specimen/Noto+Sans+SC) | [NotoSansSC-Regular.otf](https://github.com/MeetWq/meme-generator/blob/main/resources/fonts/NotoSansSC-Regular.otf) | `5000choyen` | |
- | [NotoSerifSC](https://fonts.google.com/noto/specimen/Noto+Serif+SC) | [NotoSerifSC-Regular.otf](https://github.com/MeetWq/meme-generator/blob/main/resources/fonts/NotoSerifSC-Regular.otf) | `5000choyen` | |
- | [HiraginoMin](https://www.fonts.net.cn/font-36201269101.html) | [HiraginoMin-W5-90-RKSJ-H-2.ttc](https://github.com/MeetWq/meme-generator/blob/main/resources/fonts/HiraginoMin-W5-90-RKSJ-H-2.ttc) | `oshi_no_ko` | Mincho typeface (明朝体) |
- | [Aller](https://fonts.adobe.com/fonts/aller) | [Aller_Bd.ttf](https://github.com/MeetWq/meme-generator/blob/main/resources/fonts/Aller_Bd.ttf) | `osu` | |
-
-
- #### How to install fonts
-
- Font installation differs by operating system:
-
- - Windows:
-   - Double-click the font file and install it via the font viewer
-   - Or copy it into the fonts folder: `C:\Windows\Fonts`
-
- - Linux:
-
-   Create a new folder under `/usr/share/fonts`, e.g. `myfonts`, and copy the font files into it;
-
-   Then run the following command to rebuild the font cache:
-
-   ```bash
-   fc-cache -fv
-   ```
-
- - Mac:
-
-   Open the font file with Font Book to install it
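The matplotlib cache note in the removed document above can be automated. The following is a minimal sketch, not part of the original docs: it assumes matplotlib is installed and uses the public `matplotlib.get_cachedir()` helper to locate the cache directory, then removes any `fontlist-*.json` files (the file names quoted in the note) so the font list is rebuilt on the next run.

```python
# Sketch: clear matplotlib's font cache so newly installed fonts are picked up.
# Assumes matplotlib is installed; the fontlist-*.json glob matches the cache
# file names mentioned in the removed install guide above.
from pathlib import Path

import matplotlib


def clear_matplotlib_font_cache() -> None:
    cache_dir = Path(matplotlib.get_cachedir())
    for cache_file in cache_dir.glob("fontlist-*.json"):
        print(f"Removing {cache_file}")
        cache_file.unlink()


if __name__ == "__main__":
    clear_matplotlib_font_cache()
```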
 
spaces/CorvaeOboro/gen_ability_icon/torch_utils/custom_ops.py DELETED
@@ -1,126 +0,0 @@
1
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- import os
10
- import glob
11
- import torch
12
- import torch.utils.cpp_extension
13
- import importlib
14
- import hashlib
15
- import shutil
16
- from pathlib import Path
17
-
18
- from torch.utils.file_baton import FileBaton
19
-
20
- #----------------------------------------------------------------------------
21
- # Global options.
22
-
23
- verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full'
24
-
25
- #----------------------------------------------------------------------------
26
- # Internal helper funcs.
27
-
28
- def _find_compiler_bindir():
29
- patterns = [
30
- 'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',
31
- 'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',
32
- 'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',
33
- 'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin',
34
- ]
35
- for pattern in patterns:
36
- matches = sorted(glob.glob(pattern))
37
- if len(matches):
38
- return matches[-1]
39
- return None
40
-
41
- #----------------------------------------------------------------------------
42
- # Main entry point for compiling and loading C++/CUDA plugins.
43
-
44
- _cached_plugins = dict()
45
-
46
- def get_plugin(module_name, sources, **build_kwargs):
47
- assert verbosity in ['none', 'brief', 'full']
48
-
49
- # Already cached?
50
- if module_name in _cached_plugins:
51
- return _cached_plugins[module_name]
52
-
53
- # Print status.
54
- if verbosity == 'full':
55
- print(f'Setting up PyTorch plugin "{module_name}"...')
56
- elif verbosity == 'brief':
57
- print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)
58
-
59
- try: # pylint: disable=too-many-nested-blocks
60
- # Make sure we can find the necessary compiler binaries.
61
- if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0:
62
- compiler_bindir = _find_compiler_bindir()
63
- if compiler_bindir is None:
64
- raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
65
- os.environ['PATH'] += ';' + compiler_bindir
66
-
67
- # Compile and load.
68
- verbose_build = (verbosity == 'full')
69
-
70
- # Incremental build md5sum trickery. Copies all the input source files
71
- # into a cached build directory under a combined md5 digest of the input
72
- # source files. Copying is done only if the combined digest has changed.
73
- # This keeps input file timestamps and filenames the same as in previous
74
- # extension builds, allowing for fast incremental rebuilds.
75
- #
76
- # This optimization is done only in case all the source files reside in
77
- # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR
78
- # environment variable is set (we take this as a signal that the user
79
- # actually cares about this.)
80
- source_dirs_set = set(os.path.dirname(source) for source in sources)
81
- if len(source_dirs_set) == 1 and ('TORCH_EXTENSIONS_DIR' in os.environ):
82
- all_source_files = sorted(list(x for x in Path(list(source_dirs_set)[0]).iterdir() if x.is_file()))
83
-
84
- # Compute a combined hash digest for all source files in the same
85
- # custom op directory (usually .cu, .cpp, .py and .h files).
86
- hash_md5 = hashlib.md5()
87
- for src in all_source_files:
88
- with open(src, 'rb') as f:
89
- hash_md5.update(f.read())
90
- build_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access
91
- digest_build_dir = os.path.join(build_dir, hash_md5.hexdigest())
92
-
93
- if not os.path.isdir(digest_build_dir):
94
- os.makedirs(digest_build_dir, exist_ok=True)
95
- baton = FileBaton(os.path.join(digest_build_dir, 'lock'))
96
- if baton.try_acquire():
97
- try:
98
- for src in all_source_files:
99
- shutil.copyfile(src, os.path.join(digest_build_dir, os.path.basename(src)))
100
- finally:
101
- baton.release()
102
- else:
103
- # Someone else is copying source files under the digest dir,
104
- # wait until done and continue.
105
- baton.wait()
106
- digest_sources = [os.path.join(digest_build_dir, os.path.basename(x)) for x in sources]
107
- torch.utils.cpp_extension.load(name=module_name, build_directory=build_dir,
108
- verbose=verbose_build, sources=digest_sources, **build_kwargs)
109
- else:
110
- torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)
111
- module = importlib.import_module(module_name)
112
-
113
- except:
114
- if verbosity == 'brief':
115
- print('Failed!')
116
- raise
117
-
118
- # Print status and add to cache.
119
- if verbosity == 'full':
120
- print(f'Done setting up PyTorch plugin "{module_name}".')
121
- elif verbosity == 'brief':
122
- print('Done.')
123
- _cached_plugins[module_name] = module
124
- return module
125
-
126
- #----------------------------------------------------------------------------
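For context, the deleted `custom_ops.get_plugin()` helper above appears to be the standard NVIDIA/StyleGAN-style entry point for JIT-compiling C++/CUDA extensions via `torch.utils.cpp_extension.load`. A minimal usage sketch follows; the module name and source paths are hypothetical placeholders, not taken from this repository, and `extra_cuda_cflags` is simply forwarded as a `torch.utils.cpp_extension.load` keyword argument.

```python
# Sketch: how a caller might use the deleted get_plugin() helper.
# The plugin name and source file paths below are hypothetical examples.
import os

from torch_utils import custom_ops  # path as laid out in the deleted repo

custom_ops.verbosity = 'brief'  # one of 'none', 'brief', 'full'

_plugin = custom_ops.get_plugin(
    module_name='example_plugin',  # hypothetical module name
    sources=[
        os.path.join(os.path.dirname(__file__), 'example_plugin.cpp'),  # hypothetical
        os.path.join(os.path.dirname(__file__), 'example_plugin.cu'),   # hypothetical
    ],
    extra_cuda_cflags=['--use_fast_math'],  # forwarded to torch.utils.cpp_extension.load
)
```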
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/anyio/_backends/_trio.py DELETED
@@ -1,996 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import array
4
- import math
5
- import socket
6
- from concurrent.futures import Future
7
- from contextvars import copy_context
8
- from dataclasses import dataclass
9
- from functools import partial
10
- from io import IOBase
11
- from os import PathLike
12
- from signal import Signals
13
- from types import TracebackType
14
- from typing import (
15
- IO,
16
- TYPE_CHECKING,
17
- Any,
18
- AsyncGenerator,
19
- AsyncIterator,
20
- Awaitable,
21
- Callable,
22
- Collection,
23
- Coroutine,
24
- Generic,
25
- Iterable,
26
- Mapping,
27
- NoReturn,
28
- Sequence,
29
- TypeVar,
30
- cast,
31
- )
32
-
33
- import sniffio
34
- import trio.from_thread
35
- from outcome import Error, Outcome, Value
36
- from trio.socket import SocketType as TrioSocketType
37
- from trio.to_thread import run_sync
38
-
39
- from .. import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc
40
- from .._core._compat import DeprecatedAsyncContextManager, DeprecatedAwaitable
41
- from .._core._eventloop import claim_worker_thread
42
- from .._core._exceptions import (
43
- BrokenResourceError,
44
- BusyResourceError,
45
- ClosedResourceError,
46
- EndOfStream,
47
- )
48
- from .._core._exceptions import ExceptionGroup as BaseExceptionGroup
49
- from .._core._sockets import convert_ipv6_sockaddr
50
- from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter
51
- from .._core._synchronization import Event as BaseEvent
52
- from .._core._synchronization import ResourceGuard
53
- from .._core._tasks import CancelScope as BaseCancelScope
54
- from ..abc import IPSockAddrType, UDPPacketType
55
-
56
- if TYPE_CHECKING:
57
- from trio_typing import TaskStatus
58
-
59
- try:
60
- from trio import lowlevel as trio_lowlevel
61
- except ImportError:
62
- from trio import hazmat as trio_lowlevel # type: ignore[no-redef]
63
- from trio.hazmat import wait_readable, wait_writable
64
- else:
65
- from trio.lowlevel import wait_readable, wait_writable
66
-
67
- try:
68
- trio_open_process = trio_lowlevel.open_process
69
- except AttributeError:
70
- # isort: off
71
- from trio import ( # type: ignore[attr-defined, no-redef]
72
- open_process as trio_open_process,
73
- )
74
-
75
- T_Retval = TypeVar("T_Retval")
76
- T_SockAddr = TypeVar("T_SockAddr", str, IPSockAddrType)
77
-
78
-
79
- #
80
- # Event loop
81
- #
82
-
83
- run = trio.run
84
- current_token = trio.lowlevel.current_trio_token
85
- RunVar = trio.lowlevel.RunVar
86
-
87
-
88
- #
89
- # Miscellaneous
90
- #
91
-
92
- sleep = trio.sleep
93
-
94
-
95
- #
96
- # Timeouts and cancellation
97
- #
98
-
99
-
100
- class CancelScope(BaseCancelScope):
101
- def __new__(
102
- cls, original: trio.CancelScope | None = None, **kwargs: object
103
- ) -> CancelScope:
104
- return object.__new__(cls)
105
-
106
- def __init__(self, original: trio.CancelScope | None = None, **kwargs: Any) -> None:
107
- self.__original = original or trio.CancelScope(**kwargs)
108
-
109
- def __enter__(self) -> CancelScope:
110
- self.__original.__enter__()
111
- return self
112
-
113
- def __exit__(
114
- self,
115
- exc_type: type[BaseException] | None,
116
- exc_val: BaseException | None,
117
- exc_tb: TracebackType | None,
118
- ) -> bool | None:
119
- # https://github.com/python-trio/trio-typing/pull/79
120
- return self.__original.__exit__( # type: ignore[func-returns-value]
121
- exc_type, exc_val, exc_tb
122
- )
123
-
124
- def cancel(self) -> DeprecatedAwaitable:
125
- self.__original.cancel()
126
- return DeprecatedAwaitable(self.cancel)
127
-
128
- @property
129
- def deadline(self) -> float:
130
- return self.__original.deadline
131
-
132
- @deadline.setter
133
- def deadline(self, value: float) -> None:
134
- self.__original.deadline = value
135
-
136
- @property
137
- def cancel_called(self) -> bool:
138
- return self.__original.cancel_called
139
-
140
- @property
141
- def shield(self) -> bool:
142
- return self.__original.shield
143
-
144
- @shield.setter
145
- def shield(self, value: bool) -> None:
146
- self.__original.shield = value
147
-
148
-
149
- CancelledError = trio.Cancelled
150
- checkpoint = trio.lowlevel.checkpoint
151
- checkpoint_if_cancelled = trio.lowlevel.checkpoint_if_cancelled
152
- cancel_shielded_checkpoint = trio.lowlevel.cancel_shielded_checkpoint
153
- current_effective_deadline = trio.current_effective_deadline
154
- current_time = trio.current_time
155
-
156
-
157
- #
158
- # Task groups
159
- #
160
-
161
-
162
- class ExceptionGroup(BaseExceptionGroup, trio.MultiError):
163
- pass
164
-
165
-
166
- class TaskGroup(abc.TaskGroup):
167
- def __init__(self) -> None:
168
- self._active = False
169
- self._nursery_manager = trio.open_nursery()
170
- self.cancel_scope = None # type: ignore[assignment]
171
-
172
- async def __aenter__(self) -> TaskGroup:
173
- self._active = True
174
- self._nursery = await self._nursery_manager.__aenter__()
175
- self.cancel_scope = CancelScope(self._nursery.cancel_scope)
176
- return self
177
-
178
- async def __aexit__(
179
- self,
180
- exc_type: type[BaseException] | None,
181
- exc_val: BaseException | None,
182
- exc_tb: TracebackType | None,
183
- ) -> bool | None:
184
- try:
185
- return await self._nursery_manager.__aexit__(exc_type, exc_val, exc_tb)
186
- except trio.MultiError as exc:
187
- raise ExceptionGroup(exc.exceptions) from None
188
- finally:
189
- self._active = False
190
-
191
- def start_soon(
192
- self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
193
- ) -> None:
194
- if not self._active:
195
- raise RuntimeError(
196
- "This task group is not active; no new tasks can be started."
197
- )
198
-
199
- self._nursery.start_soon(func, *args, name=name)
200
-
201
- async def start(
202
- self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
203
- ) -> object:
204
- if not self._active:
205
- raise RuntimeError(
206
- "This task group is not active; no new tasks can be started."
207
- )
208
-
209
- return await self._nursery.start(func, *args, name=name)
210
-
211
-
212
- #
213
- # Threads
214
- #
215
-
216
-
217
- async def run_sync_in_worker_thread(
218
- func: Callable[..., T_Retval],
219
- *args: object,
220
- cancellable: bool = False,
221
- limiter: trio.CapacityLimiter | None = None,
222
- ) -> T_Retval:
223
- def wrapper() -> T_Retval:
224
- with claim_worker_thread("trio"):
225
- return func(*args)
226
-
227
- # TODO: remove explicit context copying when trio 0.20 is the minimum requirement
228
- context = copy_context()
229
- context.run(sniffio.current_async_library_cvar.set, None)
230
- return await run_sync(
231
- context.run, wrapper, cancellable=cancellable, limiter=limiter
232
- )
233
-
234
-
235
- # TODO: remove this workaround when trio 0.20 is the minimum requirement
236
- def run_async_from_thread(
237
- fn: Callable[..., Awaitable[T_Retval]], *args: Any
238
- ) -> T_Retval:
239
- async def wrapper() -> T_Retval:
240
- retval: T_Retval
241
-
242
- async def inner() -> None:
243
- nonlocal retval
244
- __tracebackhide__ = True
245
- retval = await fn(*args)
246
-
247
- async with trio.open_nursery() as n:
248
- context.run(n.start_soon, inner)
249
-
250
- __tracebackhide__ = True
251
- return retval # noqa: F821
252
-
253
- context = copy_context()
254
- context.run(sniffio.current_async_library_cvar.set, "trio")
255
- return trio.from_thread.run(wrapper)
256
-
257
-
258
- def run_sync_from_thread(fn: Callable[..., T_Retval], *args: Any) -> T_Retval:
259
- # TODO: remove explicit context copying when trio 0.20 is the minimum requirement
260
- retval = trio.from_thread.run_sync(copy_context().run, fn, *args)
261
- return cast(T_Retval, retval)
262
-
263
-
264
- class BlockingPortal(abc.BlockingPortal):
265
- def __new__(cls) -> BlockingPortal:
266
- return object.__new__(cls)
267
-
268
- def __init__(self) -> None:
269
- super().__init__()
270
- self._token = trio.lowlevel.current_trio_token()
271
-
272
- def _spawn_task_from_thread(
273
- self,
274
- func: Callable,
275
- args: tuple,
276
- kwargs: dict[str, Any],
277
- name: object,
278
- future: Future,
279
- ) -> None:
280
- context = copy_context()
281
- context.run(sniffio.current_async_library_cvar.set, "trio")
282
- trio.from_thread.run_sync(
283
- context.run,
284
- partial(self._task_group.start_soon, name=name),
285
- self._call_func,
286
- func,
287
- args,
288
- kwargs,
289
- future,
290
- trio_token=self._token,
291
- )
292
-
293
-
294
- #
295
- # Subprocesses
296
- #
297
-
298
-
299
- @dataclass(eq=False)
300
- class ReceiveStreamWrapper(abc.ByteReceiveStream):
301
- _stream: trio.abc.ReceiveStream
302
-
303
- async def receive(self, max_bytes: int | None = None) -> bytes:
304
- try:
305
- data = await self._stream.receive_some(max_bytes)
306
- except trio.ClosedResourceError as exc:
307
- raise ClosedResourceError from exc.__cause__
308
- except trio.BrokenResourceError as exc:
309
- raise BrokenResourceError from exc.__cause__
310
-
311
- if data:
312
- return data
313
- else:
314
- raise EndOfStream
315
-
316
- async def aclose(self) -> None:
317
- await self._stream.aclose()
318
-
319
-
320
- @dataclass(eq=False)
321
- class SendStreamWrapper(abc.ByteSendStream):
322
- _stream: trio.abc.SendStream
323
-
324
- async def send(self, item: bytes) -> None:
325
- try:
326
- await self._stream.send_all(item)
327
- except trio.ClosedResourceError as exc:
328
- raise ClosedResourceError from exc.__cause__
329
- except trio.BrokenResourceError as exc:
330
- raise BrokenResourceError from exc.__cause__
331
-
332
- async def aclose(self) -> None:
333
- await self._stream.aclose()
334
-
335
-
336
- @dataclass(eq=False)
337
- class Process(abc.Process):
338
- _process: trio.Process
339
- _stdin: abc.ByteSendStream | None
340
- _stdout: abc.ByteReceiveStream | None
341
- _stderr: abc.ByteReceiveStream | None
342
-
343
- async def aclose(self) -> None:
344
- if self._stdin:
345
- await self._stdin.aclose()
346
- if self._stdout:
347
- await self._stdout.aclose()
348
- if self._stderr:
349
- await self._stderr.aclose()
350
-
351
- await self.wait()
352
-
353
- async def wait(self) -> int:
354
- return await self._process.wait()
355
-
356
- def terminate(self) -> None:
357
- self._process.terminate()
358
-
359
- def kill(self) -> None:
360
- self._process.kill()
361
-
362
- def send_signal(self, signal: Signals) -> None:
363
- self._process.send_signal(signal)
364
-
365
- @property
366
- def pid(self) -> int:
367
- return self._process.pid
368
-
369
- @property
370
- def returncode(self) -> int | None:
371
- return self._process.returncode
372
-
373
- @property
374
- def stdin(self) -> abc.ByteSendStream | None:
375
- return self._stdin
376
-
377
- @property
378
- def stdout(self) -> abc.ByteReceiveStream | None:
379
- return self._stdout
380
-
381
- @property
382
- def stderr(self) -> abc.ByteReceiveStream | None:
383
- return self._stderr
384
-
385
-
386
- async def open_process(
387
- command: str | bytes | Sequence[str | bytes],
388
- *,
389
- shell: bool,
390
- stdin: int | IO[Any] | None,
391
- stdout: int | IO[Any] | None,
392
- stderr: int | IO[Any] | None,
393
- cwd: str | bytes | PathLike | None = None,
394
- env: Mapping[str, str] | None = None,
395
- start_new_session: bool = False,
396
- ) -> Process:
397
- process = await trio_open_process( # type: ignore[misc]
398
- command, # type: ignore[arg-type]
399
- stdin=stdin,
400
- stdout=stdout,
401
- stderr=stderr,
402
- shell=shell,
403
- cwd=cwd,
404
- env=env,
405
- start_new_session=start_new_session,
406
- )
407
- stdin_stream = SendStreamWrapper(process.stdin) if process.stdin else None
408
- stdout_stream = ReceiveStreamWrapper(process.stdout) if process.stdout else None
409
- stderr_stream = ReceiveStreamWrapper(process.stderr) if process.stderr else None
410
- return Process(process, stdin_stream, stdout_stream, stderr_stream)
411
-
412
-
413
- class _ProcessPoolShutdownInstrument(trio.abc.Instrument):
414
- def after_run(self) -> None:
415
- super().after_run()
416
-
417
-
418
- current_default_worker_process_limiter: RunVar = RunVar(
419
- "current_default_worker_process_limiter"
420
- )
421
-
422
-
423
- async def _shutdown_process_pool(workers: set[Process]) -> None:
424
- process: Process
425
- try:
426
- await sleep(math.inf)
427
- except trio.Cancelled:
428
- for process in workers:
429
- if process.returncode is None:
430
- process.kill()
431
-
432
- with CancelScope(shield=True):
433
- for process in workers:
434
- await process.aclose()
435
-
436
-
437
- def setup_process_pool_exit_at_shutdown(workers: set[Process]) -> None:
438
- trio.lowlevel.spawn_system_task(_shutdown_process_pool, workers)
439
-
440
-
441
- #
442
- # Sockets and networking
443
- #
444
-
445
-
446
- class _TrioSocketMixin(Generic[T_SockAddr]):
447
- def __init__(self, trio_socket: TrioSocketType) -> None:
448
- self._trio_socket = trio_socket
449
- self._closed = False
450
-
451
- def _check_closed(self) -> None:
452
- if self._closed:
453
- raise ClosedResourceError
454
- if self._trio_socket.fileno() < 0:
455
- raise BrokenResourceError
456
-
457
- @property
458
- def _raw_socket(self) -> socket.socket:
459
- return self._trio_socket._sock # type: ignore[attr-defined]
460
-
461
- async def aclose(self) -> None:
462
- if self._trio_socket.fileno() >= 0:
463
- self._closed = True
464
- self._trio_socket.close()
465
-
466
- def _convert_socket_error(self, exc: BaseException) -> NoReturn:
467
- if isinstance(exc, trio.ClosedResourceError):
468
- raise ClosedResourceError from exc
469
- elif self._trio_socket.fileno() < 0 and self._closed:
470
- raise ClosedResourceError from None
471
- elif isinstance(exc, OSError):
472
- raise BrokenResourceError from exc
473
- else:
474
- raise exc
475
-
476
-
477
- class SocketStream(_TrioSocketMixin, abc.SocketStream):
478
- def __init__(self, trio_socket: TrioSocketType) -> None:
479
- super().__init__(trio_socket)
480
- self._receive_guard = ResourceGuard("reading from")
481
- self._send_guard = ResourceGuard("writing to")
482
-
483
- async def receive(self, max_bytes: int = 65536) -> bytes:
484
- with self._receive_guard:
485
- try:
486
- data = await self._trio_socket.recv(max_bytes)
487
- except BaseException as exc:
488
- self._convert_socket_error(exc)
489
-
490
- if data:
491
- return data
492
- else:
493
- raise EndOfStream
494
-
495
- async def send(self, item: bytes) -> None:
496
- with self._send_guard:
497
- view = memoryview(item)
498
- while view:
499
- try:
500
- bytes_sent = await self._trio_socket.send(view)
501
- except BaseException as exc:
502
- self._convert_socket_error(exc)
503
-
504
- view = view[bytes_sent:]
505
-
506
- async def send_eof(self) -> None:
507
- self._trio_socket.shutdown(socket.SHUT_WR)
508
-
509
-
510
- class UNIXSocketStream(SocketStream, abc.UNIXSocketStream):
511
- async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
512
- if not isinstance(msglen, int) or msglen < 0:
513
- raise ValueError("msglen must be a non-negative integer")
514
- if not isinstance(maxfds, int) or maxfds < 1:
515
- raise ValueError("maxfds must be a positive integer")
516
-
517
- fds = array.array("i")
518
- await checkpoint()
519
- with self._receive_guard:
520
- while True:
521
- try:
522
- message, ancdata, flags, addr = await self._trio_socket.recvmsg(
523
- msglen, socket.CMSG_LEN(maxfds * fds.itemsize)
524
- )
525
- except BaseException as exc:
526
- self._convert_socket_error(exc)
527
- else:
528
- if not message and not ancdata:
529
- raise EndOfStream
530
-
531
- break
532
-
533
- for cmsg_level, cmsg_type, cmsg_data in ancdata:
534
- if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
535
- raise RuntimeError(
536
- f"Received unexpected ancillary data; message = {message!r}, "
537
- f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"
538
- )
539
-
540
- fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
541
-
542
- return message, list(fds)
543
-
544
- async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
545
- if not message:
546
- raise ValueError("message must not be empty")
547
- if not fds:
548
- raise ValueError("fds must not be empty")
549
-
550
- filenos: list[int] = []
551
- for fd in fds:
552
- if isinstance(fd, int):
553
- filenos.append(fd)
554
- elif isinstance(fd, IOBase):
555
- filenos.append(fd.fileno())
556
-
557
- fdarray = array.array("i", filenos)
558
- await checkpoint()
559
- with self._send_guard:
560
- while True:
561
- try:
562
- await self._trio_socket.sendmsg(
563
- [message],
564
- [
565
- (
566
- socket.SOL_SOCKET,
567
- socket.SCM_RIGHTS, # type: ignore[list-item]
568
- fdarray,
569
- )
570
- ],
571
- )
572
- break
573
- except BaseException as exc:
574
- self._convert_socket_error(exc)
575
-
576
-
577
- class TCPSocketListener(_TrioSocketMixin, abc.SocketListener):
578
- def __init__(self, raw_socket: socket.socket):
579
- super().__init__(trio.socket.from_stdlib_socket(raw_socket))
580
- self._accept_guard = ResourceGuard("accepting connections from")
581
-
582
- async def accept(self) -> SocketStream:
583
- with self._accept_guard:
584
- try:
585
- trio_socket, _addr = await self._trio_socket.accept()
586
- except BaseException as exc:
587
- self._convert_socket_error(exc)
588
-
589
- trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
590
- return SocketStream(trio_socket)
591
-
592
-
593
- class UNIXSocketListener(_TrioSocketMixin, abc.SocketListener):
594
- def __init__(self, raw_socket: socket.socket):
595
- super().__init__(trio.socket.from_stdlib_socket(raw_socket))
596
- self._accept_guard = ResourceGuard("accepting connections from")
597
-
598
- async def accept(self) -> UNIXSocketStream:
599
- with self._accept_guard:
600
- try:
601
- trio_socket, _addr = await self._trio_socket.accept()
602
- except BaseException as exc:
603
- self._convert_socket_error(exc)
604
-
605
- return UNIXSocketStream(trio_socket)
606
-
607
-
608
- class UDPSocket(_TrioSocketMixin[IPSockAddrType], abc.UDPSocket):
609
- def __init__(self, trio_socket: TrioSocketType) -> None:
610
- super().__init__(trio_socket)
611
- self._receive_guard = ResourceGuard("reading from")
612
- self._send_guard = ResourceGuard("writing to")
613
-
614
- async def receive(self) -> tuple[bytes, IPSockAddrType]:
615
- with self._receive_guard:
616
- try:
617
- data, addr = await self._trio_socket.recvfrom(65536)
618
- return data, convert_ipv6_sockaddr(addr)
619
- except BaseException as exc:
620
- self._convert_socket_error(exc)
621
-
622
- async def send(self, item: UDPPacketType) -> None:
623
- with self._send_guard:
624
- try:
625
- await self._trio_socket.sendto(*item)
626
- except BaseException as exc:
627
- self._convert_socket_error(exc)
628
-
629
-
630
- class ConnectedUDPSocket(_TrioSocketMixin[IPSockAddrType], abc.ConnectedUDPSocket):
631
- def __init__(self, trio_socket: TrioSocketType) -> None:
632
- super().__init__(trio_socket)
633
- self._receive_guard = ResourceGuard("reading from")
634
- self._send_guard = ResourceGuard("writing to")
635
-
636
- async def receive(self) -> bytes:
637
- with self._receive_guard:
638
- try:
639
- return await self._trio_socket.recv(65536)
640
- except BaseException as exc:
641
- self._convert_socket_error(exc)
642
-
643
- async def send(self, item: bytes) -> None:
644
- with self._send_guard:
645
- try:
646
- await self._trio_socket.send(item)
647
- except BaseException as exc:
648
- self._convert_socket_error(exc)
649
-
650
-
651
- async def connect_tcp(
652
- host: str, port: int, local_address: IPSockAddrType | None = None
653
- ) -> SocketStream:
654
- family = socket.AF_INET6 if ":" in host else socket.AF_INET
655
- trio_socket = trio.socket.socket(family)
656
- trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
657
- if local_address:
658
- await trio_socket.bind(local_address)
659
-
660
- try:
661
- await trio_socket.connect((host, port))
662
- except BaseException:
663
- trio_socket.close()
664
- raise
665
-
666
- return SocketStream(trio_socket)
667
-
668
-
669
- async def connect_unix(path: str) -> UNIXSocketStream:
670
- trio_socket = trio.socket.socket(socket.AF_UNIX)
671
- try:
672
- await trio_socket.connect(path)
673
- except BaseException:
674
- trio_socket.close()
675
- raise
676
-
677
- return UNIXSocketStream(trio_socket)
678
-
679
-
680
- async def create_udp_socket(
681
- family: socket.AddressFamily,
682
- local_address: IPSockAddrType | None,
683
- remote_address: IPSockAddrType | None,
684
- reuse_port: bool,
685
- ) -> UDPSocket | ConnectedUDPSocket:
686
- trio_socket = trio.socket.socket(family=family, type=socket.SOCK_DGRAM)
687
-
688
- if reuse_port:
689
- trio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
690
-
691
- if local_address:
692
- await trio_socket.bind(local_address)
693
-
694
- if remote_address:
695
- await trio_socket.connect(remote_address)
696
- return ConnectedUDPSocket(trio_socket)
697
- else:
698
- return UDPSocket(trio_socket)
699
-
700
-
701
- getaddrinfo = trio.socket.getaddrinfo
702
- getnameinfo = trio.socket.getnameinfo
703
-
704
-
705
- async def wait_socket_readable(sock: socket.socket) -> None:
706
- try:
707
- await wait_readable(sock)
708
- except trio.ClosedResourceError as exc:
709
- raise ClosedResourceError().with_traceback(exc.__traceback__) from None
710
- except trio.BusyResourceError:
711
- raise BusyResourceError("reading from") from None
712
-
713
-
714
- async def wait_socket_writable(sock: socket.socket) -> None:
715
- try:
716
- await wait_writable(sock)
717
- except trio.ClosedResourceError as exc:
718
- raise ClosedResourceError().with_traceback(exc.__traceback__) from None
719
- except trio.BusyResourceError:
720
- raise BusyResourceError("writing to") from None
721
-
722
-
723
- #
724
- # Synchronization
725
- #
726
-
727
-
728
- class Event(BaseEvent):
729
- def __new__(cls) -> Event:
730
- return object.__new__(cls)
731
-
732
- def __init__(self) -> None:
733
- self.__original = trio.Event()
734
-
735
- def is_set(self) -> bool:
736
- return self.__original.is_set()
737
-
738
- async def wait(self) -> None:
739
- return await self.__original.wait()
740
-
741
- def statistics(self) -> EventStatistics:
742
- orig_statistics = self.__original.statistics()
743
- return EventStatistics(tasks_waiting=orig_statistics.tasks_waiting)
744
-
745
- def set(self) -> DeprecatedAwaitable:
746
- self.__original.set()
747
- return DeprecatedAwaitable(self.set)
748
-
749
-
750
- class CapacityLimiter(BaseCapacityLimiter):
751
- def __new__(cls, *args: object, **kwargs: object) -> CapacityLimiter:
752
- return object.__new__(cls)
753
-
754
- def __init__(
755
- self, *args: Any, original: trio.CapacityLimiter | None = None
756
- ) -> None:
757
- self.__original = original or trio.CapacityLimiter(*args)
758
-
759
- async def __aenter__(self) -> None:
760
- return await self.__original.__aenter__()
761
-
762
- async def __aexit__(
763
- self,
764
- exc_type: type[BaseException] | None,
765
- exc_val: BaseException | None,
766
- exc_tb: TracebackType | None,
767
- ) -> None:
768
- await self.__original.__aexit__(exc_type, exc_val, exc_tb)
769
-
770
- @property
771
- def total_tokens(self) -> float:
772
- return self.__original.total_tokens
773
-
774
- @total_tokens.setter
775
- def total_tokens(self, value: float) -> None:
776
- self.__original.total_tokens = value
777
-
778
- @property
779
- def borrowed_tokens(self) -> int:
780
- return self.__original.borrowed_tokens
781
-
782
- @property
783
- def available_tokens(self) -> float:
784
- return self.__original.available_tokens
785
-
786
- def acquire_nowait(self) -> DeprecatedAwaitable:
787
- self.__original.acquire_nowait()
788
- return DeprecatedAwaitable(self.acquire_nowait)
789
-
790
- def acquire_on_behalf_of_nowait(self, borrower: object) -> DeprecatedAwaitable:
791
- self.__original.acquire_on_behalf_of_nowait(borrower)
792
- return DeprecatedAwaitable(self.acquire_on_behalf_of_nowait)
793
-
794
- async def acquire(self) -> None:
795
- await self.__original.acquire()
796
-
797
- async def acquire_on_behalf_of(self, borrower: object) -> None:
798
- await self.__original.acquire_on_behalf_of(borrower)
799
-
800
- def release(self) -> None:
801
- return self.__original.release()
802
-
803
- def release_on_behalf_of(self, borrower: object) -> None:
804
- return self.__original.release_on_behalf_of(borrower)
805
-
806
- def statistics(self) -> CapacityLimiterStatistics:
807
- orig = self.__original.statistics()
808
- return CapacityLimiterStatistics(
809
- borrowed_tokens=orig.borrowed_tokens,
810
- total_tokens=orig.total_tokens,
811
- borrowers=orig.borrowers,
812
- tasks_waiting=orig.tasks_waiting,
813
- )
814
-
815
-
816
- _capacity_limiter_wrapper: RunVar = RunVar("_capacity_limiter_wrapper")
817
-
818
-
819
- def current_default_thread_limiter() -> CapacityLimiter:
820
- try:
821
- return _capacity_limiter_wrapper.get()
822
- except LookupError:
823
- limiter = CapacityLimiter(
824
- original=trio.to_thread.current_default_thread_limiter()
825
- )
826
- _capacity_limiter_wrapper.set(limiter)
827
- return limiter
828
-
829
-
830
- #
831
- # Signal handling
832
- #
833
-
834
-
835
- class _SignalReceiver(DeprecatedAsyncContextManager["_SignalReceiver"]):
836
- _iterator: AsyncIterator[int]
837
-
838
- def __init__(self, signals: tuple[Signals, ...]):
839
- self._signals = signals
840
-
841
- def __enter__(self) -> _SignalReceiver:
842
- self._cm = trio.open_signal_receiver(*self._signals)
843
- self._iterator = self._cm.__enter__()
844
- return self
845
-
846
- def __exit__(
847
- self,
848
- exc_type: type[BaseException] | None,
849
- exc_val: BaseException | None,
850
- exc_tb: TracebackType | None,
851
- ) -> bool | None:
852
- return self._cm.__exit__(exc_type, exc_val, exc_tb)
853
-
854
- def __aiter__(self) -> _SignalReceiver:
855
- return self
856
-
857
- async def __anext__(self) -> Signals:
858
- signum = await self._iterator.__anext__()
859
- return Signals(signum)
860
-
861
-
862
- def open_signal_receiver(*signals: Signals) -> _SignalReceiver:
863
- return _SignalReceiver(signals)
864
-
865
-
866
- #
867
- # Testing and debugging
868
- #
869
-
870
-
871
- def get_current_task() -> TaskInfo:
872
- task = trio_lowlevel.current_task()
873
-
874
- parent_id = None
875
- if task.parent_nursery and task.parent_nursery.parent_task:
876
- parent_id = id(task.parent_nursery.parent_task)
877
-
878
- return TaskInfo(id(task), parent_id, task.name, task.coro)
879
-
880
-
881
- def get_running_tasks() -> list[TaskInfo]:
882
- root_task = trio_lowlevel.current_root_task()
883
- task_infos = [TaskInfo(id(root_task), None, root_task.name, root_task.coro)]
884
- nurseries = root_task.child_nurseries
885
- while nurseries:
886
- new_nurseries: list[trio.Nursery] = []
887
- for nursery in nurseries:
888
- for task in nursery.child_tasks:
889
- task_infos.append(
890
- TaskInfo(id(task), id(nursery.parent_task), task.name, task.coro)
891
- )
892
- new_nurseries.extend(task.child_nurseries)
893
-
894
- nurseries = new_nurseries
895
-
896
- return task_infos
897
-
898
-
899
- def wait_all_tasks_blocked() -> Awaitable[None]:
900
- import trio.testing
901
-
902
- return trio.testing.wait_all_tasks_blocked()
903
-
904
-
905
- class TestRunner(abc.TestRunner):
906
- def __init__(self, **options: Any) -> None:
907
- from collections import deque
908
- from queue import Queue
909
-
910
- self._call_queue: Queue[Callable[..., object]] = Queue()
911
- self._result_queue: deque[Outcome] = deque()
912
- self._stop_event: trio.Event | None = None
913
- self._nursery: trio.Nursery | None = None
914
- self._options = options
915
-
916
- async def _trio_main(self) -> None:
917
- self._stop_event = trio.Event()
918
- async with trio.open_nursery() as self._nursery:
919
- await self._stop_event.wait()
920
-
921
- async def _call_func(
922
- self, func: Callable[..., Awaitable[object]], args: tuple, kwargs: dict
923
- ) -> None:
924
- try:
925
- retval = await func(*args, **kwargs)
926
- except BaseException as exc:
927
- self._result_queue.append(Error(exc))
928
- else:
929
- self._result_queue.append(Value(retval))
930
-
931
- def _main_task_finished(self, outcome: object) -> None:
932
- self._nursery = None
933
-
934
- def _get_nursery(self) -> trio.Nursery:
935
- if self._nursery is None:
936
- trio.lowlevel.start_guest_run(
937
- self._trio_main,
938
- run_sync_soon_threadsafe=self._call_queue.put,
939
- done_callback=self._main_task_finished,
940
- **self._options,
941
- )
942
- while self._nursery is None:
943
- self._call_queue.get()()
944
-
945
- return self._nursery
946
-
947
- def _call(
948
- self, func: Callable[..., Awaitable[T_Retval]], *args: object, **kwargs: object
949
- ) -> T_Retval:
950
- self._get_nursery().start_soon(self._call_func, func, args, kwargs)
951
- while not self._result_queue:
952
- self._call_queue.get()()
953
-
954
- outcome = self._result_queue.pop()
955
- return outcome.unwrap()
956
-
957
- def close(self) -> None:
958
- if self._stop_event:
959
- self._stop_event.set()
960
- while self._nursery is not None:
961
- self._call_queue.get()()
962
-
963
- def run_asyncgen_fixture(
964
- self,
965
- fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],
966
- kwargs: dict[str, Any],
967
- ) -> Iterable[T_Retval]:
968
- async def fixture_runner(*, task_status: TaskStatus[T_Retval]) -> None:
969
- agen = fixture_func(**kwargs)
970
- retval = await agen.asend(None)
971
- task_status.started(retval)
972
- await teardown_event.wait()
973
- try:
974
- await agen.asend(None)
975
- except StopAsyncIteration:
976
- pass
977
- else:
978
- await agen.aclose()
979
- raise RuntimeError("Async generator fixture did not stop")
980
-
981
- teardown_event = trio.Event()
982
- fixture_value = self._call(lambda: self._get_nursery().start(fixture_runner))
983
- yield fixture_value
984
- teardown_event.set()
985
-
986
- def run_fixture(
987
- self,
988
- fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],
989
- kwargs: dict[str, Any],
990
- ) -> T_Retval:
991
- return self._call(fixture_func, **kwargs)
992
-
993
- def run_test(
994
- self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
995
- ) -> None:
996
- self._call(test_func, **kwargs)
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/middleware/httpsredirect.py DELETED
@@ -1,3 +0,0 @@
- from starlette.middleware.httpsredirect import ( # noqa
-     HTTPSRedirectMiddleware as HTTPSRedirectMiddleware,
- )
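The deleted module above only re-exports Starlette's `HTTPSRedirectMiddleware` under the `fastapi.middleware` namespace. A minimal sketch of the usual way it is wired into an application:

```python
# Sketch: enabling the HTTPS redirect middleware on a FastAPI app.
from fastapi import FastAPI
from fastapi.middleware.httpsredirect import HTTPSRedirectMiddleware

app = FastAPI()
app.add_middleware(HTTPSRedirectMiddleware)  # redirects http/ws requests to https/wss


@app.get("/")
async def root() -> dict:
    return {"message": "served over https"}
```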
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/inference/_client.py DELETED
@@ -1,1258 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023-present, the HuggingFace Inc. team.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- #
16
- # Related resources:
17
- # https://huggingface.co/tasks
18
- # https://huggingface.co/docs/huggingface.js/inference/README
19
- # https://github.com/huggingface/huggingface.js/tree/main/packages/inference/src
20
- # https://github.com/huggingface/text-generation-inference/tree/main/clients/python
21
- # https://github.com/huggingface/text-generation-inference/blob/main/clients/python/text_generation/client.py
22
- # https://huggingface.slack.com/archives/C03E4DQ9LAJ/p1680169099087869
23
- # https://github.com/huggingface/unity-api#tasks
24
- #
25
- # Some TODO:
26
- # - validate inputs/options/parameters? with Pydantic for instance? or only optionally?
27
- # - add all tasks
28
- #
29
- # NOTE: the philosophy of this client is "let's make it as easy as possible to use it, even if less optimized". Some
30
- # examples of how it translates:
31
- # - Timeout / Server unavailable is handled by the client in a single "timeout" parameter.
32
- # - Files can be provided as bytes, file paths, or URLs and the client will try to "guess" the type.
33
- # - Images are parsed as PIL.Image for easier manipulation.
34
- # - Provides a "recommended model" for each task => suboptimal but user-wise quicker to get a first script running.
35
- # - Only the main parameters are publicly exposed. Power users can always read the docs for more options.
36
- import logging
37
- import time
38
- import warnings
39
- from dataclasses import asdict
40
- from typing import (
41
- TYPE_CHECKING,
42
- Any,
43
- Dict,
44
- Iterable,
45
- List,
46
- Optional,
47
- Union,
48
- overload,
49
- )
50
-
51
- from requests import HTTPError
52
- from requests.structures import CaseInsensitiveDict
53
-
54
- from huggingface_hub.constants import INFERENCE_ENDPOINT
55
- from huggingface_hub.inference._common import (
56
- ContentT,
57
- InferenceTimeoutError,
58
- _b64_encode,
59
- _b64_to_image,
60
- _bytes_to_dict,
61
- _bytes_to_image,
62
- _get_recommended_model,
63
- _import_numpy,
64
- _is_tgi_server,
65
- _open_as_binary,
66
- _set_as_non_tgi,
67
- _stream_text_generation_response,
68
- )
69
- from huggingface_hub.inference._text_generation import (
70
- TextGenerationParameters,
71
- TextGenerationRequest,
72
- TextGenerationResponse,
73
- TextGenerationStreamResponse,
74
- raise_text_generation_error,
75
- )
76
- from huggingface_hub.inference._types import ClassificationOutput, ConversationalOutput, ImageSegmentationOutput
77
- from huggingface_hub.utils import (
78
- BadRequestError,
79
- build_hf_headers,
80
- get_session,
81
- hf_raise_for_status,
82
- )
83
- from huggingface_hub.utils._typing import Literal
84
-
85
-
86
- if TYPE_CHECKING:
87
- import numpy as np
88
- from PIL import Image
89
-
90
- logger = logging.getLogger(__name__)
91
-
92
-
93
- class InferenceClient:
94
- """
95
- Initialize a new Inference Client.
96
-
97
- [`InferenceClient`] aims to provide a unified experience to perform inference. The client can be used
98
- seamlessly with either the (free) Inference API or self-hosted Inference Endpoints.
99
-
100
- Args:
101
- model (`str`, `optional`):
102
- The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `bigcode/starcoder`
103
- or a URL to a deployed Inference Endpoint. Defaults to None, in which case a recommended model is
104
- automatically selected for the task.
105
- token (`str`, *optional*):
106
- Hugging Face token. Will default to the locally saved token. Pass `token=False` if you don't want to send
107
- your token to the server.
108
- timeout (`float`, `optional`):
109
- The maximum number of seconds to wait for a response from the server. Loading a new model in Inference
110
- API can take up to several minutes. Defaults to None, meaning it will loop until the server is available.
111
- headers (`Dict[str, str]`, `optional`):
112
- Additional headers to send to the server. By default only the authorization and user-agent headers are sent.
113
- Values in this dictionary will override the default values.
114
- cookies (`Dict[str, str]`, `optional`):
115
- Additional cookies to send to the server.
116
- """
117
-
118
- def __init__(
119
- self,
120
- model: Optional[str] = None,
121
- token: Union[str, bool, None] = None,
122
- timeout: Optional[float] = None,
123
- headers: Optional[Dict[str, str]] = None,
124
- cookies: Optional[Dict[str, str]] = None,
125
- ) -> None:
126
- self.model: Optional[str] = model
127
- self.headers = CaseInsensitiveDict(build_hf_headers(token=token)) # contains 'authorization' + 'user-agent'
128
- if headers is not None:
129
- self.headers.update(headers)
130
- self.cookies = cookies
131
- self.timeout = timeout
132
-
133
- def __repr__(self):
134
- return f"<InferenceClient(model='{self.model if self.model else ''}', timeout={self.timeout})>"
135
-
136
- @overload
137
- def post( # type: ignore
138
- self,
139
- *,
140
- json: Optional[Union[str, Dict, List]] = None,
141
- data: Optional[ContentT] = None,
142
- model: Optional[str] = None,
143
- task: Optional[str] = None,
144
- stream: Literal[False] = ...,
145
- ) -> bytes:
146
- pass
147
-
148
- @overload
149
- def post( # type: ignore
150
- self,
151
- *,
152
- json: Optional[Union[str, Dict, List]] = None,
153
- data: Optional[ContentT] = None,
154
- model: Optional[str] = None,
155
- task: Optional[str] = None,
156
- stream: Literal[True] = ...,
157
- ) -> Iterable[bytes]:
158
- pass
159
-
160
- def post(
161
- self,
162
- *,
163
- json: Optional[Union[str, Dict, List]] = None,
164
- data: Optional[ContentT] = None,
165
- model: Optional[str] = None,
166
- task: Optional[str] = None,
167
- stream: bool = False,
168
- ) -> Union[bytes, Iterable[bytes]]:
169
- """
170
- Make a POST request to the inference server.
171
-
172
- Args:
173
- json (`Union[str, Dict, List]`, *optional*):
174
- The JSON data to send in the request body. Defaults to None.
175
- data (`Union[str, Path, bytes, BinaryIO]`, *optional*):
176
- The content to send in the request body. It can be raw bytes, a pointer to an opened file, a local file
177
- path, or a URL to an online resource (image, audio file,...). If both `json` and `data` are passed,
178
- `data` will take precedence. At least `json` or `data` must be provided. Defaults to None.
179
- model (`str`, *optional*):
180
- The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
181
- Inference Endpoint. Will override the model defined at the instance level. Defaults to None.
182
- task (`str`, *optional*):
183
- The task to perform on the inference. Used only to default to a recommended model if `model` is not
184
- provided. At least `model` or `task` must be provided. Defaults to None.
185
- stream (`bool`, *optional*):
186
- Whether to iterate over streaming APIs.
187
-
188
- Returns:
189
- bytes: The raw bytes returned by the server.
190
-
191
- Raises:
192
- [`InferenceTimeoutError`]:
193
- If the model is unavailable or the request times out.
194
- `HTTPError`:
195
- If the request fails with an HTTP error status code other than HTTP 503.
196
- """
197
- url = self._resolve_url(model, task)
198
-
199
- if data is not None and json is not None:
200
- warnings.warn("Ignoring `json` as `data` is passed as binary.")
201
-
202
- t0 = time.time()
203
- timeout = self.timeout
204
- while True:
205
- with _open_as_binary(data) as data_as_binary:
206
- try:
207
- response = get_session().post(
208
- url,
209
- json=json,
210
- data=data_as_binary,
211
- headers=self.headers,
212
- cookies=self.cookies,
213
- timeout=self.timeout,
214
- stream=stream,
215
- )
216
- except TimeoutError as error:
217
- # Convert any `TimeoutError` to a `InferenceTimeoutError`
218
- raise InferenceTimeoutError(f"Inference call timed out: {url}") from error
219
-
220
- try:
221
- hf_raise_for_status(response)
222
- return response.iter_lines() if stream else response.content
223
- except HTTPError as error:
224
- if error.response.status_code == 503:
225
- # If Model is unavailable, either raise a TimeoutError...
226
- if timeout is not None and time.time() - t0 > timeout:
227
- raise InferenceTimeoutError(
228
- f"Model not loaded on the server: {url}. Please retry with a higher timeout (current:"
229
- f" {self.timeout})."
230
- ) from error
231
- # ...or wait 1s and retry
232
- logger.info(f"Waiting for model to be loaded on the server: {error}")
233
- time.sleep(1)
234
- if timeout is not None:
235
- timeout = max(self.timeout - (time.time() - t0), 1) # type: ignore
236
- continue
237
- raise
238
-
239
- def audio_classification(
240
- self,
241
- audio: ContentT,
242
- *,
243
- model: Optional[str] = None,
244
- ) -> List[ClassificationOutput]:
245
- """
246
- Perform audio classification on the provided audio content.
247
-
248
- Args:
249
- audio (Union[str, Path, bytes, BinaryIO]):
250
- The audio content to classify. It can be raw audio bytes, a local audio file, or a URL pointing to an
251
- audio file.
252
- model (`str`, *optional*):
253
- The model to use for audio classification. Can be a model ID hosted on the Hugging Face Hub
254
- or a URL to a deployed Inference Endpoint. If not provided, the default recommended model for
255
- audio classification will be used.
256
-
257
- Returns:
258
- `List[Dict]`: The classification output containing the predicted label and its confidence.
259
-
260
- Raises:
261
- [`InferenceTimeoutError`]:
262
- If the model is unavailable or the request times out.
263
- `HTTPError`:
264
- If the request fails with an HTTP error status code other than HTTP 503.
265
-
266
- Example:
267
- ```py
268
- >>> from huggingface_hub import InferenceClient
269
- >>> client = InferenceClient()
270
- >>> client.audio_classification("audio.flac")
271
- [{'score': 0.4976358711719513, 'label': 'hap'}, {'score': 0.3677836060523987, 'label': 'neu'},...]
272
- ```
273
- """
274
- response = self.post(data=audio, model=model, task="audio-classification")
275
- return _bytes_to_dict(response)
276
-
277
- def automatic_speech_recognition(
278
- self,
279
- audio: ContentT,
280
- *,
281
- model: Optional[str] = None,
282
- ) -> str:
283
- """
284
- Perform automatic speech recognition (ASR or audio-to-text) on the given audio content.
285
-
286
- Args:
287
- audio (Union[str, Path, bytes, BinaryIO]):
288
- The content to transcribe. It can be raw audio bytes, local audio file, or a URL to an audio file.
289
- model (`str`, *optional*):
290
- The model to use for ASR. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
291
- Inference Endpoint. If not provided, the default recommended model for ASR will be used.
292
-
293
- Returns:
294
- str: The transcribed text.
295
-
296
- Raises:
297
- [`InferenceTimeoutError`]:
298
- If the model is unavailable or the request times out.
299
- `HTTPError`:
300
- If the request fails with an HTTP error status code other than HTTP 503.
301
-
302
- Example:
303
- ```py
304
- >>> from huggingface_hub import InferenceClient
305
- >>> client = InferenceClient()
306
- >>> client.automatic_speech_recognition("hello_world.flac")
307
- "hello world"
308
- ```
309
- """
310
- response = self.post(data=audio, model=model, task="automatic-speech-recognition")
311
- return _bytes_to_dict(response)["text"]
312
-
313
- def conversational(
314
- self,
315
- text: str,
316
- generated_responses: Optional[List[str]] = None,
317
- past_user_inputs: Optional[List[str]] = None,
318
- *,
319
- parameters: Optional[Dict[str, Any]] = None,
320
- model: Optional[str] = None,
321
- ) -> ConversationalOutput:
322
- """
323
- Generate conversational responses based on the given input text (i.e. chat with the API).
324
-
325
- Args:
326
- text (`str`):
327
- The last input from the user in the conversation.
328
- generated_responses (`List[str]`, *optional*):
329
- A list of strings corresponding to the earlier replies from the model. Defaults to None.
330
- past_user_inputs (`List[str]`, *optional*):
331
- A list of strings corresponding to the earlier replies from the user. Should be the same length as
332
- `generated_responses`. Defaults to None.
333
- parameters (`Dict[str, Any]`, *optional*):
334
- Additional parameters for the conversational task. Defaults to None. For more details about the available
335
- parameters, please refer to [this page](https://huggingface.co/docs/api-inference/detailed_parameters#conversational-task)
336
- model (`str`, *optional*):
337
- The model to use for the conversational task. Can be a model ID hosted on the Hugging Face Hub or a URL to
338
- a deployed Inference Endpoint. If not provided, the default recommended conversational model will be used.
339
- Defaults to None.
340
-
341
- Returns:
342
- `Dict`: The generated conversational output.
343
-
344
- Raises:
345
- [`InferenceTimeoutError`]:
346
- If the model is unavailable or the request times out.
347
- `HTTPError`:
348
- If the request fails with an HTTP error status code other than HTTP 503.
349
-
350
- Example:
351
- ```py
352
- >>> from huggingface_hub import InferenceClient
353
- >>> client = InferenceClient()
354
- >>> output = client.conversational("Hi, who are you?")
355
- >>> output
356
- {'generated_text': 'I am the one who knocks.', 'conversation': {'generated_responses': ['I am the one who knocks.'], 'past_user_inputs': ['Hi, who are you?']}, 'warnings': ['Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.']}
357
- >>> client.conversational(
358
- ... "Wow, that's scary!",
359
- ... generated_responses=output["conversation"]["generated_responses"],
360
- ... past_user_inputs=output["conversation"]["past_user_inputs"],
361
- ... )
362
- ```
363
- """
364
- payload: Dict[str, Any] = {"inputs": {"text": text}}
365
- if generated_responses is not None:
366
- payload["inputs"]["generated_responses"] = generated_responses
367
- if past_user_inputs is not None:
368
- payload["inputs"]["past_user_inputs"] = past_user_inputs
369
- if parameters is not None:
370
- payload["parameters"] = parameters
371
- response = self.post(json=payload, model=model, task="conversational")
372
- return _bytes_to_dict(response)
373
-
374
- def feature_extraction(self, text: str, *, model: Optional[str] = None) -> "np.ndarray":
375
- """
376
- Generate embeddings for a given text.
377
-
378
- Args:
379
- text (`str`):
380
- The text to embed.
381
- model (`str`, *optional*):
382
- The model to use for the conversational task. Can be a model ID hosted on the Hugging Face Hub or a URL to
383
- a deployed Inference Endpoint. If not provided, the default recommended conversational model will be used.
384
- Defaults to None.
385
-
386
- Returns:
387
- `np.ndarray`: The embedding representing the input text as a float32 numpy array.
388
-
389
- Raises:
390
- [`InferenceTimeoutError`]:
391
- If the model is unavailable or the request times out.
392
- `HTTPError`:
393
- If the request fails with an HTTP error status code other than HTTP 503.
394
-
395
- Example:
396
- ```py
397
- >>> from huggingface_hub import InferenceClient
398
- >>> client = InferenceClient()
399
- >>> client.feature_extraction("Hi, who are you?")
400
- array([[ 2.424802 , 2.93384 , 1.1750331 , ..., 1.240499, -0.13776633, -0.7889173 ],
401
- [-0.42943227, -0.6364878 , -1.693462 , ..., 0.41978157, -2.4336355 , 0.6162071 ],
402
- ...,
403
- [ 0.28552425, -0.928395 , -1.2077185 , ..., 0.76810825, -2.1069427 , 0.6236161 ]], dtype=float32)
404
- ```
405
- """
406
- response = self.post(json={"inputs": text}, model=model, task="feature-extraction")
407
- np = _import_numpy()
408
- return np.array(_bytes_to_dict(response)[0], dtype="float32")
409
-
410
- def image_classification(
411
- self,
412
- image: ContentT,
413
- *,
414
- model: Optional[str] = None,
415
- ) -> List[ClassificationOutput]:
416
- """
417
- Perform image classification on the given image using the specified model.
418
-
419
- Args:
420
- image (`Union[str, Path, bytes, BinaryIO]`):
421
- The image to classify. It can be raw bytes, an image file, or a URL to an online image.
422
- model (`str`, *optional*):
423
- The model to use for image classification. Can be a model ID hosted on the Hugging Face Hub or a URL to a
424
- deployed Inference Endpoint. If not provided, the default recommended model for image classification will be used.
425
-
426
- Returns:
427
- `List[Dict]`: a list of dictionaries containing the predicted label and associated probability.
428
-
429
- Raises:
430
- [`InferenceTimeoutError`]:
431
- If the model is unavailable or the request times out.
432
- `HTTPError`:
433
- If the request fails with an HTTP error status code other than HTTP 503.
434
-
435
- Example:
436
- ```py
437
- >>> from huggingface_hub import InferenceClient
438
- >>> client = InferenceClient()
439
- >>> client.image_classification("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg")
440
- [{'score': 0.9779096841812134, 'label': 'Blenheim spaniel'}, ...]
441
- ```
442
- """
443
- response = self.post(data=image, model=model, task="image-classification")
444
- return _bytes_to_dict(response)
445
-
446
- def image_segmentation(
447
- self,
448
- image: ContentT,
449
- *,
450
- model: Optional[str] = None,
451
- ) -> List[ImageSegmentationOutput]:
452
- """
453
- Perform image segmentation on the given image using the specified model.
454
-
455
- <Tip warning={true}>
456
-
457
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
458
-
459
- </Tip>
460
-
461
- Args:
462
- image (`Union[str, Path, bytes, BinaryIO]`):
463
- The image to segment. It can be raw bytes, an image file, or a URL to an online image.
464
- model (`str`, *optional*):
465
- The model to use for image segmentation. Can be a model ID hosted on the Hugging Face Hub or a URL to a
466
- deployed Inference Endpoint. If not provided, the default recommended model for image segmentation will be used.
467
-
468
- Returns:
469
- `List[Dict]`: A list of dictionaries containing the segmented masks and associated attributes.
470
-
471
- Raises:
472
- [`InferenceTimeoutError`]:
473
- If the model is unavailable or the request times out.
474
- `HTTPError`:
475
- If the request fails with an HTTP error status code other than HTTP 503.
476
-
477
- Example:
478
- ```py
479
- >>> from huggingface_hub import InferenceClient
480
- >>> client = InferenceClient()
481
- >>> client.image_segmentation("cat.jpg")
482
- [{'score': 0.989008, 'label': 'LABEL_184', 'mask': <PIL.PngImagePlugin.PngImageFile image mode=L size=400x300 at 0x7FDD2B129CC0>}, ...]
483
- ```
484
- """
485
-
486
- # Segment
487
- response = self.post(data=image, model=model, task="image-segmentation")
488
- output = _bytes_to_dict(response)
489
-
490
- # Parse masks as PIL Image
491
- if not isinstance(output, list):
492
- raise ValueError(f"Server output must be a list. Got {type(output)}: {str(output)[:200]}...")
493
- for item in output:
494
- item["mask"] = _b64_to_image(item["mask"])
495
- return output
496
-
497
- def image_to_image(
498
- self,
499
- image: ContentT,
500
- prompt: Optional[str] = None,
501
- *,
502
- negative_prompt: Optional[str] = None,
503
- height: Optional[int] = None,
504
- width: Optional[int] = None,
505
- num_inference_steps: Optional[int] = None,
506
- guidance_scale: Optional[float] = None,
507
- model: Optional[str] = None,
508
- **kwargs,
509
- ) -> "Image":
510
- """
511
- Perform image-to-image translation using a specified model.
512
-
513
- <Tip warning={true}>
514
-
515
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
516
-
517
- </Tip>
518
-
519
- Args:
520
- image (`Union[str, Path, bytes, BinaryIO]`):
521
- The input image for translation. It can be raw bytes, an image file, or a URL to an online image.
522
- prompt (`str`, *optional*):
523
- The text prompt to guide the image generation.
524
- negative_prompt (`str`, *optional*):
525
- A negative prompt to guide the translation process.
526
- height (`int`, *optional*):
527
- The height in pixels of the generated image.
528
- width (`int`, *optional*):
529
- The width in pixels of the generated image.
530
- num_inference_steps (`int`, *optional*):
531
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
532
- expense of slower inference.
533
- guidance_scale (`float`, *optional*):
534
- A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
535
- usually at the expense of lower image quality.
536
- model (`str`, *optional*):
537
- The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
538
- Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
539
-
540
- Returns:
541
- `Image`: The translated image.
542
-
543
- Raises:
544
- [`InferenceTimeoutError`]:
545
- If the model is unavailable or the request times out.
546
- `HTTPError`:
547
- If the request fails with an HTTP error status code other than HTTP 503.
548
-
549
- Example:
550
- ```py
551
- >>> from huggingface_hub import InferenceClient
552
- >>> client = InferenceClient()
553
- >>> image = client.image_to_image("cat.jpg", prompt="turn the cat into a tiger")
554
- >>> image.save("tiger.jpg")
555
- ```
556
- """
557
- parameters = {
558
- "prompt": prompt,
559
- "negative_prompt": negative_prompt,
560
- "height": height,
561
- "width": width,
562
- "num_inference_steps": num_inference_steps,
563
- "guidance_scale": guidance_scale,
564
- **kwargs,
565
- }
566
- if all(parameter is None for parameter in parameters.values()):
567
- # Either only an image to send => send as raw bytes
568
- data = image
569
- payload: Optional[Dict[str, Any]] = None
570
- else:
571
- # Or an image + some parameters => use base64 encoding
572
- data = None
573
- payload = {"inputs": _b64_encode(image)}
574
- for key, value in parameters.items():
575
- if value is not None:
576
- payload[key] = value
577
-
578
- response = self.post(json=payload, data=data, model=model, task="image-to-image")
579
- return _bytes_to_image(response)
580
-
581
- def image_to_text(self, image: ContentT, *, model: Optional[str] = None) -> str:
582
- """
583
- Takes an input image and returns text.
584
-
585
- Models can have very different outputs depending on your use case (image captioning, optical character recognition
586
- (OCR), Pix2Struct, etc.). Please have a look at the model card to learn more about a model's specificities.
587
-
588
- Args:
589
- image (`Union[str, Path, bytes, BinaryIO]`):
590
- The input image to caption. It can be raw bytes, an image file, or a URL to an online image.
591
- model (`str`, *optional*):
592
- The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
593
- Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
594
-
595
- Returns:
596
- `str`: The generated text.
597
-
598
- Raises:
599
- [`InferenceTimeoutError`]:
600
- If the model is unavailable or the request times out.
601
- `HTTPError`:
602
- If the request fails with an HTTP error status code other than HTTP 503.
603
-
604
- Example:
605
- ```py
606
- >>> from huggingface_hub import InferenceClient
607
- >>> client = InferenceClient()
608
- >>> client.image_to_text("cat.jpg")
609
- 'a cat standing in a grassy field '
610
- >>> client.image_to_text("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg")
611
- 'a dog laying on the grass next to a flower pot '
612
- ```
613
- """
614
- response = self.post(data=image, model=model, task="image-to-text")
615
- return _bytes_to_dict(response)[0]["generated_text"]
616
-
617
- def sentence_similarity(
618
- self, sentence: str, other_sentences: List[str], *, model: Optional[str] = None
619
- ) -> List[float]:
620
- """
621
- Compute the semantic similarity between a sentence and a list of other sentences by comparing their embeddings.
622
-
623
- Args:
624
- sentence (`str`):
625
- The main sentence to compare to others.
626
- other_sentences (`List[str]`):
627
- The list of sentences to compare to.
628
- model (`str`, *optional*):
629
- The model to use for sentence similarity. Can be a model ID hosted on the Hugging Face Hub or a URL to
630
- a deployed Inference Endpoint. If not provided, the default recommended sentence-similarity model will be used.
631
- Defaults to None.
632
-
633
- Returns:
634
- `List[float]`: The similarity scores computed between the input sentence and each of the other sentences.
635
-
636
- Raises:
637
- [`InferenceTimeoutError`]:
638
- If the model is unavailable or the request times out.
639
- `HTTPError`:
640
- If the request fails with an HTTP error status code other than HTTP 503.
641
-
642
- Example:
643
- ```py
644
- >>> from huggingface_hub import InferenceClient
645
- >>> client = InferenceClient()
646
- >>> client.sentence_similarity(
647
- ... "Machine learning is so easy.",
648
- ... other_sentences=[
649
- ... "Deep learning is so straightforward.",
650
- ... "This is so difficult, like rocket science.",
651
- ... "I can't believe how much I struggled with this.",
652
- ... ],
653
- ... )
654
- [0.7785726189613342, 0.45876261591911316, 0.2906220555305481]
655
- ```
656
- """
657
- response = self.post(
658
- json={"inputs": {"source_sentence": sentence, "sentences": other_sentences}},
659
- model=model,
660
- task="sentence-similarity",
661
- )
662
- return _bytes_to_dict(response)
663
-
664
- def summarization(
665
- self,
666
- text: str,
667
- *,
668
- parameters: Optional[Dict[str, Any]] = None,
669
- model: Optional[str] = None,
670
- ) -> str:
671
- """
672
- Generate a summary of a given text using a specified model.
673
-
674
- Args:
675
- text (`str`):
676
- The input text to summarize.
677
- parameters (`Dict[str, Any]`, *optional*):
678
- Additional parameters for summarization. Check out this [page](https://huggingface.co/docs/api-inference/detailed_parameters#summarization-task)
679
- for more details.
680
- model (`str`, *optional*):
681
- The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
682
- Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
683
-
684
- Returns:
685
- `str`: The generated summary text.
686
-
687
- Raises:
688
- [`InferenceTimeoutError`]:
689
- If the model is unavailable or the request times out.
690
- `HTTPError`:
691
- If the request fails with an HTTP error status code other than HTTP 503.
692
-
693
- Example:
694
- ```py
695
- >>> from huggingface_hub import InferenceClient
696
- >>> client = InferenceClient()
697
- >>> client.summarization("The Eiffel tower...")
698
- 'The Eiffel tower is one of the most famous landmarks in the world....'
699
- ```
700
- """
701
- payload: Dict[str, Any] = {"inputs": text}
702
- if parameters is not None:
703
- payload["parameters"] = parameters
704
- response = self.post(json=payload, model=model, task="summarization")
705
- return _bytes_to_dict(response)[0]["summary_text"]
706
-
707
- @overload
708
- def text_generation( # type: ignore
709
- self,
710
- prompt: str,
711
- *,
712
- details: Literal[False] = ...,
713
- stream: Literal[False] = ...,
714
- model: Optional[str] = None,
715
- do_sample: bool = False,
716
- max_new_tokens: int = 20,
717
- best_of: Optional[int] = None,
718
- repetition_penalty: Optional[float] = None,
719
- return_full_text: bool = False,
720
- seed: Optional[int] = None,
721
- stop_sequences: Optional[List[str]] = None,
722
- temperature: Optional[float] = None,
723
- top_k: Optional[int] = None,
724
- top_p: Optional[float] = None,
725
- truncate: Optional[int] = None,
726
- typical_p: Optional[float] = None,
727
- watermark: bool = False,
728
- ) -> str:
729
- ...
730
-
731
- @overload
732
- def text_generation( # type: ignore
733
- self,
734
- prompt: str,
735
- *,
736
- details: Literal[True] = ...,
737
- stream: Literal[False] = ...,
738
- model: Optional[str] = None,
739
- do_sample: bool = False,
740
- max_new_tokens: int = 20,
741
- best_of: Optional[int] = None,
742
- repetition_penalty: Optional[float] = None,
743
- return_full_text: bool = False,
744
- seed: Optional[int] = None,
745
- stop_sequences: Optional[List[str]] = None,
746
- temperature: Optional[float] = None,
747
- top_k: Optional[int] = None,
748
- top_p: Optional[float] = None,
749
- truncate: Optional[int] = None,
750
- typical_p: Optional[float] = None,
751
- watermark: bool = False,
752
- ) -> TextGenerationResponse:
753
- ...
754
-
755
- @overload
756
- def text_generation( # type: ignore
757
- self,
758
- prompt: str,
759
- *,
760
- details: Literal[False] = ...,
761
- stream: Literal[True] = ...,
762
- model: Optional[str] = None,
763
- do_sample: bool = False,
764
- max_new_tokens: int = 20,
765
- best_of: Optional[int] = None,
766
- repetition_penalty: Optional[float] = None,
767
- return_full_text: bool = False,
768
- seed: Optional[int] = None,
769
- stop_sequences: Optional[List[str]] = None,
770
- temperature: Optional[float] = None,
771
- top_k: Optional[int] = None,
772
- top_p: Optional[float] = None,
773
- truncate: Optional[int] = None,
774
- typical_p: Optional[float] = None,
775
- watermark: bool = False,
776
- ) -> Iterable[str]:
777
- ...
778
-
779
- @overload
780
- def text_generation(
781
- self,
782
- prompt: str,
783
- *,
784
- details: Literal[True] = ...,
785
- stream: Literal[True] = ...,
786
- model: Optional[str] = None,
787
- do_sample: bool = False,
788
- max_new_tokens: int = 20,
789
- best_of: Optional[int] = None,
790
- repetition_penalty: Optional[float] = None,
791
- return_full_text: bool = False,
792
- seed: Optional[int] = None,
793
- stop_sequences: Optional[List[str]] = None,
794
- temperature: Optional[float] = None,
795
- top_k: Optional[int] = None,
796
- top_p: Optional[float] = None,
797
- truncate: Optional[int] = None,
798
- typical_p: Optional[float] = None,
799
- watermark: bool = False,
800
- ) -> Iterable[TextGenerationStreamResponse]:
801
- ...
802
-
803
- def text_generation(
804
- self,
805
- prompt: str,
806
- *,
807
- details: bool = False,
808
- stream: bool = False,
809
- model: Optional[str] = None,
810
- do_sample: bool = False,
811
- max_new_tokens: int = 20,
812
- best_of: Optional[int] = None,
813
- repetition_penalty: Optional[float] = None,
814
- return_full_text: bool = False,
815
- seed: Optional[int] = None,
816
- stop_sequences: Optional[List[str]] = None,
817
- temperature: Optional[float] = None,
818
- top_k: Optional[int] = None,
819
- top_p: Optional[float] = None,
820
- truncate: Optional[int] = None,
821
- typical_p: Optional[float] = None,
822
- watermark: bool = False,
823
- decoder_input_details: bool = False,
824
- ) -> Union[str, TextGenerationResponse, Iterable[str], Iterable[TextGenerationStreamResponse]]:
825
- """
826
- Given a prompt, generate the following text.
827
-
828
- It is recommended to have Pydantic installed in order to get inputs validated. This is preferable as it allows
829
- early failures.
830
-
831
- The API endpoint is expected to be served with the `text-generation-inference` backend (TGI). This backend is the
832
- go-to solution to run large language models at scale. However, for some smaller models (e.g. "gpt2") the
833
- default `transformers` + `api-inference` solution is still in use. Both approaches have very similar APIs, but they are
834
- not exactly the same. This method is compatible with both approaches but some parameters are only available for
835
- `text-generation-inference`. If some parameters are ignored, a warning message is triggered but the process
836
- continues correctly.
837
-
838
- To learn more about the TGI project, please refer to https://github.com/huggingface/text-generation-inference.
839
-
840
- Args:
841
- prompt (`str`):
842
- Input text.
843
- details (`bool`, *optional*):
844
- By default, text_generation returns a string. Pass `details=True` if you want a detailed output (tokens,
845
- probabilities, seed, finish reason, etc.). Only available for models running with the
846
- `text-generation-inference` backend.
847
- stream (`bool`, *optional*):
848
- By default, text_generation returns the full generated text. Pass `stream=True` if you want a stream of
849
- tokens to be returned. Only available for models running with the `text-generation-inference`
850
- backend.
851
- model (`str`, *optional*):
852
- The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
853
- Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
854
- do_sample (`bool`):
855
- Activate logits sampling
856
- max_new_tokens (`int`):
857
- Maximum number of generated tokens
858
- best_of (`int`):
859
- Generate best_of sequences and return the one with the highest token logprobs
860
- repetition_penalty (`float`):
861
- The parameter for repetition penalty. 1.0 means no penalty. See [this
862
- paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
863
- return_full_text (`bool`):
864
- Whether to prepend the prompt to the generated text
865
- seed (`int`):
866
- Random sampling seed
867
- stop_sequences (`List[str]`):
868
- Stop generating tokens if a member of `stop_sequences` is generated
869
- temperature (`float`):
870
- The value used to modulate the logits distribution.
871
- top_k (`int`):
872
- The number of highest probability vocabulary tokens to keep for top-k-filtering.
873
- top_p (`float`):
874
- If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
875
- higher are kept for generation.
876
- truncate (`int`):
877
- Truncate inputs tokens to the given size
878
- typical_p (`float`):
879
- Typical Decoding mass
880
- See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
881
- watermark (`bool`):
882
- Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
883
- decoder_input_details (`bool`):
884
- Return the decoder input token logprobs and ids. You must set `details=True` as well for it to be taken
885
- into account. Defaults to `False`.
886
-
887
- Returns:
888
- `Union[str, TextGenerationResponse, Iterable[str], Iterable[TextGenerationStreamResponse]]`:
889
- Generated text returned from the server:
890
- - if `stream=False` and `details=False`, the generated text is returned as a `str` (default)
891
- - if `stream=True` and `details=False`, the generated text is returned token by token as a `Iterable[str]`
892
- - if `stream=False` and `details=True`, the generated text is returned with more details as a [`~huggingface_hub.inference._text_generation.TextGenerationResponse`]
893
- - if `details=True` and `stream=True`, the generated text is returned token by token as an iterable of [`~huggingface_hub.inference._text_generation.TextGenerationStreamResponse`]
894
-
895
- Raises:
896
- `ValidationError`:
897
- If input values are not valid. No HTTP call is made to the server.
898
- [`InferenceTimeoutError`]:
899
- If the model is unavailable or the request times out.
900
- `HTTPError`:
901
- If the request fails with an HTTP error status code other than HTTP 503.
902
-
903
- Example:
904
- ```py
905
- >>> from huggingface_hub import InferenceClient
906
- >>> client = InferenceClient()
907
-
908
- # Case 1: generate text
909
- >>> client.text_generation("The huggingface_hub library is ", max_new_tokens=12)
910
- '100% open source and built to be easy to use.'
911
-
912
- # Case 2: iterate over the generated tokens. Useful for large generation.
913
- >>> for token in client.text_generation("The huggingface_hub library is ", max_new_tokens=12, stream=True):
914
- ... print(token)
915
- 100
916
- %
917
- open
918
- source
919
- and
920
- built
921
- to
922
- be
923
- easy
924
- to
925
- use
926
- .
927
-
928
- # Case 3: get more details about the generation process.
929
- >>> client.text_generation("The huggingface_hub library is ", max_new_tokens=12, details=True)
930
- TextGenerationResponse(
931
- generated_text='100% open source and built to be easy to use.',
932
- details=Details(
933
- finish_reason=<FinishReason.Length: 'length'>,
934
- generated_tokens=12,
935
- seed=None,
936
- prefill=[
937
- InputToken(id=487, text='The', logprob=None),
938
- InputToken(id=53789, text=' hugging', logprob=-13.171875),
939
- (...)
940
- InputToken(id=204, text=' ', logprob=-7.0390625)
941
- ],
942
- tokens=[
943
- Token(id=1425, text='100', logprob=-1.0175781, special=False),
944
- Token(id=16, text='%', logprob=-0.0463562, special=False),
945
- (...)
946
- Token(id=25, text='.', logprob=-0.5703125, special=False)
947
- ],
948
- best_of_sequences=None
949
- )
950
- )
951
-
952
- # Case 4: iterate over the generated tokens with more details.
953
- # Last object is more complete, containing the full generated text and the finish reason.
954
- >>> for details in client.text_generation("The huggingface_hub library is ", max_new_tokens=12, details=True, stream=True):
955
- ... print(details)
956
- ...
957
- TextGenerationStreamResponse(token=Token(id=1425, text='100', logprob=-1.0175781, special=False), generated_text=None, details=None)
958
- TextGenerationStreamResponse(token=Token(id=16, text='%', logprob=-0.0463562, special=False), generated_text=None, details=None)
959
- TextGenerationStreamResponse(token=Token(id=1314, text=' open', logprob=-1.3359375, special=False), generated_text=None, details=None)
960
- TextGenerationStreamResponse(token=Token(id=3178, text=' source', logprob=-0.28100586, special=False), generated_text=None, details=None)
961
- TextGenerationStreamResponse(token=Token(id=273, text=' and', logprob=-0.5961914, special=False), generated_text=None, details=None)
962
- TextGenerationStreamResponse(token=Token(id=3426, text=' built', logprob=-1.9423828, special=False), generated_text=None, details=None)
963
- TextGenerationStreamResponse(token=Token(id=271, text=' to', logprob=-1.4121094, special=False), generated_text=None, details=None)
964
- TextGenerationStreamResponse(token=Token(id=314, text=' be', logprob=-1.5224609, special=False), generated_text=None, details=None)
965
- TextGenerationStreamResponse(token=Token(id=1833, text=' easy', logprob=-2.1132812, special=False), generated_text=None, details=None)
966
- TextGenerationStreamResponse(token=Token(id=271, text=' to', logprob=-0.08520508, special=False), generated_text=None, details=None)
967
- TextGenerationStreamResponse(token=Token(id=745, text=' use', logprob=-0.39453125, special=False), generated_text=None, details=None)
968
- TextGenerationStreamResponse(token=Token(
969
- id=25,
970
- text='.',
971
- logprob=-0.5703125,
972
- special=False),
973
- generated_text='100% open source and built to be easy to use.',
974
- details=StreamDetails(finish_reason=<FinishReason.Length: 'length'>, generated_tokens=12, seed=None)
975
- )
976
- ```
977
- """
978
- # NOTE: Text-generation integration is taken from the text-generation-inference project. It has more features
979
- # like input/output validation (if Pydantic is installed). See `_text_generation.py` header for more details.
980
-
981
- if decoder_input_details and not details:
982
- warnings.warn(
983
- "`decoder_input_details=True` has been passed to the server but `details=False` is set meaning that"
984
- " the output from the server will be truncated."
985
- )
986
- decoder_input_details = False
987
-
988
- # Validate parameters
989
- parameters = TextGenerationParameters(
990
- best_of=best_of,
991
- details=details,
992
- do_sample=do_sample,
993
- max_new_tokens=max_new_tokens,
994
- repetition_penalty=repetition_penalty,
995
- return_full_text=return_full_text,
996
- seed=seed,
997
- stop=stop_sequences if stop_sequences is not None else [],
998
- temperature=temperature,
999
- top_k=top_k,
1000
- top_p=top_p,
1001
- truncate=truncate,
1002
- typical_p=typical_p,
1003
- watermark=watermark,
1004
- decoder_input_details=decoder_input_details,
1005
- )
1006
- request = TextGenerationRequest(inputs=prompt, stream=stream, parameters=parameters)
1007
- payload = asdict(request)
1008
-
1009
- # Remove some parameters if not a TGI server
1010
- if not _is_tgi_server(model):
1011
- ignored_parameters = []
1012
- for key in "watermark", "stop", "details", "decoder_input_details":
1013
- if payload["parameters"][key] is not None:
1014
- ignored_parameters.append(key)
1015
- del payload["parameters"][key]
1016
- if len(ignored_parameters) > 0:
1017
- warnings.warn(
1018
- (
1019
- "API endpoint/model for text-generation is not served via TGI. Ignoring parameters"
1020
- f" {ignored_parameters}."
1021
- ),
1022
- UserWarning,
1023
- )
1024
- if details:
1025
- warnings.warn(
1026
- (
1027
- "API endpoint/model for text-generation is not served via TGI. Parameter `details=True` will"
1028
- " be ignored meaning only the generated text will be returned."
1029
- ),
1030
- UserWarning,
1031
- )
1032
- details = False
1033
- if stream:
1034
- raise ValueError(
1035
- "API endpoint/model for text-generation is not served via TGI. Cannot return output as a stream."
1036
- " Please pass `stream=False` as input."
1037
- )
1038
-
1039
- # Handle errors separately for more precise error messages
1040
- try:
1041
- bytes_output = self.post(json=payload, model=model, task="text-generation", stream=stream) # type: ignore
1042
- except HTTPError as e:
1043
- if isinstance(e, BadRequestError) and "The following `model_kwargs` are not used by the model" in str(e):
1044
- _set_as_non_tgi(model)
1045
- return self.text_generation( # type: ignore
1046
- prompt=prompt,
1047
- details=details,
1048
- stream=stream,
1049
- model=model,
1050
- do_sample=do_sample,
1051
- max_new_tokens=max_new_tokens,
1052
- best_of=best_of,
1053
- repetition_penalty=repetition_penalty,
1054
- return_full_text=return_full_text,
1055
- seed=seed,
1056
- stop_sequences=stop_sequences,
1057
- temperature=temperature,
1058
- top_k=top_k,
1059
- top_p=top_p,
1060
- truncate=truncate,
1061
- typical_p=typical_p,
1062
- watermark=watermark,
1063
- decoder_input_details=decoder_input_details,
1064
- )
1065
- raise_text_generation_error(e)
1066
-
1067
- # Parse output
1068
- if stream:
1069
- return _stream_text_generation_response(bytes_output, details) # type: ignore
1070
-
1071
- data = _bytes_to_dict(bytes_output)[0]
1072
- return TextGenerationResponse(**data) if details else data["generated_text"]
1073
-
1074
- def text_to_image(
1075
- self,
1076
- prompt: str,
1077
- *,
1078
- negative_prompt: Optional[str] = None,
1079
- height: Optional[float] = None,
1080
- width: Optional[float] = None,
1081
- num_inference_steps: Optional[float] = None,
1082
- guidance_scale: Optional[float] = None,
1083
- model: Optional[str] = None,
1084
- **kwargs,
1085
- ) -> "Image":
1086
- """
1087
- Generate an image based on a given text using a specified model.
1088
-
1089
- <Tip warning={true}>
1090
-
1091
- You must have `PIL` installed if you want to work with images (`pip install Pillow`).
1092
-
1093
- </Tip>
1094
-
1095
- Args:
1096
- prompt (`str`):
1097
- The prompt to generate an image from.
1098
- negative_prompt (`str`, *optional*):
1099
- An optional negative prompt for the image generation.
1100
- height (`float`, *optional*):
1101
- The height in pixels of the image to generate.
1102
- width (`float`, *optional*):
1103
- The width in pixels of the image to generate.
1104
- num_inference_steps (`int`, *optional*):
1105
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
1106
- expense of slower inference.
1107
- guidance_scale (`float`, *optional*):
1108
- A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
1109
- usually at the expense of lower image quality.
1110
- model (`str`, *optional*):
1111
- The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
1112
- Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
1113
-
1114
- Returns:
1115
- `Image`: The generated image.
1116
-
1117
- Raises:
1118
- [`InferenceTimeoutError`]:
1119
- If the model is unavailable or the request times out.
1120
- `HTTPError`:
1121
- If the request fails with an HTTP error status code other than HTTP 503.
1122
-
1123
- Example:
1124
- ```py
1125
- >>> from huggingface_hub import InferenceClient
1126
- >>> client = InferenceClient()
1127
-
1128
- >>> image = client.text_to_image("An astronaut riding a horse on the moon.")
1129
- >>> image.save("astronaut.png")
1130
-
1131
- >>> image = client.text_to_image(
1132
- ... "An astronaut riding a horse on the moon.",
1133
- ... negative_prompt="low resolution, blurry",
1134
- ... model="stabilityai/stable-diffusion-2-1",
1135
- ... )
1136
- >>> image.save("better_astronaut.png")
1137
- ```
1138
- """
1139
- parameters = {
1140
- "inputs": prompt,
1141
- "negative_prompt": negative_prompt,
1142
- "height": height,
1143
- "width": width,
1144
- "num_inference_steps": num_inference_steps,
1145
- "guidance_scale": guidance_scale,
1146
- **kwargs,
1147
- }
1148
- payload = {}
1149
- for key, value in parameters.items():
1150
- if value is not None:
1151
- payload[key] = value
1152
- response = self.post(json=payload, model=model, task="text-to-image")
1153
- return _bytes_to_image(response)
1154
-
1155
- def text_to_speech(self, text: str, *, model: Optional[str] = None) -> bytes:
1156
- """
1157
- Synthesize audio of a voice pronouncing the given text.
1158
-
1159
- Args:
1160
- text (`str`):
1161
- The text to synthesize.
1162
- model (`str`, *optional*):
1163
- The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
1164
- Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
1165
-
1166
- Returns:
1167
- `bytes`: The generated audio.
1168
-
1169
- Raises:
1170
- [`InferenceTimeoutError`]:
1171
- If the model is unavailable or the request times out.
1172
- `HTTPError`:
1173
- If the request fails with an HTTP error status code other than HTTP 503.
1174
-
1175
- Example:
1176
- ```py
1177
- >>> from pathlib import Path
1178
- >>> from huggingface_hub import InferenceClient
1179
- >>> client = InferenceClient()
1180
-
1181
- >>> audio = client.text_to_speech("Hello world")
1182
- >>> Path("hello_world.flac").write_bytes(audio)
1183
- ```
1184
- """
1185
- return self.post(json={"inputs": text}, model=model, task="text-to-speech")
1186
-
1187
- def zero_shot_image_classification(
1188
- self, image: ContentT, labels: List[str], *, model: Optional[str] = None
1189
- ) -> List[ClassificationOutput]:
1190
- """
1191
- Provide an input image and a set of text labels to predict the most relevant labels for the image.
1192
-
1193
- Args:
1194
- image (`Union[str, Path, bytes, BinaryIO]`):
1195
- The input image to classify. It can be raw bytes, an image file, or a URL to an online image.
1196
- labels (`List[str]`):
1197
- List of possible labels, as strings. `len(labels)` must be greater than 1.
1198
- model (`str`, *optional*):
1199
- The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
1200
- Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
1201
-
1202
- Returns:
1203
- `List[Dict]`: List of classification outputs containing the predicted labels and their confidence.
1204
-
1205
- Raises:
1206
- [`InferenceTimeoutError`]:
1207
- If the model is unavailable or the request times out.
1208
- `HTTPError`:
1209
- If the request fails with an HTTP error status code other than HTTP 503.
1210
-
1211
- Example:
1212
- ```py
1213
- >>> from huggingface_hub import InferenceClient
1214
- >>> client = InferenceClient()
1215
-
1216
- >>> client.zero_shot_image_classification(
1217
- ... "https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg",
1218
- ... labels=["dog", "cat", "horse"],
1219
- ... )
1220
- [{"label": "dog", "score": 0.956}, ...]
1221
- ```
1222
- """
1223
-
1224
- # Raise ValueError if fewer than 2 labels are provided
1225
- if len(labels) < 2:
1226
- raise ValueError("You must specify at least 2 classes to compare. Please specify more than 1 class.")
1227
-
1228
- response = self.post(
1229
- json={"image": _b64_encode(image), "parameters": {"candidate_labels": ",".join(labels)}},
1230
- model=model,
1231
- task="zero-shot-image-classification",
1232
- )
1233
- return _bytes_to_dict(response)
1234
-
1235
- def _resolve_url(self, model: Optional[str] = None, task: Optional[str] = None) -> str:
1236
- model = model or self.model
1237
-
1238
- # If model is already a URL, ignore `task` and return directly
1239
- if model is not None and (model.startswith("http://") or model.startswith("https://")):
1240
- return model
1241
-
1242
- # If no model but task is set => fetch the recommended one for this task
1243
- if model is None:
1244
- if task is None:
1245
- raise ValueError(
1246
- "You must specify at least a model (repo_id or URL) or a task, either when instantiating"
1247
- " `InferenceClient` or when making a request."
1248
- )
1249
- model = _get_recommended_model(task)
1250
-
1251
- # Compute InferenceAPI url
1252
- return (
1253
- # Feature-extraction and sentence-similarity are the only cases where we handle models with several tasks.
1254
- f"{INFERENCE_ENDPOINT}/pipeline/{task}/{model}"
1255
- if task in ("feature-extraction", "sentence-similarity")
1256
- # Otherwise, we use the default endpoint
1257
- else f"{INFERENCE_ENDPOINT}/models/{model}"
1258
- )
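For reference, the routing performed by `_resolve_url` above can be summarized in a small standalone sketch. The endpoint constant below is an assumption matching `huggingface_hub`'s `INFERENCE_ENDPOINT`; only the two embedding-style tasks use the task-scoped `pipeline` route, everything else goes through `/models/`.

```py
# Standalone sketch of the URL routing in `_resolve_url` (assumed endpoint constant).
INFERENCE_ENDPOINT = "https://api-inference.huggingface.co"

def resolve_url(model: str, task: str) -> str:
    # Full URLs (e.g. a deployed Inference Endpoint) are returned unchanged.
    if model.startswith(("http://", "https://")):
        return model
    # Feature-extraction and sentence-similarity are the only tasks routed
    # through the task-specific pipeline endpoint.
    if task in ("feature-extraction", "sentence-similarity"):
        return f"{INFERENCE_ENDPOINT}/pipeline/{task}/{model}"
    return f"{INFERENCE_ENDPOINT}/models/{model}"

print(resolve_url("gpt2", "text-generation"))
# https://api-inference.huggingface.co/models/gpt2
```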
spaces/DexterSptizu/drug_interaction/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Drug Interaction
3
- emoji: 📊
4
- colorFrom: blue
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.33.1
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Djdjeuu/MGX-Midjourney-v4/app.py DELETED
@@ -1,8 +0,0 @@
1
- import gradio as gr
2
-
3
- description = """<div>
4
- <img src="https://i.imgur.com/FEA7N1p.png">
5
- </div>
6
- """
7
-
8
- gr.Interface.load("models/prompthero/openjourney", description=description).launch()
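For context, the deleted `app.py` simply wraps a Hub-hosted model in a Gradio interface. A minimal local-run sketch of the same pattern (the `share` flag is illustrative, not part of the original file):

```py
import gradio as gr

# Same pattern as the deleted Space: load the openjourney model from the Hub
# and expose it through a Gradio UI.
demo = gr.Interface.load("models/prompthero/openjourney")
demo.launch(share=False)  # set share=True for a temporary public link
```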
spaces/EAraid12/LoRA-DreamBooth-Training-UI/train_dreambooth_lora.py DELETED
@@ -1,1026 +0,0 @@
1
- #!/usr/bin/env python
2
- # coding=utf-8
3
- #
4
- # This file is adapted from https://github.com/huggingface/diffusers/blob/febaf863026bd014b7a14349336544fc109d0f57/examples/dreambooth/train_dreambooth_lora.py
5
- # The original license is as below:
6
- #
7
- # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
8
- #
9
- # Licensed under the Apache License, Version 2.0 (the "License");
10
- # you may not use this file except in compliance with the License.
11
- # You may obtain a copy of the License at
12
- #
13
- # http://www.apache.org/licenses/LICENSE-2.0
14
- #
15
- # Unless required by applicable law or agreed to in writing, software
16
- # distributed under the License is distributed on an "AS IS" BASIS,
17
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18
- # See the License for the specific language governing permissions and
19
-
20
- import argparse
21
- import hashlib
22
- import logging
23
- import math
24
- import os
25
- import warnings
26
- from pathlib import Path
27
- from typing import Optional
28
-
29
- import numpy as np
30
- import torch
31
- import torch.nn.functional as F
32
- import torch.utils.checkpoint
33
- from torch.utils.data import Dataset
34
-
35
- import datasets
36
- import diffusers
37
- import transformers
38
- from accelerate import Accelerator
39
- from accelerate.logging import get_logger
40
- from accelerate.utils import set_seed
41
- from diffusers import (
42
- AutoencoderKL,
43
- DDPMScheduler,
44
- DiffusionPipeline,
45
- DPMSolverMultistepScheduler,
46
- UNet2DConditionModel,
47
- )
48
- from diffusers.loaders import AttnProcsLayers
49
- from diffusers.models.cross_attention import LoRACrossAttnProcessor
50
- from diffusers.optimization import get_scheduler
51
- from diffusers.utils import check_min_version, is_wandb_available
52
- from diffusers.utils.import_utils import is_xformers_available
53
- from huggingface_hub import HfFolder, Repository, create_repo, whoami
54
- from PIL import Image
55
- from torchvision import transforms
56
- from tqdm.auto import tqdm
57
- from transformers import AutoTokenizer, PretrainedConfig
58
-
59
-
60
- # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
61
- check_min_version("0.12.0.dev0")
62
-
63
- logger = get_logger(__name__)
64
-
65
-
66
- def save_model_card(repo_name, images=None, base_model=str, prompt=str, repo_folder=None):
67
- img_str = ""
68
- for i, image in enumerate(images):
69
- image.save(os.path.join(repo_folder, f"image_{i}.png"))
70
- img_str += f"![img_{i}](./image_{i}.png)\n"
71
-
72
- yaml = f"""
73
- ---
74
- license: creativeml-openrail-m
75
- base_model: {base_model}
76
- tags:
77
- - stable-diffusion
78
- - stable-diffusion-diffusers
79
- - text-to-image
80
- - diffusers
81
- - lora
82
- inference: true
83
- ---
84
- """
85
- model_card = f"""
86
- # LoRA DreamBooth - {repo_name}
87
-
88
- These are LoRA adaptation weights for {repo_name}. The weights were trained on {prompt} using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. \n
89
- {img_str}
90
- """
91
- with open(os.path.join(repo_folder, "README.md"), "w") as f:
92
- f.write(yaml + model_card)
93
-
94
-
95
- def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str):
96
- text_encoder_config = PretrainedConfig.from_pretrained(
97
- pretrained_model_name_or_path,
98
- subfolder="text_encoder",
99
- revision=revision,
100
- )
101
- model_class = text_encoder_config.architectures[0]
102
-
103
- if model_class == "CLIPTextModel":
104
- from transformers import CLIPTextModel
105
-
106
- return CLIPTextModel
107
- elif model_class == "RobertaSeriesModelWithTransformation":
108
- from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
109
-
110
- return RobertaSeriesModelWithTransformation
111
- else:
112
- raise ValueError(f"{model_class} is not supported.")
113
-
114
-
115
- def parse_args(input_args=None):
116
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
117
- parser.add_argument(
118
- "--pretrained_model_name_or_path",
119
- type=str,
120
- default=None,
121
- required=True,
122
- help="Path to pretrained model or model identifier from huggingface.co/models.",
123
- )
124
- parser.add_argument(
125
- "--revision",
126
- type=str,
127
- default=None,
128
- required=False,
129
- help="Revision of pretrained model identifier from huggingface.co/models.",
130
- )
131
- parser.add_argument(
132
- "--tokenizer_name",
133
- type=str,
134
- default=None,
135
- help="Pretrained tokenizer name or path if not the same as model_name",
136
- )
137
- parser.add_argument(
138
- "--instance_data_dir",
139
- type=str,
140
- default=None,
141
- required=True,
142
- help="A folder containing the training data of instance images.",
143
- )
144
- parser.add_argument(
145
- "--class_data_dir",
146
- type=str,
147
- default=None,
148
- required=False,
149
- help="A folder containing the training data of class images.",
150
- )
151
- parser.add_argument(
152
- "--instance_prompt",
153
- type=str,
154
- default=None,
155
- required=True,
156
- help="The prompt with identifier specifying the instance",
157
- )
158
- parser.add_argument(
159
- "--class_prompt",
160
- type=str,
161
- default=None,
162
- help="The prompt to specify images in the same class as provided instance images.",
163
- )
164
- parser.add_argument(
165
- "--validation_prompt",
166
- type=str,
167
- default=None,
168
- help="A prompt that is used during validation to verify that the model is learning.",
169
- )
170
- parser.add_argument(
171
- "--num_validation_images",
172
- type=int,
173
- default=4,
174
- help="Number of images that should be generated during validation with `validation_prompt`.",
175
- )
176
- parser.add_argument(
177
- "--validation_epochs",
178
- type=int,
179
- default=50,
180
- help=(
181
- "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt"
182
- " `args.validation_prompt` multiple times: `args.num_validation_images`."
183
- ),
184
- )
185
- parser.add_argument(
186
- "--with_prior_preservation",
187
- default=False,
188
- action="store_true",
189
- help="Flag to add prior preservation loss.",
190
- )
191
- parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
192
- parser.add_argument(
193
- "--num_class_images",
194
- type=int,
195
- default=100,
196
- help=(
197
- "Minimal class images for prior preservation loss. If there are not enough images already present in"
198
- " class_data_dir, additional images will be sampled with class_prompt."
199
- ),
200
- )
201
- parser.add_argument(
202
- "--output_dir",
203
- type=str,
204
- default="lora-dreambooth-model",
205
- help="The output directory where the model predictions and checkpoints will be written.",
206
- )
207
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
208
- parser.add_argument(
209
- "--resolution",
210
- type=int,
211
- default=512,
212
- help=(
213
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
214
- " resolution"
215
- ),
216
- )
217
- parser.add_argument(
218
- "--center_crop",
219
- default=False,
220
- action="store_true",
221
- help=(
222
- "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
223
- " cropped. The images will be resized to the resolution first before cropping."
224
- ),
225
- )
226
- parser.add_argument(
227
- "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
228
- )
229
- parser.add_argument(
230
- "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
231
- )
232
- parser.add_argument("--num_train_epochs", type=int, default=1)
233
- parser.add_argument(
234
- "--max_train_steps",
235
- type=int,
236
- default=None,
237
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
238
- )
239
- parser.add_argument(
240
- "--checkpointing_steps",
241
- type=int,
242
- default=500,
243
- help=(
244
- "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
245
- " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
246
- " training using `--resume_from_checkpoint`."
247
- ),
248
- )
249
- parser.add_argument(
250
- "--resume_from_checkpoint",
251
- type=str,
252
- default=None,
253
- help=(
254
- "Whether training should be resumed from a previous checkpoint. Use a path saved by"
255
- ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
256
- ),
257
- )
258
- parser.add_argument(
259
- "--gradient_accumulation_steps",
260
- type=int,
261
- default=1,
262
- help="Number of updates steps to accumulate before performing a backward/update pass.",
263
- )
264
- parser.add_argument(
265
- "--gradient_checkpointing",
266
- action="store_true",
267
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
268
- )
269
- parser.add_argument(
270
- "--learning_rate",
271
- type=float,
272
- default=5e-4,
273
- help="Initial learning rate (after the potential warmup period) to use.",
274
- )
275
- parser.add_argument(
276
- "--scale_lr",
277
- action="store_true",
278
- default=False,
279
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
280
- )
281
- parser.add_argument(
282
- "--lr_scheduler",
283
- type=str,
284
- default="constant",
285
- help=(
286
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
287
- ' "constant", "constant_with_warmup"]'
288
- ),
289
- )
290
- parser.add_argument(
291
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
292
- )
293
- parser.add_argument(
294
- "--lr_num_cycles",
295
- type=int,
296
- default=1,
297
- help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
298
- )
299
- parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
300
- parser.add_argument(
301
- "--dataloader_num_workers",
302
- type=int,
303
- default=0,
304
- help=(
305
- "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
306
- ),
307
- )
308
- parser.add_argument(
309
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
310
- )
311
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
312
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
313
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
314
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
315
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
316
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
317
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
318
- parser.add_argument(
319
- "--hub_model_id",
320
- type=str,
321
- default=None,
322
- help="The name of the repository to keep in sync with the local `output_dir`.",
323
- )
324
- parser.add_argument(
325
- "--logging_dir",
326
- type=str,
327
- default="logs",
328
- help=(
329
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
330
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
331
- ),
332
- )
333
- parser.add_argument(
334
- "--allow_tf32",
335
- action="store_true",
336
- help=(
337
- "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
338
- " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
339
- ),
340
- )
341
- parser.add_argument(
342
- "--report_to",
343
- type=str,
344
- default="tensorboard",
345
- help=(
346
- 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
347
- ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
348
- ),
349
- )
350
- parser.add_argument(
351
- "--mixed_precision",
352
- type=str,
353
- default=None,
354
- choices=["no", "fp16", "bf16"],
355
- help=(
356
- "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
357
- " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
358
- " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
359
- ),
360
- )
361
- parser.add_argument(
362
- "--prior_generation_precision",
363
- type=str,
364
- default=None,
365
- choices=["no", "fp32", "fp16", "bf16"],
366
- help=(
367
- "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
368
- " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32."
369
- ),
370
- )
371
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
372
- parser.add_argument(
373
- "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
374
- )
375
-
376
- if input_args is not None:
377
- args = parser.parse_args(input_args)
378
- else:
379
- args = parser.parse_args()
380
-
381
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
382
- if env_local_rank != -1 and env_local_rank != args.local_rank:
383
- args.local_rank = env_local_rank
384
-
385
- if args.with_prior_preservation:
386
- if args.class_data_dir is None:
387
- raise ValueError("You must specify a data directory for class images.")
388
- if args.class_prompt is None:
389
- raise ValueError("You must specify prompt for class images.")
390
- else:
391
- # logger is not available yet
392
- if args.class_data_dir is not None:
393
- warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
394
- if args.class_prompt is not None:
395
- warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
396
-
397
- return args
398
-
399
-
400
- class DreamBoothDataset(Dataset):
401
- """
402
- A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
403
- It pre-processes the images and tokenizes the prompts.
404
- """
405
-
406
- def __init__(
407
- self,
408
- instance_data_root,
409
- instance_prompt,
410
- tokenizer,
411
- class_data_root=None,
412
- class_prompt=None,
413
- size=512,
414
- center_crop=False,
415
- ):
416
- self.size = size
417
- self.center_crop = center_crop
418
- self.tokenizer = tokenizer
419
-
420
- self.instance_data_root = Path(instance_data_root)
421
- if not self.instance_data_root.exists():
422
- raise ValueError("Instance images root doesn't exists.")
423
-
424
- self.instance_images_path = list(Path(instance_data_root).iterdir())
425
- self.num_instance_images = len(self.instance_images_path)
426
- self.instance_prompt = instance_prompt
427
- self._length = self.num_instance_images
428
-
429
- if class_data_root is not None:
430
- self.class_data_root = Path(class_data_root)
431
- self.class_data_root.mkdir(parents=True, exist_ok=True)
432
- self.class_images_path = list(self.class_data_root.iterdir())
433
- self.num_class_images = len(self.class_images_path)
434
- self._length = max(self.num_class_images, self.num_instance_images)
435
- self.class_prompt = class_prompt
436
- else:
437
- self.class_data_root = None
438
-
439
- self.image_transforms = transforms.Compose(
440
- [
441
- transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
442
- transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
443
- transforms.ToTensor(),
444
- transforms.Normalize([0.5], [0.5]),
445
- ]
446
- )
447
-
448
- def __len__(self):
449
- return self._length
450
-
451
- def __getitem__(self, index):
452
- example = {}
453
- instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
454
- if not instance_image.mode == "RGB":
455
- instance_image = instance_image.convert("RGB")
456
- example["instance_images"] = self.image_transforms(instance_image)
457
- example["instance_prompt_ids"] = self.tokenizer(
458
- self.instance_prompt,
459
- truncation=True,
460
- padding="max_length",
461
- max_length=self.tokenizer.model_max_length,
462
- return_tensors="pt",
463
- ).input_ids
464
-
465
- if self.class_data_root:
466
- class_image = Image.open(self.class_images_path[index % self.num_class_images])
467
- if not class_image.mode == "RGB":
468
- class_image = class_image.convert("RGB")
469
- example["class_images"] = self.image_transforms(class_image)
470
- example["class_prompt_ids"] = self.tokenizer(
471
- self.class_prompt,
472
- truncation=True,
473
- padding="max_length",
474
- max_length=self.tokenizer.model_max_length,
475
- return_tensors="pt",
476
- ).input_ids
477
-
478
- return example
479
-
480
-
481
- def collate_fn(examples, with_prior_preservation=False):
482
- input_ids = [example["instance_prompt_ids"] for example in examples]
483
- pixel_values = [example["instance_images"] for example in examples]
484
-
485
- # Concat class and instance examples for prior preservation.
486
- # We do this to avoid doing two forward passes.
487
- if with_prior_preservation:
488
- input_ids += [example["class_prompt_ids"] for example in examples]
489
- pixel_values += [example["class_images"] for example in examples]
490
-
491
- pixel_values = torch.stack(pixel_values)
492
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
493
-
494
- input_ids = torch.cat(input_ids, dim=0)
495
-
496
- batch = {
497
- "input_ids": input_ids,
498
- "pixel_values": pixel_values,
499
- }
500
- return batch
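A minimal sketch of how `DreamBoothDataset` and `collate_fn` above are typically wired into a `DataLoader`. The tokenizer checkpoint and directory names are placeholders, not values taken from the deleted script:

```py
import torch
from transformers import AutoTokenizer

# Placeholder checkpoint; the script itself loads the tokenizer from the pretrained pipeline.
tokenizer = AutoTokenizer.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="tokenizer"
)
train_dataset = DreamBoothDataset(
    instance_data_root="instance_images",   # folder with a few subject photos
    instance_prompt="a photo of sks dog",   # placeholder instance prompt
    tokenizer=tokenizer,
    size=512,
)
train_dataloader = torch.utils.data.DataLoader(
    train_dataset,
    batch_size=1,
    shuffle=True,
    collate_fn=lambda examples: collate_fn(examples, with_prior_preservation=False),
)
batch = next(iter(train_dataloader))  # {"input_ids": ..., "pixel_values": ...}
```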
501
-
502
-
503
- class PromptDataset(Dataset):
504
- "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
505
-
506
- def __init__(self, prompt, num_samples):
507
- self.prompt = prompt
508
- self.num_samples = num_samples
509
-
510
- def __len__(self):
511
- return self.num_samples
512
-
513
- def __getitem__(self, index):
514
- example = {}
515
- example["prompt"] = self.prompt
516
- example["index"] = index
517
- return example
518
-
519
-
520
- def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
521
- if token is None:
522
- token = HfFolder.get_token()
523
- if organization is None:
524
- username = whoami(token)["name"]
525
- return f"{username}/{model_id}"
526
- else:
527
- return f"{organization}/{model_id}"
528
-
529
-
530
- def main(args):
-     logging_dir = Path(args.output_dir, args.logging_dir)
-
-     accelerator = Accelerator(
-         gradient_accumulation_steps=args.gradient_accumulation_steps,
-         mixed_precision=args.mixed_precision,
-         log_with=args.report_to,
-         logging_dir=logging_dir,
-     )
-
-     if args.report_to == "wandb":
-         if not is_wandb_available():
-             raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
-         import wandb
-
-     # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
-     # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
-     # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
-     # Make one log on every process with the configuration for debugging.
-     logging.basicConfig(
-         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
-         datefmt="%m/%d/%Y %H:%M:%S",
-         level=logging.INFO,
-     )
-     logger.info(accelerator.state, main_process_only=False)
-     if accelerator.is_local_main_process:
-         datasets.utils.logging.set_verbosity_warning()
-         transformers.utils.logging.set_verbosity_warning()
-         diffusers.utils.logging.set_verbosity_info()
-     else:
-         datasets.utils.logging.set_verbosity_error()
-         transformers.utils.logging.set_verbosity_error()
-         diffusers.utils.logging.set_verbosity_error()
-
-     # If passed along, set the training seed now.
-     if args.seed is not None:
-         set_seed(args.seed)
-
-     # Generate class images if prior preservation is enabled.
-     if args.with_prior_preservation:
-         class_images_dir = Path(args.class_data_dir)
-         if not class_images_dir.exists():
-             class_images_dir.mkdir(parents=True)
-         cur_class_images = len(list(class_images_dir.iterdir()))
-
-         if cur_class_images < args.num_class_images:
-             torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
-             if args.prior_generation_precision == "fp32":
-                 torch_dtype = torch.float32
-             elif args.prior_generation_precision == "fp16":
-                 torch_dtype = torch.float16
-             elif args.prior_generation_precision == "bf16":
-                 torch_dtype = torch.bfloat16
-             pipeline = DiffusionPipeline.from_pretrained(
-                 args.pretrained_model_name_or_path,
-                 torch_dtype=torch_dtype,
-                 safety_checker=None,
-                 revision=args.revision,
-             )
-             pipeline.set_progress_bar_config(disable=True)
-
-             num_new_images = args.num_class_images - cur_class_images
-             logger.info(f"Number of class images to sample: {num_new_images}.")
-
-             sample_dataset = PromptDataset(args.class_prompt, num_new_images)
-             sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
-
-             sample_dataloader = accelerator.prepare(sample_dataloader)
-             pipeline.to(accelerator.device)
-
-             for example in tqdm(
-                 sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
-             ):
-                 images = pipeline(example["prompt"]).images
-
-                 for i, image in enumerate(images):
-                     hash_image = hashlib.sha1(image.tobytes()).hexdigest()
-                     image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
-                     image.save(image_filename)
-
-             del pipeline
-             if torch.cuda.is_available():
-                 torch.cuda.empty_cache()
-
-     # Handle the repository creation
-     if accelerator.is_main_process:
-         if args.push_to_hub:
-             if args.hub_model_id is None:
-                 repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
-             else:
-                 repo_name = args.hub_model_id
-
-             create_repo(repo_name, exist_ok=True, token=args.hub_token)
-             repo = Repository(args.output_dir, clone_from=repo_name, token=args.hub_token)
-
-             with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
-                 if "step_*" not in gitignore:
-                     gitignore.write("step_*\n")
-                 if "epoch_*" not in gitignore:
-                     gitignore.write("epoch_*\n")
-         elif args.output_dir is not None:
-             os.makedirs(args.output_dir, exist_ok=True)
-
-     # Load the tokenizer
-     if args.tokenizer_name:
-         tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
-     elif args.pretrained_model_name_or_path:
-         tokenizer = AutoTokenizer.from_pretrained(
-             args.pretrained_model_name_or_path,
-             subfolder="tokenizer",
-             revision=args.revision,
-             use_fast=False,
-         )
-
-     # import correct text encoder class
-     text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision)
-
-     # Load scheduler and models
-     noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
-     text_encoder = text_encoder_cls.from_pretrained(
-         args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
-     )
-     vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
-     unet = UNet2DConditionModel.from_pretrained(
-         args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
-     )
-
-     # We only train the additional adapter LoRA layers
-     vae.requires_grad_(False)
-     text_encoder.requires_grad_(False)
-     unet.requires_grad_(False)
-
-     # For mixed precision training we cast the text_encoder and vae weights to half-precision
-     # as these models are only used for inference, keeping weights in full precision is not required.
-     weight_dtype = torch.float32
-     if accelerator.mixed_precision == "fp16":
-         weight_dtype = torch.float16
-     elif accelerator.mixed_precision == "bf16":
-         weight_dtype = torch.bfloat16
-
-     # Move unet, vae and text_encoder to device and cast to weight_dtype
-     unet.to(accelerator.device, dtype=weight_dtype)
-     vae.to(accelerator.device, dtype=weight_dtype)
-     text_encoder.to(accelerator.device, dtype=weight_dtype)
-
-     if args.enable_xformers_memory_efficient_attention:
-         if is_xformers_available():
-             unet.enable_xformers_memory_efficient_attention()
-         else:
-             raise ValueError("xformers is not available. Make sure it is installed correctly")
-
-     # now we will add new LoRA weights to the attention layers
-     # It's important to realize here how many attention weights will be added and of which sizes
-     # The sizes of the attention layers consist only of two different variables:
-     # 1) - the "hidden_size", which is increased according to `unet.config.block_out_channels`.
-     # 2) - the "cross attention size", which is set to `unet.config.cross_attention_dim`.
-
-     # Let's first see how many attention processors we will have to set.
-     # For Stable Diffusion, it should be equal to:
-     # - down blocks (2x attention layers) * (2x transformer layers) * (3x down blocks) = 12
-     # - mid blocks (2x attention layers) * (1x transformer layers) * (1x mid blocks) = 2
-     # - up blocks (2x attention layers) * (3x transformer layers) * (3x down blocks) = 18
-     # => 32 layers
-
-     # Set correct lora layers
-     lora_attn_procs = {}
-     for name in unet.attn_processors.keys():
-         cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim
-         if name.startswith("mid_block"):
-             hidden_size = unet.config.block_out_channels[-1]
-         elif name.startswith("up_blocks"):
-             block_id = int(name[len("up_blocks.")])
-             hidden_size = list(reversed(unet.config.block_out_channels))[block_id]
-         elif name.startswith("down_blocks"):
-             block_id = int(name[len("down_blocks.")])
-             hidden_size = unet.config.block_out_channels[block_id]
-
-         lora_attn_procs[name] = LoRACrossAttnProcessor(
-             hidden_size=hidden_size, cross_attention_dim=cross_attention_dim
-         )
-
-     unet.set_attn_processor(lora_attn_procs)
-     lora_layers = AttnProcsLayers(unet.attn_processors)
-
-     accelerator.register_for_checkpointing(lora_layers)
-
-     if args.scale_lr:
-         args.learning_rate = (
-             args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
-         )
-
-     # Enable TF32 for faster training on Ampere GPUs,
-     # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
-     if args.allow_tf32:
-         torch.backends.cuda.matmul.allow_tf32 = True
-
-     # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
-     if args.use_8bit_adam:
-         try:
-             import bitsandbytes as bnb
-         except ImportError:
-             raise ImportError(
-                 "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
-             )
-
-         optimizer_class = bnb.optim.AdamW8bit
-     else:
-         optimizer_class = torch.optim.AdamW
-
-     # Optimizer creation
-     optimizer = optimizer_class(
-         lora_layers.parameters(),
-         lr=args.learning_rate,
-         betas=(args.adam_beta1, args.adam_beta2),
-         weight_decay=args.adam_weight_decay,
-         eps=args.adam_epsilon,
-     )
-
-     # Dataset and DataLoaders creation:
-     train_dataset = DreamBoothDataset(
-         instance_data_root=args.instance_data_dir,
-         instance_prompt=args.instance_prompt,
-         class_data_root=args.class_data_dir if args.with_prior_preservation else None,
-         class_prompt=args.class_prompt,
-         tokenizer=tokenizer,
-         size=args.resolution,
-         center_crop=args.center_crop,
-     )
-
-     train_dataloader = torch.utils.data.DataLoader(
-         train_dataset,
-         batch_size=args.train_batch_size,
-         shuffle=True,
-         collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
-         num_workers=args.dataloader_num_workers,
-     )
-
-     # Scheduler and math around the number of training steps.
-     overrode_max_train_steps = False
-     num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
-     if args.max_train_steps is None:
-         args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
-         overrode_max_train_steps = True
-
-     lr_scheduler = get_scheduler(
-         args.lr_scheduler,
-         optimizer=optimizer,
-         num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
-         num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
-         num_cycles=args.lr_num_cycles,
-         power=args.lr_power,
-     )
-
-     # Prepare everything with our `accelerator`.
-     lora_layers, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
-         lora_layers, optimizer, train_dataloader, lr_scheduler
-     )
-
-     # We need to recalculate our total training steps as the size of the training dataloader may have changed.
-     num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
-     if overrode_max_train_steps:
-         args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
-     # Afterwards we recalculate our number of training epochs
-     args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
-
-     # We need to initialize the trackers we use, and also store our configuration.
-     # The trackers initializes automatically on the main process.
-     if accelerator.is_main_process:
-         accelerator.init_trackers("dreambooth-lora", config=vars(args))
-
-     # Train!
-     total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
-
-     logger.info("***** Running training *****")
-     logger.info(f" Num examples = {len(train_dataset)}")
-     logger.info(f" Num batches each epoch = {len(train_dataloader)}")
-     logger.info(f" Num Epochs = {args.num_train_epochs}")
-     logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
-     logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
-     logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
-     logger.info(f" Total optimization steps = {args.max_train_steps}")
-     global_step = 0
-     first_epoch = 0
-
-     # Potentially load in the weights and states from a previous save
-     if args.resume_from_checkpoint:
-         if args.resume_from_checkpoint != "latest":
-             path = os.path.basename(args.resume_from_checkpoint)
-         else:
-             # Get the most recent checkpoint
-             dirs = os.listdir(args.output_dir)
-             dirs = [d for d in dirs if d.startswith("checkpoint")]
-             dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
-             path = dirs[-1] if len(dirs) > 0 else None
-
-         if path is None:
-             accelerator.print(
-                 f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
-             )
-             args.resume_from_checkpoint = None
-         else:
-             accelerator.print(f"Resuming from checkpoint {path}")
-             accelerator.load_state(os.path.join(args.output_dir, path))
-             global_step = int(path.split("-")[1])
-
-             resume_global_step = global_step * args.gradient_accumulation_steps
-             first_epoch = global_step // num_update_steps_per_epoch
-             resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps)
-
-     # Only show the progress bar once on each machine.
-     progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process)
-     progress_bar.set_description("Steps")
-
-     for epoch in range(first_epoch, args.num_train_epochs):
-         unet.train()
-         for step, batch in enumerate(train_dataloader):
-             # Skip steps until we reach the resumed step
-             if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
-                 if step % args.gradient_accumulation_steps == 0:
-                     progress_bar.update(1)
-                 continue
-
-             with accelerator.accumulate(unet):
-                 # Convert images to latent space
-                 latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
-                 latents = latents * 0.18215
-
-                 # Sample noise that we'll add to the latents
-                 noise = torch.randn_like(latents)
-                 bsz = latents.shape[0]
-                 # Sample a random timestep for each image
-                 timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
-                 timesteps = timesteps.long()
-
-                 # Add noise to the latents according to the noise magnitude at each timestep
-                 # (this is the forward diffusion process)
-                 noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
-
-                 # Get the text embedding for conditioning
-                 encoder_hidden_states = text_encoder(batch["input_ids"])[0]
-
-                 # Predict the noise residual
-                 model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
-
-                 # Get the target for loss depending on the prediction type
-                 if noise_scheduler.config.prediction_type == "epsilon":
-                     target = noise
-                 elif noise_scheduler.config.prediction_type == "v_prediction":
-                     target = noise_scheduler.get_velocity(latents, noise, timesteps)
-                 else:
-                     raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
-
-                 if args.with_prior_preservation:
-                     # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
-                     model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
-                     target, target_prior = torch.chunk(target, 2, dim=0)
-
-                     # Compute instance loss
-                     loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
-
-                     # Compute prior loss
-                     prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
-
-                     # Add the prior loss to the instance loss.
-                     loss = loss + args.prior_loss_weight * prior_loss
-                 else:
-                     loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
-
-                 accelerator.backward(loss)
-                 if accelerator.sync_gradients:
-                     params_to_clip = lora_layers.parameters()
-                     accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
-                 optimizer.step()
-                 lr_scheduler.step()
-                 optimizer.zero_grad()
-
-             # Checks if the accelerator has performed an optimization step behind the scenes
-             if accelerator.sync_gradients:
-                 progress_bar.update(1)
-                 global_step += 1
-
-                 if global_step % args.checkpointing_steps == 0:
-                     if accelerator.is_main_process:
-                         save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
-                         accelerator.save_state(save_path)
-                         logger.info(f"Saved state to {save_path}")
-
-             logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
-             progress_bar.set_postfix(**logs)
-             accelerator.log(logs, step=global_step)
-
-             if global_step >= args.max_train_steps:
-                 break
-
-         if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
-             logger.info(
-                 f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
-                 f" {args.validation_prompt}."
-             )
-             # create pipeline
-             pipeline = DiffusionPipeline.from_pretrained(
-                 args.pretrained_model_name_or_path,
-                 unet=accelerator.unwrap_model(unet),
-                 text_encoder=accelerator.unwrap_model(text_encoder),
-                 revision=args.revision,
-                 torch_dtype=weight_dtype,
-             )
-             pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
-             pipeline = pipeline.to(accelerator.device)
-             pipeline.set_progress_bar_config(disable=True)
-
-             # run inference
-             generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
-             prompt = args.num_validation_images * [args.validation_prompt]
-             images = pipeline(prompt, num_inference_steps=25, generator=generator).images
-
-             for tracker in accelerator.trackers:
-                 if tracker.name == "tensorboard":
-                     np_images = np.stack([np.asarray(img) for img in images])
-                     tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
-                 if tracker.name == "wandb":
-                     tracker.log(
-                         {
-                             "validation": [
-                                 wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
-                                 for i, image in enumerate(images)
-                             ]
-                         }
-                     )
-
-             del pipeline
-             torch.cuda.empty_cache()
-
-     # Save the lora layers
-     accelerator.wait_for_everyone()
-     if accelerator.is_main_process:
-         unet = unet.to(torch.float32)
-         unet.save_attn_procs(args.output_dir)
-
-         # Final inference
-         # Load previous pipeline
-         pipeline = DiffusionPipeline.from_pretrained(
-             args.pretrained_model_name_or_path, revision=args.revision, torch_dtype=weight_dtype
-         )
-         pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
-         pipeline = pipeline.to(accelerator.device)
-
-         # load attention processors
-         pipeline.unet.load_attn_procs(args.output_dir)
-
-         # run inference
-         if args.validation_prompt and args.num_validation_images > 0:
-             generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
-             prompt = args.num_validation_images * [args.validation_prompt]
-             images = pipeline(prompt, num_inference_steps=25, generator=generator).images
-
-             test_image_dir = Path(args.output_dir) / 'test_images'
-             test_image_dir.mkdir()
-             for i, image in enumerate(images):
-                 out_path = test_image_dir / f'image_{i}.png'
-                 image.save(out_path)
-
-             for tracker in accelerator.trackers:
-                 if tracker.name == "tensorboard":
-                     np_images = np.stack([np.asarray(img) for img in images])
-                     tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC")
-                 if tracker.name == "wandb":
-                     tracker.log(
-                         {
-                             "test": [
-                                 wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
-                                 for i, image in enumerate(images)
-                             ]
-                         }
-                     )
-
-         if args.push_to_hub:
-             save_model_card(
-                 repo_name,
-                 images=images,
-                 base_model=args.pretrained_model_name_or_path,
-                 prompt=args.instance_prompt,
-                 repo_folder=args.output_dir,
-             )
-             repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
-
-     accelerator.end_training()
-
-
- if __name__ == "__main__":
-     args = parse_args()
-     main(args)