parquet-converter committed
Commit c3316cf · 1 Parent(s): 503155d

Update parquet files (step 15 of 476)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/123Kumar/vits-uma-genshin-honkai123/Docker/vits.sh +0 -20
  2. spaces/1gistliPinn/ChatGPT4/Examples/Badmash No.1 movie free download kickass torrent Find out why this movie is a must-watch for action lovers.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Descargar Gateway B2 Teacher Book Pdf LINK.md +0 -68
  4. spaces/1gistliPinn/ChatGPT4/Examples/Download Film Si Doel Anak Sekolahan Full 14 BEST.md +0 -18
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Become the Drift King with Drift Clash Online Racing Mod APK Android 1.md +0 -116
  6. spaces/1phancelerku/anime-remove-background/Download Traffic Racer MOD APK for Android The Ultimate Racing Experience.md +0 -94
  7. spaces/1phancelerku/anime-remove-background/FIFA Mobile v18.1.01 MOD APK Play in World Cup Stadiums with Official Licenses.md +0 -132
  8. spaces/7hao/bingo/Dockerfile +0 -36
  9. spaces/801artistry/RVC801/diffq/__init__.py +0 -18
  10. spaces/A00001/bingothoo/src/components/header.tsx +0 -12
  11. spaces/AIConsultant/MusicGen/audiocraft/__init__.py +0 -26
  12. spaces/AIFILMS/generate_human_motion/pyrender/setup.py +0 -76
  13. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/predict.py +0 -90
  14. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb32-coslr-preciseBN_in1k.py +0 -13
  15. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/create_configs.py +0 -13
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/chart/Factory.js +0 -13
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/YAMLMake.js +0 -35
  18. spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/upfirdn2d.py +0 -409
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/transformer_temporal.md +0 -11
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/zh/quicktour.md +0 -331
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/transformer_temporal.py +0 -179
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py +0 -1932
  23. spaces/Andy1621/uniformer_image_detection/configs/vfnet/vfnet_r50_fpn_1x_coco.py +0 -108
  24. spaces/Andy1621/uniformer_image_detection/mmdet/datasets/voc.py +0 -93
  25. spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r50-d8_512x512_160k_ade20k.py +0 -6
  26. spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py +0 -2
  27. spaces/Andy1621/uniformerv2_demo/transforms.py +0 -443
  28. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/start_wsl.bat +0 -11
  29. spaces/Arnx/MusicGenXvAKN/tests/common_utils/wav_utils.py +0 -32
  30. spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/tuneavideo_text2video.py +0 -153
  31. spaces/AsakuraMizu/moe-tts/text/mandarin.py +0 -329
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/glibc.py +0 -88
  33. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py +0 -151
  34. spaces/Axolotlily/Interpolate/README.md +0 -13
  35. spaces/BasToTheMax/22h-vintedois-diffusion-v0-1/README.md +0 -12
  36. spaces/Benebene/Chat-question-answering/app.py +0 -9
  37. spaces/Benson/text-generation/Examples/Buscar En La Lista De Miembros.md +0 -82
  38. spaces/Benson/text-generation/Examples/Choque Mini Descarga Pc.md +0 -63
  39. spaces/Benson/text-generation/Examples/Descargar Fonte Clash Royale.md +0 -61
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/__init__.py +0 -23
  41. spaces/CVPR/LIVE/pybind11/tools/check-style.sh +0 -44
  42. spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/transform_reduce.h +0 -22
  43. spaces/CVPR/LIVE/thrust/thrust/unique.h +0 -968
  44. spaces/Cat125/text-generator-v2/utils.py +0 -36
  45. spaces/Celestinian/Topic-Detection/app.py +0 -39
  46. spaces/ChandraMohanNayal/AutoGPT/autogpt/speech/base.py +0 -50
  47. spaces/ChongCJ/fish/README.md +0 -13
  48. spaces/Clebersla/RVC_V2_Huggingface_Version/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py +0 -97
  49. spaces/CuriousDolphin/MobileSAM/app.py +0 -319
  50. spaces/Cybsechuman/Consistency_analysis/README.md +0 -13
spaces/123Kumar/vits-uma-genshin-honkai123/Docker/vits.sh DELETED
@@ -1,20 +0,0 @@
- #!/bin/bash
- run() {
- echo -e "\033[32m已完成初始化,启动服务...\033[0m"
- python3 /app/vits-uma-genshin-honkai/app.py
- }
- install() {
- echo -e "\033[33m正在初始化:安装依赖....\033[0m"
- pip install -r /app/vits-uma-genshin-honkai/requirements.txt -i https://mirrors.ustc.edu.cn/pypi/web/simple
- echo -e "\033[33m正在下载模型....\033[0m"
- rm -f /app/vits-uma-genshin-honkai/model/G_953000.pth
- wget -O /app/vits-uma-genshin-honkai/model/G_953000.pth https://huggingface.co/spaces/ikechan8370/vits-uma-genshin-honkai/resolve/main/model/G_953000.pth
- echo -e "\033[32m初始化完成!\033[0m"
- run
- }
-
- if [ ! -f "/app/vits-uma-genshin-honkai/model/G_953000.pth" ] || [ "$(stat -c%s "/app/vits-uma-genshin-honkai/model/G_953000.pth")" -lt 10000 ]; then
- install
- else
- run
- fi
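The hunk above removes the Space's Docker entrypoint. For readability, here is a commented sketch of what that script did, reconstructed from the deleted lines; the paths and URLs are copied from the diff, while the comments and the English renderings of the status messages are additions.

```bash
#!/bin/bash
# Sketch of the deleted Docker/vits.sh entrypoint, reconstructed from the hunk above.

run() {
    echo -e "\033[32m已完成初始化,启动服务...\033[0m"   # "Initialization done, starting the service..."
    python3 /app/vits-uma-genshin-honkai/app.py
}

install() {
    echo -e "\033[33m正在初始化:安装依赖....\033[0m"   # "Initializing: installing dependencies..."
    pip install -r /app/vits-uma-genshin-honkai/requirements.txt -i https://mirrors.ustc.edu.cn/pypi/web/simple
    echo -e "\033[33m正在下载模型....\033[0m"   # "Downloading the model..."
    rm -f /app/vits-uma-genshin-honkai/model/G_953000.pth
    wget -O /app/vits-uma-genshin-honkai/model/G_953000.pth https://huggingface.co/spaces/ikechan8370/vits-uma-genshin-honkai/resolve/main/model/G_953000.pth
    echo -e "\033[32m初始化完成!\033[0m"   # "Initialization complete!"
    run
}

# Run install first if the checkpoint is missing or implausibly small (< 10000 bytes); otherwise start directly.
if [ ! -f "/app/vits-uma-genshin-honkai/model/G_953000.pth" ] || [ "$(stat -c%s "/app/vits-uma-genshin-honkai/model/G_953000.pth")" -lt 10000 ]; then
    install
else
    run
fi
```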
spaces/1gistliPinn/ChatGPT4/Examples/Badmash No.1 movie free download kickass torrent Find out why this movie is a must-watch for action lovers.md DELETED
@@ -1,6 +0,0 @@
- <br />
- <p>1.Torrentz-Torrentz is one of the most smooth, powerful and popular platforms which makes it possible for you to download video song torrents at free of cost. This platform enables you to enter your search query and then select one of the many options of video songs provided. <br />Download link:</p>
- <p>1.The first step is to download a torrent client or software like Bittorrent. Bittorrent can be downloaded by going to its official website and then clicking on the download link which is suitable for your Operating system. Make sure you download the latest free version of the platform.</p>
- <h2>Badmash No.1 movie free download kickass torrent</h2><br /><p><b><b>Download File</b> &#10004; <a href="https://imgfil.com/2uxY1Q">https://imgfil.com/2uxY1Q</a></b></p><br /><br /> aaccfb2cb3<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Descargar Gateway B2 Teacher Book Pdf LINK.md DELETED
@@ -1,68 +0,0 @@
- <h2>Descargar Gateway B2 Teacher Book Pdf</h2><br /><p><b><b>Download</b> &#10038; <a href="https://imgfil.com/2uy10O">https://imgfil.com/2uy10O</a></b></p><br /><br />
-
- Gateway-B2-teachers.pdf - Free download as PDF File (.pdf) or read online for . 125 Units 79 Unit 10135 Classroom audio recordings 145 Workbook answer key 155 ... Spotlight 2nd grade workbook 160 Workbook key answer workbook ... Complete with the textbook Spheres English Language.
- 2 grade ...
- Buy workbook for the textbook Spotlight (English in Focus) grade 2, online gdz.
- In the workbook for the English language for 2nd grade are ready answers to the exercises for the textbook Spotlight.
- 2 class.
- Author: Afanasyeva, Mikheeva, Baranova, Vaulina ...
- English 2nd grade English ...
- Workbook for the textbook Spotlight ...
- 2nd grade.
- Workbook.
- For the textbook on ...
- - Your book
- For the textbook "Informatics" for grade 5 (M.: BINOM. ...
- Workbook is part of the teaching kit.
- The workbook.
- For the textbook "Informatics" for grade 5
- Buy the book "Workbook.
- Workbook for the textbook "Informatics for grade 5" (Lutceva E.) in the online store My-shop.ru.
- Low price, delivery ...
- Informatics.
- 5 grade.
- Workbook for the textbook L.L.
- Description:
- The workbook is part of the system of educational and methodical sets Algorithm of Success and is designed for the textbook L.L.Bosova, A.Y.Bosova on computer science for grade 5.
- The workbook includes exercises that allow students to consolidate and develop their programming skills, learn algorithms for solving typical problems, and perform creative and research tasks.
- The workbook is designed for computer science lessons in grade 5.
- Printable and downloadable version
- The workbook is a teaching aid.
- It contains .
- The workbook is part of the Computing curriculum for grades 5-6, along with the
- The workbook for the 5th grade is a part of the ATC for the 5th-6th grades
- The workbook for the 6th grade is an integral part of the informatics textbook for grades 5-6, together with
- The workbook for the 6th grade is an integral part of the informatics textbook for grades 5-6 together with the English language curriculum.
- The workbook for the 5th grade is an integral part of the informatics textbook for grades 5-6 together with the 8th grade and the 6th grade
- The workbook for the 4th grade is an integral part of the informatics textbook for 3rd-4th grades, together with
- The workbook for the 5th grade is an integral part of the informatics textbook for grades 5-6, along with
- Grade 2.
- In 2 parts.
- Part 1. FGOS.
- Matveeva N.V.
- FGOS.
- Workbook for grade 3 is part of the workbook on computer science for children.
- Educational literature in the online store Book24.
- Delivery in Kazakhstan.
- The textbook and workbook for 6th grade is part of the "Information science textbook for 5.
- (The textbook, the workbook, the collection of problems, the electronic appendix) and
- For the 6th grade, and also a manual for the teacher.
- The structure of the workbook includes: - a textbook in two parts ("Informatics.
- Bosova); - book for projects and creative works (authors: A.G. Gein, A.I. Senokosov, N.A. Yunerman); - collection of tasks and tests (author: N.A. Rodichev); - teaching aid for teachers (authors: A.V. Goriachev, K.I. Gorina, N.I. Suvorova, T.O. Volkova).
- Informatics textbooks for grades 5-8 by A.G. Gein and A.I. Senokosov are the continuation of the informatics textbooks for the elementary school.
- The 5th grade textbook studies information processes, information systems, information technologies, as well as the theoretical basics of information security.
- The textbook for 6th grade explores the logical, physical, and operational foundations of computers, information technology, and word processing technology.
- The textbook for grade 7 studies the logical foundations of the computer, information technology for processing graphic information and multimedia, computer technology for creating Web pages, network technology for processing text and graphic information, and information modeling technology.
- The grade 8 textbook explores models and structures of information systems, information technologies for numerical information processing, and information processing technologies in spreadsheets.
- The textbook for Grade 9 contains a lot of information on Information and Communication Technologies, Communication Technologies, and Informatics and ICT: Preparing for the Unified State Exam.
- It deals with technology of Web-pages creation, models and structures of different information systems, information and communication technologies, providing creation and processing of text documents by word-processing tools, and technology of information processing in electronic tables.
- In addition, the textbook covers the technologies of working with databases, creating presentations, preparing publications on Web pages, creating and processing audio and video files, information retrieval on the Internet, etc.
- Examples of different technologies and tools for working with information systems and computer networks are given in the textbook.
- Each chapter ends with self-check questions, tasks for self-check, variants of independent and laboratory works.
- For students of higher education institutions on economic specialties.
- Will be useful to students of institutions of general secondary education in preparation for centralized testing in computer science in grades 9 and 11.
- Corresponds to the current requirements of the Federal state educational standard of secondary vocational education and professional requirements.
- For students studying computer science in technical specialties and for teachers 8a78ff9644<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Download Film Si Doel Anak Sekolahan Full 14 BEST.md DELETED
@@ -1,18 +0,0 @@
-
- <h1>Download Film Si Doel Anak Sekolahan Full 14: Lagi Lagi Huru Hara</h1>
- <p>Si Doel Anak Sekolahan adalah sinetron Indonesia yang pertama kali ditayangkan oleh stasiun TV RCTI pada tahun 1994. Disutradarai dan dibintangi oleh Rano Karno sebagai Doel, sinetron ini berkisah mengenai kehidupan Doel dan keluarganya, keluarga Betawi yang tetap mempertahankan nilai-nilai tradisional meskipun hidup di tengah-tengah arus perkotaan dan modernisasi.</p>
- <h2>download film si doel anak sekolahan full 14</h2><br /><p><b><b>Download File</b> &#128279; <a href="https://imgfil.com/2uxXsJ">https://imgfil.com/2uxXsJ</a></b></p><br /><br />
- <p>Sinetron ini memiliki banyak penggemar yang setia mengikuti kisah cinta segitiga antara Doel, Zaenab, dan Sarah. Selain itu, sinetron ini juga menyajikan berbagai adegan lucu dan mengharukan yang melibatkan keluarga dan teman-teman Doel.</p>
- <p>Salah satu episode yang paling ditunggu-tunggu oleh para penggemar adalah episode 14 yang berjudul "Lagi Lagi Huru Hara". Dalam episode ini, Doel harus menghadapi berbagai masalah yang menimpa dirinya dan orang-orang terdekatnya.</p>
- <p>Doel harus berurusan dengan polisi karena dituduh mencuri sepeda motor milik Pak RT. Sementara itu, Zaenab harus menanggung malu karena foto-foto mesranya dengan Doel tersebar di media sosial. Sarah juga tidak kalah sial karena harus menerima kenyataan bahwa ayahnya meninggal dunia akibat serangan jantung.</p>
- <p>Bagaimana nasib Doel dan keluarganya? Apakah mereka bisa melewati semua cobaan yang datang? Bagaimana pula hubungan Doel dengan Zaenab dan Sarah?</p>
- <p></p>
- <p>Jika Anda penasaran dengan jawabannya, Anda bisa download film si doel anak sekolahan full 14 di sini. Anda bisa menonton episode ini secara gratis dan mudah tanpa perlu mendaftar atau membayar biaya apapun.</p>
- <p>Download film si doel anak sekolahan full 14 sekarang juga dan nikmati kisah seru dan menghibur dari Doel dan keluarganya. Jangan lupa untuk berbagi link download ini dengan teman-teman Anda yang juga suka dengan sinetron Si Doel Anak Sekolahan.</p>
-
- <p>Episode 14 ini dimulai dengan adegan Doel yang sedang berada di kantor polisi bersama Sabeni dan Mandra. Mereka dituduh mencuri sepeda motor milik Pak RT yang sebenarnya adalah milik Doel sendiri. Doel harus menjelaskan panjang lebar bahwa sepeda motor itu adalah hadiah dari Zaenab yang ia simpan di rumah Pak RT karena takut dicuri di rumahnya.</p>
- <p>Sementara itu, Zaenab yang sedang berada di kantor juga mendapat masalah besar. Foto-foto mesranya dengan Doel yang diambil oleh Sarah secara diam-diam telah tersebar di media sosial oleh teman-temannya yang iri. Zaenab merasa malu dan marah karena reputasinya sebagai wanita baik-baik tercoreng. Ia pun mencari tahu siapa yang menyebarkan foto-foto itu dan berniat untuk melaporkannya ke polisi.</p>
- <p>Di sisi lain, Sarah yang sedang berada di Belanda mendapat kabar buruk dari ibunya. Ayahnya, Hans, telah meninggal dunia akibat serangan jantung. Sarah sangat terpukul dan bingung harus bagaimana. Ia ingin segera pulang ke Indonesia untuk mengurus jenazah ayahnya, tetapi ia juga tidak ingin meninggalkan Doel yang masih ia cintai.</p>
- <p>Akankah Doel bisa keluar dari kantor polisi tanpa masalah? Apakah Zaenab bisa menemukan pelaku penyebar foto-foto mesranya dengan Doel? Bagaimana pula nasib Sarah yang harus menghadapi kematian ayahnya? Temukan jawabannya dengan download film si doel anak sekolahan full 14 di sini.</p> d5da3c52bf<br />
- <br />
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Become the Drift King with Drift Clash Online Racing Mod APK Android 1.md DELETED
@@ -1,116 +0,0 @@
-
- <h1>Drift Clash Online Racing Mod APK Android 1: A Review</h1>
- <p>If you are a fan of drift racing games, you might have heard of Drift Clash Online Racing, a game that offers real-time battles and realistic physics. But did you know that you can enjoy this game even more with the modded version from Mod APK Android 1? In this article, we will review Drift Clash Online Racing Mod APK Android 1 and tell you why you should try it.</p>
- <h2>What is Drift Clash Online Racing?</h2>
- <p>Drift Clash Online Racing is a drift racing game that was developed by EasyWays and released in 2018. It is the first drift racing game with real-time battles and realistic physics. You can compete with other players in online multiplayer mode and show off your drifting skills. You can also customize your car with various parts and paint jobs, and collect most wanted cars from different eras.</p>
- <h2>drift clash online racing mod apk android 1</h2><br /><p><b><b>Download File</b> &#10004; <a href="https://urlin.us/2uSVQW">https://urlin.us/2uSVQW</a></b></p><br /><br />
- <h3>Features of the game</h3>
- <ul>
- <li>Real-time multiplayer mode with up to 10 players</li>
- <li>Realistic physics and car handling</li>
- <li>33 cars from different categories and eras</li>
- <li>Customization options for your car</li>
- <li>Free-roam mode where you can explore the map and practice your drifts</li>
- <li>Retro style graphics and sound effects</li>
- </ul>
- <h3>How to play the game</h3>
- <p>The game is easy to play but hard to master. You can control your car with simple touch buttons or tilt your device. You can also adjust the sensitivity and steering angle in the settings. The goal is to drift as much as possible and earn points. The more you drift, the more boost you get. You can use the boost to speed up and overtake your opponents. You can also perform tricks like donuts, spins, and jumps to earn extra points. The player with the most points at the end of the race wins.</p>
- <h2>What is Mod APK Android 1?</h2>
- <p>Mod APK Android 1 is a website that provides modded versions of various Android games and apps. A modded version is a modified version that has some features or functions that are not available in the original version. For example, a modded version may have unlimited money, unlocked items, or no ads.</p>
- <h3>Benefits of using Mod APK Android 1</h3>
- <ul>
- <li>You can access premium features or items for free</li>
- <li>You can enjoy the game without any restrictions or limitations</li>
- <li>You can save your time and money by not having to spend real money on in-app purchases</li>
- <li>You can have more fun and challenge by playing with different mods</li>
- </ul>
- <h3>How to download and install Mod APK Android 1</h3>
- <p>To download and install Mod APK Android 1, you need to follow these steps:</p>
- <ol>
- <li>Go to [Mod APK Android 1](^1^) website and search for Drift Clash Online Racing Mod APK</li>
- <li>Click on the download button and wait for the file to be downloaded</li>
- <li>Go to your device settings and enable unknown sources installation</li>
- <li>Locate the downloaded file and tap on it to install it</li>
- <li>Launch the game and enjoy the modded version</li>
- </ol>
- <h2>Why you should try Drift Clash Online Racing Mod APK Android 1</h2>
- <p>If you are still not convinced, here are some pros and cons of Drift Clash Online Racing Mod APK Android 1 that may help you decide:</p>
- <h3>Pros of the modded version</h3>
- <ul>
- <li>You can get unlimited money to buy any car or part you want</li>
- <li>You can <li>You can unlock all the cars and parts without having to complete the missions or achievements</li>
- <li>You can remove the ads that may interrupt your gameplay</li>
- <li>You can enjoy the game with better graphics and performance</li>
- </ul>
- <h3>Cons of the modded version</h3>
- <ul>
- <li>You may face some compatibility or security issues with your device</li>
- <li>You may lose your progress or data if the modded version is not updated or compatible with the original version</li>
- <li>You may get banned or penalized by the game developers or Google Play for using a modded version</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Drift Clash Online Racing Mod APK Android 1 is a great option for drift racing enthusiasts who want to experience the game with more features and fun. It offers unlimited money, unlocked cars and parts, no ads, and improved graphics and performance. However, it also comes with some risks and drawbacks, such as compatibility, security, and ban issues. Therefore, you should use it at your own discretion and responsibility.</p>
- <h2>FAQs</h2>
- <ol>
- <li>What is the difference between drift racing and normal racing?</li>
- <p>Drift racing is a type of racing where the driver intentionally oversteers the car to make it slide sideways. It requires more skill and technique than normal racing, where the driver tries to maintain traction and speed. Drift racing is more popular in Japan and other Asian countries, where it originated.</p>
- <li>What are the best cars for drift racing?</li>
- <p>There is no definitive answer to this question, as different cars may suit different drivers and preferences. However, some of the common factors that make a good drift car are rear-wheel drive, lightweight body, powerful engine, manual transmission, and adjustable suspension. Some of the popular drift cars are Nissan Skyline, Toyota Supra, Mazda RX-7, BMW M3, and Ford Mustang.</p>
- <li>How can I improve my drift skills?</li>
- <p>The best way to improve your drift skills is to practice regularly and learn from your mistakes. You can also watch videos of professional drifters and observe their techniques and tips. You can also join online communities and forums where you can interact with other drifters and get feedback and advice.</p>
- <p>drift clash real-time multiplayer racing mod apk<br />
- drift clash online racing unlimited money mod apk<br />
- drift clash realistic physics racing mod apk<br />
- drift clash online racing hack apk download<br />
- drift clash online racing mod apk latest version<br />
- drift clash online racing free-roam mod apk<br />
- drift clash online racing retro style mod apk<br />
- drift clash online racing mod apk happymod<br />
- drift clash online racing mod apk android 2<br />
- drift clash online racing mod apk android 3<br />
- drift clash online racing mod apk android 4<br />
- drift clash online racing mod apk android 5<br />
- drift clash online racing mod apk android 6<br />
- drift clash online racing mod apk android 7<br />
- drift clash online racing mod apk android 8<br />
- drift clash online racing mod apk android 9<br />
- drift clash online racing mod apk android 10<br />
- drift clash online racing mod apk android 11<br />
- drift clash online racing mod apk android 12<br />
- drift clash online racing mod apk android 13<br />
- drift clash online racing mod apk android 14<br />
- drift clash online racing mod apk android 15<br />
- drift clash online racing mod apk android 16<br />
- drift clash online racing mod apk android 17<br />
- drift clash online racing mod apk android 18<br />
- drift clash online racing mod apk android 19<br />
- drift clash online racing mod apk android 20<br />
- drift clash online racing motorcycles drifting mod apk<br />
- drift clash online racing clipping zones mod apk<br />
- drift clash online racing cars customization mod apk<br />
- drift clash online racing stickers and decals mod apk<br />
- drift clash online racing game with friends mod apk<br />
- drift clash online racing win most wanted cars mod apk<br />
- drift clash online racing burn tyres on track mod apk<br />
- drift clash online racing unique retro style of the game mod apk <br />
- download drift clash online racing mod apk for free <br />
- how to install drift clash online racing mod apk on android <br />
- how to play drift clash online racing mod apk offline <br />
- how to update drift clash online racing mod apk <br />
- how to get unlimited coins in drift clash online racing mod apk <br />
- how to unlock all cars in drift clash online racing mod apk <br />
- how to get rid of ads in drift clash online racing mod apk <br />
- how to fix lag in drift clash online racing mod apk <br />
- how to change language in drift clash online racing mod apk <br />
- how to connect with facebook in drift clash online racing mod apk <br />
- how to record gameplay in drift clash online racing mod apk <br />
- how to share your score in drift clash online racing mod apk <br />
- how to join a clan in drift clash online racing mod apk <br />
- how to chat with other players in drift clash online racing mod apk</p>
- <li>Is Drift Clash Online Racing Mod APK Android 1 safe to use?</li>
- <p>Drift Clash Online Racing Mod APK Android 1 is not an official version of the game, so it may not be safe to use. It may contain viruses or malware that can harm your device or steal your personal information. It may also violate the terms and conditions of the game developers or Google Play, which can result in a ban or penalty. Therefore, you should use it at your own risk and discretion.</p>
- <li>Where can I download Drift Clash Online Racing Mod APK Android 1?</li>
- <p>You can download Drift Clash Online Racing Mod APK Android 1 from [Mod APK Android 1] website, which provides modded versions of various Android games and apps. However, you should be careful and cautious when downloading any modded version from any website, as they may not be reliable or trustworthy.</p>
- </ol></p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download Traffic Racer MOD APK for Android The Ultimate Racing Experience.md DELETED
@@ -1,94 +0,0 @@
-
- <h1>Download Traffic Racer Mod Apk Done: How to Enjoy Unlimited Money and Cars in This Amazing Racing Game</h1>
- <h2>Introduction</h2>
- <p>If you are a fan of racing games, you might have heard of Traffic Racer, a popular game that lets you drive your car through highway traffic, earn cash, upgrade your car and buy new ones. It is a fun and addictive game that challenges your reflexes and skills. But what if you want to enjoy the game without any limitations or restrictions? What if you want to have unlimited money and cars in the game? Well, there is a way to do that, and it is by downloading Traffic Racer Mod Apk done.</p>
- <h2>download traffic racer mod apk done</h2><br /><p><b><b>Download File</b> &#10027; <a href="https://jinyurl.com/2uNMbN">https://jinyurl.com/2uNMbN</a></b></p><br /><br />
- <h3>What is Traffic Racer?</h3>
- <p>Traffic Racer is a 3D racing game developed by Soner Kara, a Turkish game developer. It was released in 2012 for Android and iOS devices. The game has over 100 million downloads on Google Play Store and has a rating of 4.4 out of 5 stars. The game features 35 different cars, 5 game modes, 4 environments, rich types of NPC traffic, basic customization through paint and wheels, online leaderboards and achievements.</p>
- <h3>What is Traffic Racer Mod Apk?</h3>
- <p>Traffic Racer Mod Apk is a modified version of the original game that gives you access to unlimited money and cars in the game. You can use the money to buy any car you want, upgrade it to the max level, and customize it as you like. You can also unlock all the game modes and environments without having to complete any missions or challenges. With Traffic Racer Mod Apk, you can enjoy the game without any ads or interruptions.</p>
- <h3>Why download Traffic Racer Mod Apk?</h3>
- <p>There are many reasons why you might want to download Traffic Racer Mod Apk done. Here are some of them:</p>
- <p>download traffic racer mod apk unlimited money<br />
- download traffic racer mod apk latest version<br />
- download traffic racer mod apk for android<br />
- download traffic racer mod apk free<br />
- download traffic racer mod apk hack<br />
- download traffic racer mod apk full<br />
- download traffic racer mod apk offline<br />
- download traffic racer mod apk 3.6<br />
- download traffic racer mod apk revdl<br />
- download traffic racer mod apk rexdl<br />
- download traffic racer mod apk no ads<br />
- download traffic racer mod apk android 1<br />
- download traffic racer mod apk 2023<br />
- download traffic racer mod apk apkpure<br />
- download traffic racer mod apk happymod<br />
- download traffic racer mod apk unlimited coins and keys<br />
- download traffic racer mod apk unlocked all cars<br />
- download traffic racer mod apk 3.5<br />
- download traffic racer mod apk 3.4<br />
- download traffic racer mod apk 3.3<br />
- download traffic racer mod apk 3.2<br />
- download traffic racer mod apk 3.1<br />
- download traffic racer mod apk 3.0<br />
- download traffic racer mod apk 2.5<br />
- download traffic racer mod apk 2.4<br />
- download traffic racer mod apk 2.3<br />
- download traffic racer mod apk 2.2<br />
- download traffic racer mod apk 2.1<br />
- download traffic racer mod apk 2.0<br />
- download traffic racer mod apk 1.9<br />
- download traffic racer mod apk 1.8<br />
- download traffic racer mod apk 1.7<br />
- download traffic racer mod apk 1.6<br />
- download traffic racer mod apk 1.5<br />
- download traffic racer mod apk 1.4<br />
- download traffic racer mod apk 1.3<br />
- download traffic racer mod apk 1.2<br />
- download traffic racer mod apk 1.1<br />
- download traffic racer mod apk 1.0<br />
- how to download traffic racer mod apk done</p>
- <ul>
- <li>You can have unlimited money and cars in the game, which means you can buy any car you want, upgrade it to the max level, and customize it as you like.</li>
- <li>You can unlock all the game modes and environments without having to complete any missions or challenges.</li>
- <li>You can enjoy the game without any ads or interruptions.</li>
- <li>You can have more fun and excitement in the game, as you can drive faster, perform more stunts, and crash more cars.</li>
- <li>You can challenge yourself and your friends by competing on the online leaderboards and achievements.</li>
- </ul>
- <h2>How to download Traffic Racer Mod Apk done?</h2>
- <p>If you are interested in downloading Traffic Racer Mod Apk done, you need to follow these simple steps:</p>
- <h3>Step 1: Find a reliable source</h3>
- <p>The first thing you need to do is to find a reliable source that offers the mod apk file for download. There are many websites that claim to provide the mod apk file, but not all of them are trustworthy. Some of them might contain viruses, malware, or spyware that can harm your device or steal your personal information. Therefore, you need to be careful when choosing a source. One of the sources that we recommend is [AN1.com](^1^), which is a reputable website that provides various mod apk files for free.</p>
- <h3>Step 2: Enable unknown sources</h3>
- <p>The next thing you need to do is to enable unknown sources on your device. This is because the mod apk file is not from the official Google Play Store, so you need to allow the installation of apps from unknown sources. To do this, you need to go to your device settings, then security, then enable unknown sources. This will allow you to install the mod apk file without any problems.</p>
- <h3>Step 3: Download and install the mod apk file</h3>
- <p>The third thing you need to do is to download and install the mod apk file on your device. To do this, you need to go to the website that you chose in step 1, then find the download link for the Traffic Racer Mod Apk file. Click on the download link and wait for the file to be downloaded on your device. Once the file is downloaded, you need to locate it in your device storage, then tap on it to start the installation process. Follow the instructions on the screen and wait for the installation to be completed.</p>
- <h3>Step 4: Launch the game and enjoy</h3>
- <p>The last thing you need to do is to launch the game and enjoy it. To do this, you need to find the game icon on your device home screen or app drawer, then tap on it to open the game. You will see that you have unlimited money and cars in the game, and you can access all the game modes and environments. You can also play the game without any ads or interruptions. You can now enjoy the game as much as you want and have fun.</p>
- <h2>Features of Traffic Racer Mod Apk</h2>
- <p>Traffic Racer Mod Apk has many features that make it different from the original game. Here are some of them:</p>
- <h3>Unlimited money</h3>
- <p>One of the main features of Traffic Racer Mod Apk is that it gives you unlimited money in the game. You can use this money to buy any car you want, upgrade it to the max level, and customize it as you like. You can also use this money to unlock all the game modes and environments without having to complete any missions or challenges. You can have as much money as you want and spend it as you wish.</p>
- <h3>Unlimited cars</h3>
- <p>Another feature of Traffic Racer Mod Apk is that it gives you unlimited cars in the game. You can choose from 35 different cars, ranging from sedans, sports cars, trucks, buses, police cars, and more. You can also unlock all the cars without having to earn cash or complete any missions or challenges. You can have as many cars as you want and switch between them as you like.</p>
- <h3>No ads</h3>
- <p>A third feature of Traffic Racer Mod Apk is that it removes all the ads from the game. You can play the game without any ads or interruptions. You can also save your data and battery by not having to watch any ads or videos. You can enjoy the game without any distractions or annoyances.</p>
- <h3>High-quality graphics and sound</h3>
- <p>A fourth feature of Traffic Racer Mod Apk is that it improves the graphics and sound quality of the game. You can experience realistic 3D graphics and smooth animations in the game. You can also hear realistic sound effects and music in the game. You can immerse yourself in the game and feel like you are driving a real car on a real highway.</p>
- <h2>Conclusion</h2>
- <p>Traffic Racer is a fun and addictive racing game that lets you drive your car through highway traffic, earn cash, upgrade your car and buy new ones. But if you want to enjoy the game without any limitations or restrictions, you should download Traffic Racer Mod Apk done. This mod apk file gives you access to unlimited money and cars in the game, as well as removes all the ads from the game. You can also unlock all the game modes and environments without having to complete any missions or challenges. With Traffic Racer Mod Apk, you can have more fun and excitement in the game, as well as challenge yourself and your friends by competing on the online leaderboards and achievements. If you are interested in downloading Traffic Racer Mod Apk done, you can follow the simple steps that we have explained in this article. We hope that you have found this article helpful and informative. Thank you for reading and happy racing!</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about Traffic Racer Mod Apk:</p>
- <h3>Is Traffic Racer Mod Apk safe to download and install?</h3>
- <p>Yes, Traffic Racer Mod Apk is safe to download and install, as long as you use a reliable source that offers the mod apk file for free. We recommend using [AN1.com], which is a reputable website that provides various mod apk files for free. However, you should always scan the mod apk file with an antivirus or anti-malware program before installing it on your device, just to be on the safe side.</p>
- <h3>Is Traffic Racer Mod Apk compatible with my device?</h3>
- <p>Traffic Racer Mod Apk is compatible with most Android devices that run on Android 4.1 or higher. However, some devices might not support the mod apk file due to different specifications or settings. Therefore, you should always check the compatibility of the mod apk file with your device before downloading and installing it. You can also contact the developer of the mod apk file if you encounter any problems or issues with the compatibility.</p>
- <h3>Will Traffic Racer Mod Apk affect my game progress or account?</h3>
- <p>No, Traffic Racer Mod Apk will not affect your game progress or account, as it does not require any root access or login credentials to work. You can play the game as usual, with or without the mod apk file installed on your device. However, you should be aware that using the mod apk file might violate the terms and conditions of the original game, and you might face some consequences or risks if you use it online or with other players. Therefore, you should use the mod apk file at your own discretion and responsibility.</p>
- <h3>Can I update Traffic Racer Mod Apk to the latest version?</h3>
- <p>Yes, you can update Traffic Racer Mod Apk to the latest version, as long as the developer of the mod apk file releases a new version that matches the original game version. You can check for updates on the website that you used to download the mod apk file, or on other websites that offer similar mod apk files. However, you should always backup your game data before updating the mod apk file, just in case something goes wrong or you lose your game progress.</p>
- <h3>Can I uninstall Traffic Racer Mod Apk if I don't like it?</h3>
- <p>Yes, you can uninstall Traffic Racer Mod Apk if you don't like it or if you want to switch back to the original game. To do this, you need to go to your device settings, then apps, then find and select Traffic Racer Mod Apk, then tap on uninstall. This will remove the mod apk file from your device and restore the original game. You can also delete the mod apk file from your device storage if you want to free up some space.</p> 401be4b1e0<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/FIFA Mobile v18.1.01 MOD APK Play in World Cup Stadiums with Official Licenses.md DELETED
@@ -1,132 +0,0 @@
1
-
2
- <h1>FIFA Mobile v18.1.01 Mod Apk: The Ultimate Guide</h1>
3
- <p>If you are a fan of soccer games, you probably have heard of <strong>FIFA Mobile</strong>, the official mobile game of EA Sports that lets you build your ultimate team of soccer stars and compete in various modes, including the FIFA World Cup 2022™. But did you know that there is a way to make your gaming experience even more exciting and rewarding? That's right, we are talking about <strong>FIFA Mobile v18.1.01 mod apk</strong>, a modified version of the game that gives you access to unlimited money, unlocked features, and more.</p>
4
- <p>In this article, we will tell you everything you need to know about FIFA Mobile v18.1.01 mod apk, including its benefits, how to download and install it, and how to use it to dominate the soccer field. Whether you want to build your dream team, relive the world's greatest soccer tournament, score big with soccer icons and heroes, experience immersive next-level soccer simulation, or be the soccer manager of your own dream team, FIFA Mobile v18.1.01 mod apk has something for you.</p>
5
- <h2>fifa mobile v18.1.01 mod apk</h2><br /><p><b><b>DOWNLOAD</b> &#10001; &#10001; &#10001; <a href="https://jinyurl.com/2uNU0a">https://jinyurl.com/2uNU0a</a></b></p><br /><br />
6
- <p>So what are you waiting for? Read on and discover how FIFA Mobile v18.1.01 mod apk can take your soccer game to the next level.</p>
7
- <h2>What is FIFA Mobile and what are its features?</h2>
8
- <p>FIFA Mobile is a free-to-play soccer game for iOS and Android devices that lets you build your ultimate team of over 15,000 authentic soccer stars from over 600 teams across over 30 leagues. You can choose from world-class talent like Kylian Mbappé, Christian Pulisic, Vinicius Jr, and Son Heung-min, as well as legends like Paolo Maldini, Ronaldinho, and more. You can also customize your team's kits, badges, formation, tactics, and chemistry.</p>
9
- <p>FIFA Mobile also offers various modes for you to enjoy, such as:</p>
10
- <ul>
11
- <li><strong>Head-to-Head</strong>: Play real-time 11v11 matches against other players from around the world and climb the leaderboards.</li>
12
- <li><strong>VS Attack</strong>: Take turns to score goals in fast-paced matches where every attack counts.</li>
13
- <li><strong>Manager Mode</strong>: Be the soccer manager of your own dream team and plan your strategy and adjust your tactics in real time or choose auto-play.</li>
14
- <li><strong>FIFA World Cup 2022™ Mode</strong>: Relive the world's greatest soccer tournament with any of the 32 qualified national teams or rewrite history with 15 non-qualified national teams. Play in authentic World Cup stadiums with official kits, badges, and match ball.</li>
15
- <li><strong>Events</strong>: Participate in live events that correspond with the real-world tournaments throughout the soccer season and earn special rewards.</li>
16
- <li><strong>Campaigns</strong>: Complete challenges and earn players from different leagues and regions.</li>
17
- <li><strong>The Academy</strong>: Learn the basics of the game and improve your skills with drills and tutorials.</li>
18
- </ul>
19
- <p>FIFA Mobile also features stunning graphics, realistic animations, and immersive sound effects that make you feel like you are on the pitch. You can also chat with your friends, join a league, or create your own league and compete with other players. FIFA Mobile is constantly updated with new content and features to keep you engaged and entertained.</p>
20
- <h2>What is FIFA Mobile v18.1.01 mod apk and what are its benefits?</h2>
21
- <p>FIFA Mobile v18.1.01 mod apk is a modified version of the original FIFA Mobile game that gives you some extra advantages and perks that are not available in the official version. Some of the benefits of FIFA Mobile v18.1.01 mod apk are:</p>
22
- <ul>
23
- <li><strong>Unlimited money</strong>: You can get unlimited coins and points to buy players, upgrade your team, and unlock features without spending real money.</li>
24
- <li><strong>Unlocked features</strong>: You can access all the features and modes of the game without any restrictions or limitations.</li>
25
- <li><strong>No ads</strong>: You can enjoy the game without any annoying ads or pop-ups that interrupt your gameplay.</li>
26
- <li><strong>No root required</strong>: You can install and run FIFA Mobile v18.1.01 mod apk on your device without rooting it or risking its security.</li>
27
- <li><strong>Easy to use</strong>: You can easily download and install FIFA Mobile v18.1.01 mod apk on your device and start playing right away without any complicated steps or procedures.</li>
28
- </ul>
29
- <p>FIFA Mobile v18.1.01 mod apk is a great way to enhance your gaming experience and have more fun with FIFA Mobile. You can enjoy all the features and modes of the game without any limitations or costs, and build your ultimate team of soccer stars with ease.</p>
30
- <h2>How to download and install FIFA Mobile v18.1.01 mod apk?</h2>
31
- <p>Downloading and installing FIFA Mobile v18.1.01 mod apk is very simple and straightforward. Just follow these steps:</p>
32
- <ol>
33
- <li><strong>Download the FIFA Mobile v18.1.01 mod apk file from a trusted source</strong>. You can find many websites that offer the mod apk file for free, but make sure you choose a reliable and safe one. Alternatively, you can use this link to download the file directly: [FIFA Mobile v18.1.01 mod apk].</li>
34
- <li><strong>Allow unknown sources on your device</strong>. Before you can install the mod apk file, you need to enable the option to allow unknown sources on your device settings. This will allow you to install apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on.</li>
35
- <li><strong>Locate and install the mod apk file</strong>. After you have downloaded the file, go to your file manager and find the folder where you saved it. Tap on the file and follow the instructions to install it on your device.</li>
36
- <li><strong>Launch the game and enjoy</strong>. Once you have installed the mod apk file, you can launch the game from your app drawer or home screen and start playing with unlimited money, unlocked features, and no ads.</li>
37
- </ol>
38
- <p>Congratulations, you have successfully downloaded and installed FIFA Mobile v18.1.01 mod apk on your device. Now you can enjoy all the benefits of the modified version of the game and have more fun with FIFA Mobile.</p>
39
- <p>fifa mobile v18.1.01 mod apk unlimited money<br />
40
- fifa mobile v18.1.01 mod apk unlocked all<br />
41
- fifa mobile v18.1.01 mod apk menu<br />
42
- fifa mobile v18.1.01 mod apk download<br />
43
- fifa mobile v18.1.01 mod apk android<br />
44
- fifa mobile v18.1.01 mod apk latest version<br />
45
- fifa mobile v18.1.01 mod apk offline<br />
46
- fifa mobile v18.1.01 mod apk free<br />
47
- fifa mobile v18.1.01 mod apk hack<br />
48
- fifa mobile v18.1.01 mod apk 2023<br />
49
- fifa mobile v18.1.01 mod apk world cup<br />
50
- fifa mobile v18.1.01 mod apk mega<br />
51
- fifa mobile v18.1.01 mod apk obb<br />
52
- fifa mobile v18.1.01 mod apk data<br />
53
- fifa mobile v18.1.01 mod apk no root<br />
54
- fifa mobile v18.1.01 mod apk online<br />
55
- fifa mobile v18.1.01 mod apk revdl<br />
56
- fifa mobile v18.1.01 mod apk rexdl<br />
57
- fifa mobile v18.1.01 mod apk 5play<br />
58
- fifa mobile v18.1.01 mod apk update<br />
59
- fifa mobile v18.1.01 mod apk full<br />
60
- fifa mobile v18.1.01 mod apk premium<br />
61
- fifa mobile v18.1.01 mod apk pro<br />
62
- fifa mobile v18.1.01 mod apk cracked<br />
63
- fifa mobile v18.1.01 mod apk patched<br />
64
- fifa mobile v18.1.01 mod apk vip<br />
65
- fifa mobile v18.1.01 mod apk cheat<br />
66
- fifa mobile v18.1.01 mod apk coins<br />
67
- fifa mobile v18.1.01 mod apk gems<br />
68
- fifa mobile v18.1.01 mod apk gold<br />
69
- fifa mobile v18.1.01 mod apk stars<br />
70
- fifa mobile v18.1.01 mod apk points<br />
71
- fifa mobile v18.1.01 mod apk tokens<br />
72
- fifa mobile v18.1.01 mod apk players<br />
73
- fifa mobile v18.1.01 mod apk teams<br />
74
- fifa mobile v18.1.01 mod apk kits<br />
75
- fifa mobile v18.1.01 mod apk stadiums<br />
76
- fifa mobile v18.1.01 mod apk icons<br />
77
- fifa mobile v18.1.01 mod apk heroes<br />
78
- fifa mobile v18 2023 season 23 update download free unlimited money coins points tokens players teams kits stadiums icons heroes manager mode world cup mode offline online hack cheat menu mega obb data revdl rexdl 5play android latest version full premium pro cracked patched vip.</p>
79
- <h2>How to build your ultimate team with star players from the biggest leagues and top teams?</h2> test your team's skills and abilities in various modes, such as Head-to-Head, VS Attack, Manager Mode, FIFA World Cup 2022™ Mode, Events, Campaigns, and The Academy. You can also chat with your friends, join a league, or create your own league and compete with other players. FIFA Mobile is the ultimate soccer game for mobile devices.</p>
80
- <h2>How to relive the world's greatest soccer tournament with FIFA World Cup 2022™ mode?</h2>
81
- <p>One of the most exciting modes in FIFA Mobile is the FIFA World Cup 2022™ mode, where you can relive the world's greatest soccer tournament with any of the 32 qualified national teams or rewrite history with 15 non-qualified national teams. You can play in authentic World Cup stadiums with official kits, badges, and match ball. You can also earn exclusive rewards and players from the World Cup events and campaigns. But how do you relive the world's greatest soccer tournament with FIFA World Cup 2022™ mode? Here are some steps to help you out:</p>
82
- <ol>
83
- <li><strong>Choose your national team</strong>. You can choose from any of the 32 qualified national teams or 15 non-qualified national teams to represent in the World Cup. You can also customize your team's kits, badges, formation, tactics, and chemistry.</li>
84
- <li><strong>Play the group stage</strong>. You can play against other national teams in your group and try to qualify for the knockout stage. You can earn points for winning or drawing matches and advance to the next round based on your ranking.</li>
85
- <li><strong>Play the knockout stage</strong>. You can play against other national teams that qualified from their groups and try to reach the final. You can win matches by scoring more goals than your opponent or by winning a penalty shootout if the score is tied after extra time.</li>
86
- <li><strong>Play the final</strong>. You can play against the other finalist and try to win the World Cup trophy. You can celebrate your victory with your team and fans and earn exclusive rewards and players.</li>
87
- </ol>
88
- <p>By following these steps, you can relive the world's greatest soccer tournament with FIFA World Cup 2022™ mode in FIFA Mobile. You can also play friendly matches against other national teams or challenge yourself with special scenarios and objectives. FIFA World Cup 2022™ mode is a great way to experience the thrill and excitement of the World Cup on your mobile device.</p>
89
- <h2>How to score big with soccer icons and heroes?</h2>
90
- <p>Another amazing feature of FIFA Mobile is the ability to score big with soccer icons and heroes, who are legendary players that have made history in the soccer world. You can choose from over 100 icons and heroes, such as Cristiano Ronaldo, Lionel Messi, Neymar Jr, Zinedine Zidane, David Beckham, Pele, Maradona, and more. You can also unlock their stories and learn about their careers and achievements. But how do you score big with soccer icons and heroes? Here are some tips and tricks to help you out:</p>
91
- <ul>
92
- <li><strong>Earn icons and heroes from events and campaigns</strong>. You can earn icons and heroes from various events and campaigns that are available throughout the soccer season. You can complete challenges and objectives to earn players or tokens that can be exchanged for players. You can also buy players from the Market or use coins or points to open packs that contain players.</li>
93
- <li><strong>Train and rank up your icons and heroes to boost their OVR and stats</strong>. You can train and rank up your icons and heroes using Training XP, coins, Rank Up Tokens, and coins. Training XP can be obtained from events, campaigns, rewards, or by using other players as training material. Rank Up Tokens can be obtained from events or by using duplicate players as rank up material.</li>
94
- <li><strong>Use skill boosts to enhance your icons' and heroes' attributes</strong>. You can use skill boosts to boost specific attributes of your icons and heroes, such as pace, shooting, passing, defending, or physical. You can apply skill boosts using Skill Boosts Tokens and coins. Skill Boosts Tokens can be obtained from events, rewards, or by using other skill boosts as skill boost material.</li>
95
- <li><strong>Add icons and heroes to your team to increase chemistry</strong>. Icons and heroes have a special ability to increase chemistry among your players. Icons have a base chemistry of 5 with any player regardless of league, team, or nation. Heroes have a base chemistry of 10 with any player from their league or nation. You can also increase chemistry by using players with the same skill boost or position link.</li>
96
- <li><strong>Use icons' and heroes' special traits and skills to score goals and win matches</strong>. Icons and heroes have special traits and skills that make them stand out from other players. Traits are passive abilities that affect the player's performance, such as finesse shot, speed dribbler, or long shot taker. Skills are active abilities that the player can use during matches, such as rainbow flick, roulette, or heel to heel. You can use these traits and skills to score goals and win matches with your icons and heroes.</li>
97
- </ul>
98
- <p>By following these tips and tricks, you can score big with soccer icons and heroes in FIFA Mobile. You can also unlock their stories and learn about their careers and achievements. Icons and heroes are the ultimate players to have in your team.</p>
99
- <h2>How to experience immersive next-level soccer simulation with upgraded stadiums and realistic audio?</h2>
100
- <p>FIFA Mobile is not only a game of skills and strategy, but also a game of immersion and realism. You can experience immersive next-level soccer simulation with upgraded stadiums and realistic audio that make you feel like you are on the pitch. You can play in authentic stadiums from around the world, such as Wembley Stadium, Camp Nou, Santiago Bernabéu, Allianz Arena, and more. You can also hear the roar of the crowd, the chants of the fans, the commentary of the announcers, and the sound of the ball hitting the net. But how do you experience immersive next-level soccer simulation with upgraded stadiums and realistic audio? Here are some steps to help you out:</p>
101
- <ol>
102
- <li><strong>Choose your preferred stadium</strong>. You can choose from various stadiums from different leagues and regions to play in. You can also unlock more stadiums by completing events and campaigns. You can change your stadium by going to Settings > Team > Stadium.</li>
103
- <li><strong>Adjust your graphics and sound settings</strong>. You can adjust your graphics and sound settings to optimize your gaming experience. You can change your graphics quality by going to Settings > Graphics Quality. You can change your sound settings by going to Settings > Sound Settings. You can also enable or disable music, sound effects, commentary, or crowd noise.</li>
104
- <li><strong>Enjoy the game</strong>. You can enjoy the game with upgraded stadiums and realistic audio that make you feel like you are on the pitch. You can see the details of the stadiums, such as the grass, the lights, the banners, and the fans. You can also hear the sounds of the game, such as the whistle, the ball, the players, and the crowd.</li>
105
- </ol>
106
- <p>By following these steps, you can experience immersive next-level soccer simulation with upgraded stadiums and realistic audio in FIFA Mobile. You can also switch between different camera angles and zoom levels to get a better view of the action. FIFA Mobile is a game that brings you closer to the real soccer world.</p>
107
- <h2>How to be the soccer manager of your own dream team with manager mode?</h2>
108
- <p>One of the most challenging and rewarding modes in FIFA Mobile is the manager mode, where you can be the soccer manager of your own dream team and plan your strategy and adjust your tactics in real time or choose auto-play. You can choose from over 600 teams across over 30 leagues or create your own custom team with your favorite players. You can also compete in various tournaments and leagues or play friendly matches against other teams. But how do you be the soccer manager of your own dream team with manager mode? Here are some tips and tricks to help you out:</p>
109
- <ul>
110
- <li><strong>Select your team</strong>. You can select your team by going to Manager Mode > Select Team. You can choose from any of the available teams or create your own custom team by going to Manager Mode > Create Team. You can also edit your team's name, logo, kit, formation, tactics, chemistry, and players by going to Manager Mode > Edit Team.</li>
111
- <li><strong>Play matches</strong>. You can play matches by going to Manager Mode > Play Match. You can choose from various tournaments and leagues or play friendly matches against other teams. You can also select your difficulty level, match length, weather condition, stadium, ball type, and referee by going to Manager Mode > Match Settings.</li>
112
- <li><strong>Manage your team</strong>. You can manage your team by going to Manager Mode > Manage Team. You can plan your strategy and adjust your tactics in real time or choose auto-play. You can also make substitutions, change formations, switch players' positions, or give instructions to your players during matches.</li>
113
- <li><strong>Earn rewards</strong>. You can earn rewards by playing matches in manager mode. You can earn coins, points, players, skill boosts, rank-up tokens, and more by winning matches, completing objectives, and ranking up in the leaderboards. You can also unlock more teams, stadiums, balls, and kits by playing matches in manager mode.</li>
114
- </ul>
115
- <p>By following these tips and tricks, you can be the soccer manager of your own dream team with manager mode in FIFA Mobile. You can also compare your team's performance and stats with other teams and players by going to Manager Mode > Stats. Manager mode is a great way to test your soccer knowledge and skills.</p>
116
- <h2>Conclusion</h2>
117
- <p>FIFA Mobile v18.1.01 mod apk is a modified version of the original FIFA Mobile game that gives you access to unlimited money, unlocked features, and more. It is a great way to enhance your gaming experience and have more fun with FIFA Mobile. You can build your ultimate team with star players from the biggest leagues and top teams, relive the world's greatest soccer tournament with FIFA World Cup 2022™ mode, score big with soccer icons and heroes, experience immersive next-level soccer simulation with upgraded stadiums and realistic audio, or be the soccer manager of your own dream team with manager mode. FIFA Mobile v18.1.01 mod apk has something for everyone.</p>
118
- <p>So what are you waiting for? Download and install FIFA Mobile v18.1.01 mod apk on your device and start playing right away. You will not regret it.</p>
119
- <h2>FAQs</h2>
120
- <p>Here are some frequently asked questions about FIFA Mobile v18.1.01 mod apk:</p>
121
- <h3>What are the requirements for FIFA Mobile v18.1.01 mod apk?</h3>
122
- <p>FIFA Mobile v18.1.01 mod apk requires Android 4.4 or higher, at least 1 GB of RAM, and 100 MB of free storage space on your device.</p>
123
- <h3>Is FIFA Mobile v18.1.01 mod apk safe and legal?</h3>
124
- <p>FIFA Mobile v18.1.01 mod apk is safe to use as long as you download it from a trusted source and scan it for viruses before installing it on your device. However, it is not legal to use FIFA Mobile v18.1.01 mod apk as it violates the terms and conditions of EA Sports and Google Play Store. You may face some risks or consequences if you use FIFA Mobile v18.1.01 mod apk, such as account suspension, data loss, or legal action.</p>
125
- <h3>How to update FIFA Mobile v18.1.01 mod apk?</h3>
126
- <p>To update FIFA Mobile v18.1.01 mod apk, you need to download the latest version of the mod apk file from a trusted source and install it on your device over the existing version. You may also need to uninstall the original FIFA Mobile game before installing the mod apk file.</p>
127
- <h3>How to get unlimited coins and points in FIFA Mobile v18.1.01 mod apk?</h3>
128
- <p>To get unlimited coins and points in FIFA Mobile v18.1.01 mod apk, you just need to launch the game and check your balance. You will see that you have unlimited coins and points to spend on players, upgrades, features, and more.</p>
129
- <h3>How to contact EA Sports for support or feedback on FIFA Mobile?</h3>
130
- <p>To contact EA Sports for support or feedback on FIFA Mobile, you can go to Settings > Help & Support > Contact Us and choose your preferred option to reach out to them. You can also visit their official website or social media pages for more information.</p>
 
spaces/7hao/bingo/Dockerfile DELETED
@@ -1,36 +0,0 @@
1
- FROM node:18
2
-
3
-
4
- ARG DEBIAN_FRONTEND=noninteractive
5
-
6
- ENV BING_HEADER ""
7
-
8
- # Set home to the user's home directory
9
- ENV HOME=/home/user \
10
- PATH=/home/user/.local/bin:$PATH
11
-
12
- # Set up a new user named "user" with user ID 1000
13
- RUN useradd -o -u 1000 user && mkdir -p $HOME/app && chown -R user $HOME
14
-
15
- # Switch to the "user" user
16
- USER user
17
-
18
- # Set the working directory to the user's home directory
19
- WORKDIR $HOME/app
20
-
21
- # Install app dependencies
22
- # A wildcard is used to ensure both package.json AND package-lock.json are copied
23
- # where available (npm@5+)
24
- COPY --chown=user package*.json $HOME/app/
25
-
26
- RUN npm install
27
-
28
- # Copy the current directory contents into the container at $HOME/app setting the owner to the user
29
- COPY --chown=user . $HOME/app/
30
-
31
- RUN npm run build
32
-
33
- ENV PORT 7860
34
- EXPOSE 7860
35
-
36
- CMD npm start
 
spaces/801artistry/RVC801/diffq/__init__.py DELETED
@@ -1,18 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- # flake8: noqa
8
- """
9
- This package implements different quantization strategies:
10
-
11
- - `diffq.uniform.UniformQuantizer`: classic uniform quantization over n bits.
12
- - `diffq.diffq.DiffQuantizer`: differentiable quantizer based on scaled noise injection.
13
-
14
- Also, do check `diffq.base.BaseQuantizer` for the common methods of all Quantizers.
15
- """
16
-
17
- from .uniform import UniformQuantizer
18
- from .diffq import DiffQuantizer
 
spaces/A00001/bingothoo/src/components/header.tsx DELETED
@@ -1,12 +0,0 @@
1
- import * as React from 'react'
2
- import { UserMenu } from './user-menu'
3
-
4
- export async function Header() {
5
- return (
6
- <header className="sticky top-0 z-50 flex items-center justify-between w-full h-16 px-4 border-b shrink-0 bg-gradient-to-b from-background/10 via-background/50 to-background/80 backdrop-blur-xl">
7
- <div className="flex items-center justify-end space-x-2 w-full">
8
- <UserMenu />
9
- </div>
10
- </header>
11
- )
12
- }
 
spaces/AIConsultant/MusicGen/audiocraft/__init__.py DELETED
@@ -1,26 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
- """
7
- AudioCraft is a general framework for training audio generative models.
8
- At the moment we provide the training code for:
9
-
10
- - [MusicGen](https://arxiv.org/abs/2306.05284), a state-of-the-art
11
- text-to-music and melody+text autoregressive generative model.
12
- For the solver, see `audiocraft.solvers.musicgen.MusicGenSolver`, and for the model,
13
- `audiocraft.models.musicgen.MusicGen`.
14
- - [AudioGen](https://arxiv.org/abs/2209.15352), a state-of-the-art
15
- text-to-general-audio generative model.
16
- - [EnCodec](https://arxiv.org/abs/2210.13438), efficient and high fidelity
17
- neural audio codec which provides an excellent tokenizer for autoregressive language models.
18
- See `audiocraft.solvers.compression.CompressionSolver`, and `audiocraft.models.encodec.EncodecModel`.
19
- - [MultiBandDiffusion](TODO), alternative diffusion-based decoder compatible with EnCodec that
20
- improves the perceived quality and reduces the artifacts coming from adversarial decoders.
21
- """
22
-
23
- # flake8: noqa
24
- from . import data, modules, models
25
-
26
- __version__ = '1.0.0'
 
spaces/AIFILMS/generate_human_motion/pyrender/setup.py DELETED
@@ -1,76 +0,0 @@
1
- """
2
- Setup of pyrender Python codebase.
3
-
4
- Author: Matthew Matl
5
- """
6
- import sys
7
- from setuptools import setup
8
-
9
- # load __version__
10
- exec(open('pyrender/version.py').read())
11
-
12
- def get_imageio_dep():
13
- if sys.version[0] == "2":
14
- return 'imageio<=2.6.1'
15
- return 'imageio'
16
-
17
- requirements = [
18
- 'freetype-py', # For font loading
19
- get_imageio_dep(), # For Image I/O
20
- 'networkx', # For the scene graph
21
- 'numpy', # Numpy
22
- 'Pillow', # For Trimesh texture conversions
23
- 'pyglet>=1.4.10', # For the pyglet viewer
24
- 'PyOpenGL~=3.1.0', # For OpenGL
25
- # 'PyOpenGL_accelerate~=3.1.0', # For OpenGL
26
- 'scipy', # Because of trimesh missing dep
27
- 'six', # For Python 2/3 interop
28
- 'trimesh', # For meshes
29
- ]
30
-
31
- dev_requirements = [
32
- 'flake8', # Code formatting checker
33
- 'pre-commit', # Pre-commit hooks
34
- 'pytest', # Code testing
35
- 'pytest-cov', # Coverage testing
36
- 'tox', # Automatic virtualenv testing
37
- ]
38
-
39
- docs_requirements = [
40
- 'sphinx', # General doc library
41
- 'sphinx_rtd_theme', # RTD theme for sphinx
42
- 'sphinx-automodapi' # For generating nice tables
43
- ]
44
-
45
-
46
- setup(
47
- name = 'pyrender',
48
- version=__version__,
49
- description='Easy-to-use Python renderer for 3D visualization',
50
- long_description='A simple implementation of Physically-Based Rendering '
51
- '(PBR) in Python. Compliant with the glTF 2.0 standard.',
52
- author='Matthew Matl',
53
- author_email='[email protected]',
54
- license='MIT License',
55
- url = 'https://github.com/mmatl/pyrender',
56
- classifiers = [
57
- 'Development Status :: 4 - Beta',
58
- 'License :: OSI Approved :: MIT License',
59
- 'Operating System :: POSIX :: Linux',
60
- 'Operating System :: MacOS :: MacOS X',
61
- 'Programming Language :: Python :: 2.7',
62
- 'Programming Language :: Python :: 3.5',
63
- 'Programming Language :: Python :: 3.6',
64
- 'Natural Language :: English',
65
- 'Topic :: Scientific/Engineering'
66
- ],
67
- keywords = 'rendering graphics opengl 3d visualization pbr gltf',
68
- packages = ['pyrender', 'pyrender.platforms'],
69
- setup_requires = requirements,
70
- install_requires = requirements,
71
- extras_require={
72
- 'dev': dev_requirements,
73
- 'docs': docs_requirements,
74
- },
75
- include_package_data=True
76
- )
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/predict.py DELETED
@@ -1,90 +0,0 @@
1
- import os
2
- from torch.utils.data import DataLoader
3
- import torchvision
4
- from tqdm import tqdm
5
- from dataset import VGGSound
6
- import torch
7
- import torch.nn as nn
8
- from metrics import metrics
9
- from omegaconf import OmegaConf
10
- from model import VGGishish
11
- from transforms import Crop, StandardNormalizeAudio, ToTensor
12
-
13
-
14
- if __name__ == '__main__':
15
- cfg_cli = OmegaConf.from_cli()
16
- print(cfg_cli.config)
17
- cfg_yml = OmegaConf.load(cfg_cli.config)
18
- # the latter arguments are prioritized
19
- cfg = OmegaConf.merge(cfg_yml, cfg_cli)
20
- OmegaConf.set_readonly(cfg, True)
21
- print(OmegaConf.to_yaml(cfg))
22
-
23
- # logger = LoggerWithTBoard(cfg)
24
- transforms = [
25
- StandardNormalizeAudio(cfg.mels_path),
26
- ToTensor(),
27
- ]
28
- if cfg.cropped_size not in [None, 'None', 'none']:
29
- transforms.append(Crop(cfg.cropped_size))
30
- transforms = torchvision.transforms.transforms.Compose(transforms)
31
-
32
- datasets = {
33
- 'test': VGGSound('test', cfg.mels_path, transforms),
34
- }
35
-
36
- loaders = {
37
- 'test': DataLoader(datasets['test'], batch_size=cfg.batch_size,
38
- num_workers=cfg.num_workers, pin_memory=True)
39
- }
40
-
41
- device = torch.device(cfg.device if torch.cuda.is_available() else 'cpu')
42
- model = VGGishish(cfg.conv_layers, cfg.use_bn, num_classes=len(datasets['test'].target2label))
43
- model = model.to(device)
44
-
45
- optimizer = torch.optim.Adam(model.parameters(), lr=cfg.learning_rate)
46
- criterion = nn.CrossEntropyLoss()
47
-
48
- # loading the best model
49
- folder_name = os.path.split(cfg.config)[0].split('/')[-1]
50
- print(folder_name)
51
- ckpt = torch.load(f'./logs/{folder_name}/vggishish-{folder_name}.pt', map_location='cpu')
52
- model.load_state_dict(ckpt['model'])
53
- print((f'The model was trained for {ckpt["epoch"]} epochs. Loss: {ckpt["loss"]:.4f}'))
54
-
55
- # Testing the model
56
- model.eval()
57
- running_loss = 0
58
- preds_from_each_batch = []
59
- targets_from_each_batch = []
60
-
61
- for i, batch in enumerate(tqdm(loaders['test'])):
62
- inputs = batch['input'].to(device)
63
- targets = batch['target'].to(device)
64
-
65
- # zero the parameter gradients
66
- optimizer.zero_grad()
67
-
68
- # forward + backward + optimize
69
- with torch.set_grad_enabled(False):
70
- outputs = model(inputs)
71
- loss = criterion(outputs, targets)
72
-
73
- # loss
74
- running_loss += loss.item()
75
-
76
- # for metrics calculation later on
77
- preds_from_each_batch += [outputs.detach().cpu()]
78
- targets_from_each_batch += [targets.cpu()]
79
-
80
- # logging metrics
81
- preds_from_each_batch = torch.cat(preds_from_each_batch)
82
- targets_from_each_batch = torch.cat(targets_from_each_batch)
83
- test_metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch)
84
- test_metrics_dict['avg_loss'] = running_loss / len(loaders['test'])
85
- test_metrics_dict['param_num'] = sum(p.numel() for p in model.parameters() if p.requires_grad)
86
-
87
- # TODO: I have no idea why tboard doesn't keep metrics (hparams) in a tensorboard when
88
- # I run this experiment from cli: `python main.py config=./configs/vggish.yaml`
89
- # while when I run it in vscode debugger the metrics are present in the tboard (weird)
90
- print(test_metrics_dict)
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/configs/resnet/resnet50_8xb32-coslr-preciseBN_in1k.py DELETED
@@ -1,13 +0,0 @@
1
- _base_ = 'resnet50_8xb32-coslr_in1k.py'
2
-
3
- # Precise BN hook will update the bn stats, so this hook should be executed
4
- # before CheckpointHook(priority of 'VERY_LOW') and
5
- # EMAHook(priority of 'NORMAL') So set the priority of PreciseBNHook to
6
- # 'ABOVENORMAL' here.
7
- custom_hooks = [
8
- dict(
9
- type='PreciseBNHook',
10
- num_samples=8192,
11
- interval=1,
12
- priority='ABOVE_NORMAL')
13
- ]
 
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/create_configs.py DELETED
@@ -1,13 +0,0 @@
1
- import yaml
2
-
3
- fname = "config/gpt-cls-tash-proc.yml"
4
-
5
- stream = open(fname, 'r')
6
- data = yaml.load(stream, Loader=yaml.FullLoader)
7
-
8
- for i in range(0, 10):
9
- data['n_layer'] = i
10
- data['log_directory'] = f'log_dir_cls_{i}_tash_proc'
11
- data['max_steps'] = 5000
12
- with open(f"config/gpt-cls-{i}-tash-proc.yml", 'w') as yaml_file:
13
- yaml_file.write( yaml.dump(data, default_flow_style=False))
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/chart/Factory.js DELETED
@@ -1,13 +0,0 @@
1
- import Chart from './Chart.js';
2
- import ObjectFactory from '../ObjectFactory.js';
3
- import SetValue from '../../../plugins/utils/object/SetValue.js';
4
-
5
- ObjectFactory.register('chart', function (x, y, width, height, config) {
6
- var gameObject = new Chart(this.scene, x, y, width, height, config);
7
- this.scene.add.existing(gameObject);
8
- return gameObject;
9
- });
10
-
11
- SetValue(window, 'RexPlugins.UI.Chart', Chart);
12
-
13
- export default Chart;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/YAMLMake.js DELETED
@@ -1,35 +0,0 @@
1
- import ParseYAML from './utils/ParseYAML.js';
2
- import Make from './Make.js';
3
-
4
- var YAMLMake = function (scene, data, view, styles, customBuilders) {
5
- data = ParseYAML(data);
6
- if (Array.isArray(data)) {
7
- // Parsing result of YAML data might be an array,
8
- // Only last item will be used to create game object, others are references
9
- data = data[data.length - 1];
10
- } else if (data.$root) {
11
- // Parsing result of YAML data might be an object, with $root key,
12
- // data.$root will be used to create game object, others are default styles
13
- var defaultStyles = data;
14
- data = data.$root;
15
- delete defaultStyles.$root;
16
-
17
- if (styles === undefined) {
18
- styles = defaultStyles;
19
- } else {
20
- for (var key in defaultStyles) {
21
- if (!styles[key]) {
22
- styles[key] = defaultStyles[key];
23
- }
24
- }
25
- }
26
- }
27
-
28
- styles = ParseYAML(styles);
29
-
30
- var gameObject = Make(scene, data, view, styles, customBuilders);
31
-
32
- return gameObject;
33
- }
34
-
35
- export default YAMLMake;
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/ops/upfirdn2d.py DELETED
@@ -1,409 +0,0 @@
1
- # Copyright (c) SenseTime Research. All rights reserved.
2
-
3
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
4
- #
5
- # NVIDIA CORPORATION and its licensors retain all intellectual property
6
- # and proprietary rights in and to this software, related documentation
7
- # and any modifications thereto. Any use, reproduction, disclosure or
8
- # distribution of this software and related documentation without an express
9
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
10
-
11
- """Custom PyTorch ops for efficient resampling of 2D images."""
12
-
13
- import os
14
- import warnings
15
- import numpy as np
16
- import torch
17
- import traceback
18
-
19
- from .. import custom_ops
20
- from .. import misc
21
- from . import conv2d_gradfix
22
-
23
- # ----------------------------------------------------------------------------
24
-
25
- _inited = False
26
- _plugin = None
27
-
28
-
29
- def _init():
30
- global _inited, _plugin
31
- if not _inited:
32
- sources = ['upfirdn2d.cpp', 'upfirdn2d.cu']
33
- sources = [os.path.join(os.path.dirname(__file__), s) for s in sources]
34
- try:
35
- _plugin = custom_ops.get_plugin(
36
- 'upfirdn2d_plugin', sources=sources, extra_cuda_cflags=['--use_fast_math'])
37
- except:
38
- warnings.warn(
39
- 'Failed to build CUDA kernels for upfirdn2d. Falling back to slow reference implementation. Details:\n\n' + traceback.format_exc())
40
- return _plugin is not None
41
-
42
-
43
- def _parse_scaling(scaling):
44
- if isinstance(scaling, int):
45
- scaling = [scaling, scaling]
46
- assert isinstance(scaling, (list, tuple))
47
- assert all(isinstance(x, int) for x in scaling)
48
- sx, sy = scaling
49
- assert sx >= 1 and sy >= 1
50
- return sx, sy
51
-
52
-
53
- def _parse_padding(padding):
54
- if isinstance(padding, int):
55
- padding = [padding, padding]
56
- assert isinstance(padding, (list, tuple))
57
- assert all(isinstance(x, int) for x in padding)
58
- if len(padding) == 2:
59
- padx, pady = padding
60
- padding = [padx, padx, pady, pady]
61
- padx0, padx1, pady0, pady1 = padding
62
- return padx0, padx1, pady0, pady1
63
-
64
-
65
- def _get_filter_size(f):
66
- if f is None:
67
- return 1, 1
68
- assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
69
- fw = f.shape[-1]
70
- fh = f.shape[0]
71
- with misc.suppress_tracer_warnings():
72
- fw = int(fw)
73
- fh = int(fh)
74
- misc.assert_shape(f, [fh, fw][:f.ndim])
75
- assert fw >= 1 and fh >= 1
76
- return fw, fh
77
-
78
- # ----------------------------------------------------------------------------
79
-
80
-
81
- def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None):
82
- r"""Convenience function to setup 2D FIR filter for `upfirdn2d()`.
83
-
84
- Args:
85
- f: Torch tensor, numpy array, or python list of the shape
86
- `[filter_height, filter_width]` (non-separable),
87
- `[filter_taps]` (separable),
88
- `[]` (impulse), or
89
- `None` (identity).
90
- device: Result device (default: cpu).
91
- normalize: Normalize the filter so that it retains the magnitude
92
- for constant input signal (DC)? (default: True).
93
- flip_filter: Flip the filter? (default: False).
94
- gain: Overall scaling factor for signal magnitude (default: 1).
95
- separable: Return a separable filter? (default: select automatically).
96
-
97
- Returns:
98
- Float32 tensor of the shape
99
- `[filter_height, filter_width]` (non-separable) or
100
- `[filter_taps]` (separable).
101
- """
102
- # Validate.
103
- if f is None:
104
- f = 1
105
- f = torch.as_tensor(f, dtype=torch.float32)
106
- assert f.ndim in [0, 1, 2]
107
- assert f.numel() > 0
108
- if f.ndim == 0:
109
- f = f[np.newaxis]
110
-
111
- # Separable?
112
- if separable is None:
113
- separable = (f.ndim == 1 and f.numel() >= 8)
114
- if f.ndim == 1 and not separable:
115
- f = f.ger(f)
116
- assert f.ndim == (1 if separable else 2)
117
-
118
- # Apply normalize, flip, gain, and device.
119
- if normalize:
120
- f /= f.sum()
121
- if flip_filter:
122
- f = f.flip(list(range(f.ndim)))
123
- f = f * (gain ** (f.ndim / 2))
124
- f = f.to(device=device)
125
- return f
126
-
127
- # ----------------------------------------------------------------------------
128
-
129
-
130
- def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):
131
- r"""Pad, upsample, filter, and downsample a batch of 2D images.
132
-
133
- Performs the following sequence of operations for each channel:
134
-
135
- 1. Upsample the image by inserting N-1 zeros after each pixel (`up`).
136
-
137
- 2. Pad the image with the specified number of zeros on each side (`padding`).
138
- Negative padding corresponds to cropping the image.
139
-
140
- 3. Convolve the image with the specified 2D FIR filter (`f`), shrinking it
141
- so that the footprint of all output pixels lies within the input image.
142
-
143
- 4. Downsample the image by keeping every Nth pixel (`down`).
144
-
145
- This sequence of operations bears close resemblance to scipy.signal.upfirdn().
146
- The fused op is considerably more efficient than performing the same calculation
147
- using standard PyTorch ops. It supports gradients of arbitrary order.
148
-
149
- Args:
150
- x: Float32/float64/float16 input tensor of the shape
151
- `[batch_size, num_channels, in_height, in_width]`.
152
- f: Float32 FIR filter of the shape
153
- `[filter_height, filter_width]` (non-separable),
154
- `[filter_taps]` (separable), or
155
- `None` (identity).
156
- up: Integer upsampling factor. Can be a single int or a list/tuple
157
- `[x, y]` (default: 1).
158
- down: Integer downsampling factor. Can be a single int or a list/tuple
159
- `[x, y]` (default: 1).
160
- padding: Padding with respect to the upsampled image. Can be a single number
161
- or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
162
- (default: 0).
163
- flip_filter: False = convolution, True = correlation (default: False).
164
- gain: Overall scaling factor for signal magnitude (default: 1).
165
- impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
166
-
167
- Returns:
168
- Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
169
- """
170
- assert isinstance(x, torch.Tensor)
171
- assert impl in ['ref', 'cuda']
172
- if impl == 'cuda' and x.device.type == 'cuda' and _init():
173
- return _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain).apply(x, f)
174
- return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)
175
-
176
- # ----------------------------------------------------------------------------
177
-
178
-
179
- @misc.profiled_function
180
- def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
181
- """Slow reference implementation of `upfirdn2d()` using standard PyTorch ops.
182
- """
183
- # Validate arguments.
184
- assert isinstance(x, torch.Tensor) and x.ndim == 4
185
- if f is None:
186
- f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
187
- assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
188
- assert f.dtype == torch.float32 and not f.requires_grad
189
- batch_size, num_channels, in_height, in_width = x.shape
190
- upx, upy = _parse_scaling(up)
191
- downx, downy = _parse_scaling(down)
192
- padx0, padx1, pady0, pady1 = _parse_padding(padding)
193
-
194
- # Upsample by inserting zeros.
195
- x = x.reshape([batch_size, num_channels, in_height, 1, in_width, 1])
196
- x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
197
- x = x.reshape([batch_size, num_channels, in_height * upy, in_width * upx])
198
-
199
- # Pad or crop.
200
- x = torch.nn.functional.pad(
201
- x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)])
202
- x = x[:, :, max(-pady0, 0): x.shape[2] - max(-pady1, 0),
203
- max(-padx0, 0): x.shape[3] - max(-padx1, 0)]
204
-
205
- # Setup filter.
206
- f = f * (gain ** (f.ndim / 2))
207
- f = f.to(x.dtype)
208
- if not flip_filter:
209
- f = f.flip(list(range(f.ndim)))
210
-
211
- # Convolve with the filter.
212
- f = f[np.newaxis, np.newaxis].repeat([num_channels, 1] + [1] * f.ndim)
213
- if f.ndim == 4:
214
- x = conv2d_gradfix.conv2d(input=x, weight=f, groups=num_channels)
215
- else:
216
- x = conv2d_gradfix.conv2d(
217
- input=x, weight=f.unsqueeze(2), groups=num_channels)
218
- x = conv2d_gradfix.conv2d(
219
- input=x, weight=f.unsqueeze(3), groups=num_channels)
220
-
221
- # Downsample by throwing away pixels.
222
- x = x[:, :, ::downy, ::downx]
223
- return x
224
-
225
- # ----------------------------------------------------------------------------
226
-
227
-
228
- _upfirdn2d_cuda_cache = dict()
229
-
230
-
231
- def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
232
- """Fast CUDA implementation of `upfirdn2d()` using custom ops.
233
- """
234
- # Parse arguments.
235
- upx, upy = _parse_scaling(up)
236
- downx, downy = _parse_scaling(down)
237
- padx0, padx1, pady0, pady1 = _parse_padding(padding)
238
-
239
- # Lookup from cache.
240
- key = (upx, upy, downx, downy, padx0, padx1,
241
- pady0, pady1, flip_filter, gain)
242
- if key in _upfirdn2d_cuda_cache:
243
- return _upfirdn2d_cuda_cache[key]
244
-
245
- # Forward op.
246
- class Upfirdn2dCuda(torch.autograd.Function):
247
- @staticmethod
248
- def forward(ctx, x, f): # pylint: disable=arguments-differ
249
- assert isinstance(x, torch.Tensor) and x.ndim == 4
250
- if f is None:
251
- f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
252
- assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
253
- y = x
254
- if f.ndim == 2:
255
- y = _plugin.upfirdn2d(
256
- y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
257
- else:
258
- y = _plugin.upfirdn2d(y, f.unsqueeze(
259
- 0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, np.sqrt(gain))
260
- y = _plugin.upfirdn2d(y, f.unsqueeze(
261
- 1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, np.sqrt(gain))
262
- ctx.save_for_backward(f)
263
- ctx.x_shape = x.shape
264
- return y
265
-
266
- @staticmethod
267
- def backward(ctx, dy): # pylint: disable=arguments-differ
268
- f, = ctx.saved_tensors
269
- _, _, ih, iw = ctx.x_shape
270
- _, _, oh, ow = dy.shape
271
- fw, fh = _get_filter_size(f)
272
- p = [
273
- fw - padx0 - 1,
274
- iw * upx - ow * downx + padx0 - upx + 1,
275
- fh - pady0 - 1,
276
- ih * upy - oh * downy + pady0 - upy + 1,
277
- ]
278
- dx = None
279
- df = None
280
-
281
- if ctx.needs_input_grad[0]:
282
- dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(
283
- not flip_filter), gain=gain).apply(dy, f)
284
-
285
- assert not ctx.needs_input_grad[1]
286
- return dx, df
287
-
288
- # Add to cache.
289
- _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda
290
- return Upfirdn2dCuda
291
-
292
- # ----------------------------------------------------------------------------
293
-
294
-
295
- def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'):
296
- r"""Filter a batch of 2D images using the given 2D FIR filter.
297
-
298
- By default, the result is padded so that its shape matches the input.
299
- User-specified padding is applied on top of that, with negative values
300
- indicating cropping. Pixels outside the image are assumed to be zero.
301
-
302
- Args:
303
- x: Float32/float64/float16 input tensor of the shape
304
- `[batch_size, num_channels, in_height, in_width]`.
305
- f: Float32 FIR filter of the shape
306
- `[filter_height, filter_width]` (non-separable),
307
- `[filter_taps]` (separable), or
308
- `None` (identity).
309
- padding: Padding with respect to the output. Can be a single number or a
310
- list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
311
- (default: 0).
312
- flip_filter: False = convolution, True = correlation (default: False).
313
- gain: Overall scaling factor for signal magnitude (default: 1).
314
- impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
315
-
316
- Returns:
317
- Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
318
- """
319
- padx0, padx1, pady0, pady1 = _parse_padding(padding)
320
- fw, fh = _get_filter_size(f)
321
- p = [
322
- padx0 + fw // 2,
323
- padx1 + (fw - 1) // 2,
324
- pady0 + fh // 2,
325
- pady1 + (fh - 1) // 2,
326
- ]
327
- return upfirdn2d(x, f, padding=p, flip_filter=flip_filter, gain=gain, impl=impl)
328
-
329
- # ----------------------------------------------------------------------------
330
-
331
-
332
- def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
333
- r"""Upsample a batch of 2D images using the given 2D FIR filter.
334
-
335
- By default, the result is padded so that its shape is a multiple of the input.
336
- User-specified padding is applied on top of that, with negative values
337
- indicating cropping. Pixels outside the image are assumed to be zero.
338
-
339
- Args:
340
- x: Float32/float64/float16 input tensor of the shape
341
- `[batch_size, num_channels, in_height, in_width]`.
342
- f: Float32 FIR filter of the shape
343
- `[filter_height, filter_width]` (non-separable),
344
- `[filter_taps]` (separable), or
345
- `None` (identity).
346
- up: Integer upsampling factor. Can be a single int or a list/tuple
347
- `[x, y]` (default: 1).
348
- padding: Padding with respect to the output. Can be a single number or a
349
- list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
350
- (default: 0).
351
- flip_filter: False = convolution, True = correlation (default: False).
352
- gain: Overall scaling factor for signal magnitude (default: 1).
353
- impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
354
-
355
- Returns:
356
- Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
357
- """
358
- upx, upy = _parse_scaling(up)
359
- padx0, padx1, pady0, pady1 = _parse_padding(padding)
360
- fw, fh = _get_filter_size(f)
361
- p = [
362
- padx0 + (fw + upx - 1) // 2,
363
- padx1 + (fw - upx) // 2,
364
- pady0 + (fh + upy - 1) // 2,
365
- pady1 + (fh - upy) // 2,
366
- ]
367
- return upfirdn2d(x, f, up=up, padding=p, flip_filter=flip_filter, gain=gain*upx*upy, impl=impl)
368
-
369
- # ----------------------------------------------------------------------------
370
-
371
-
372
- def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
373
- r"""Downsample a batch of 2D images using the given 2D FIR filter.
374
-
375
- By default, the result is padded so that its shape is a fraction of the input.
376
- User-specified padding is applied on top of that, with negative values
377
- indicating cropping. Pixels outside the image are assumed to be zero.
378
-
379
- Args:
380
- x: Float32/float64/float16 input tensor of the shape
381
- `[batch_size, num_channels, in_height, in_width]`.
382
- f: Float32 FIR filter of the shape
383
- `[filter_height, filter_width]` (non-separable),
384
- `[filter_taps]` (separable), or
385
- `None` (identity).
386
- down: Integer downsampling factor. Can be a single int or a list/tuple
387
- `[x, y]` (default: 1).
388
- padding: Padding with respect to the input. Can be a single number or a
389
- list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
390
- (default: 0).
391
- flip_filter: False = convolution, True = correlation (default: False).
392
- gain: Overall scaling factor for signal magnitude (default: 1).
393
- impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).
394
-
395
- Returns:
396
- Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
397
- """
398
- downx, downy = _parse_scaling(down)
399
- padx0, padx1, pady0, pady1 = _parse_padding(padding)
400
- fw, fh = _get_filter_size(f)
401
- p = [
402
- padx0 + (fw - downx + 1) // 2,
403
- padx1 + (fw - downx) // 2,
404
- pady0 + (fh - downy + 1) // 2,
405
- pady1 + (fh - downy) // 2,
406
- ]
407
- return upfirdn2d(x, f, down=down, padding=p, flip_filter=flip_filter, gain=gain, impl=impl)
408
-
409
- # ----------------------------------------------------------------------------
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/models/transformer_temporal.md DELETED
@@ -1,11 +0,0 @@
1
- # Transformer Temporal
2
-
3
- A Transformer model for video-like data.
4
-
5
- ## TransformerTemporalModel
6
-
7
- [[autodoc]] models.transformer_temporal.TransformerTemporalModel
8
-
9
- ## TransformerTemporalModelOutput
10
-
11
- [[autodoc]] models.transformer_temporal.TransformerTemporalModelOutput
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/zh/quicktour.md DELETED
@@ -1,331 +0,0 @@
1
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
2
-
3
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
- the License. You may obtain a copy of the License at
5
-
6
- http://www.apache.org/licenses/LICENSE-2.0
7
-
8
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
- specific language governing permissions and limitations under the License.
11
- -->
12
-
13
- [[open-in-colab]]
14
-
15
- # 快速上手
16
-
17
- 训练扩散模型,是为了对随机高斯噪声进行逐步去噪,以生成令人感兴趣的样本,比如图像或者语音。
18
-
19
- 扩散模型的发展引起了人们对生成式人工智能的极大兴趣,你可能已经在网上见过扩散生成的图像了。🧨 Diffusers库的目的是让大家更易上手扩散模型。
20
-
21
- 无论你是开发人员还是普通用户,本文将向你介绍🧨 Diffusers 并帮助你快速开始生成内容!
22
-
23
- 🧨 Diffusers 库的三个主要组件:
24
-
25
-
26
- 无论你是开发者还是普通用户,这个快速指南将向你介绍🧨 Diffusers,并帮助你快速使用和生成!该库三个主要部分如下:
27
-
28
- * [`DiffusionPipeline`]是一个高级的端到端类,旨在通过预训练的扩散模型快速生成样本进行推理。
29
- * 作为创建扩散系统做组件的流行的预训练[模型](./api/models)框架和模块。
30
- * 许多不同的[调度器](./api/schedulers/overview):控制如何在训练过程中添加噪声的算法,以及如何在推理过程中生成去噪图像的算法。
31
-
32
- 快速入门将告诉你如何使用[`DiffusionPipeline`]进行推理,然后指导你如何结合模型和调度器以复现[`DiffusionPipeline`]内部发生的事情。
33
-
34
- <Tip>
35
-
36
- 快速入门是🧨[Diffusers入门](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb)的简化版,可以帮助你快速上手。如果你想了解更多关于🧨 Diffusers的目标、设计理念以及关于它的核心API的更多细节,可以点击🧨[Diffusers入门](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb)查看。
37
-
38
- </Tip>
39
-
40
- 在开始之前,确认一下你已经安装好了所需要的库:
41
-
42
- ```bash
43
- pip install --upgrade diffusers accelerate transformers
44
- ```
45
-
46
- - [🤗 Accelerate](https://huggingface.co/docs/accelerate/index) 在推理和训练过程中加速模型加载。
47
- - [🤗 Transformers](https://huggingface.co/docs/transformers/index) 是运行最流行的扩散模型所必须的库,比如[Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview).
48
-
49
- ## 扩散模型管道
50
-
51
- [`DiffusionPipeline`]是用预训练的扩散系统进行推理的最简单方法。它是一个包含模型和调度器的端到端系统。你可以直接使用[`DiffusionPipeline`]完成许多任务。请查看下面的表格以了解一些支持的任务,要获取完整的支持任务列表,请查看[🧨 Diffusers 总结](./api/pipelines/overview#diffusers-summary) 。
52
-
53
- | **任务** | **描述** | **管道**
54
- |------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------|
55
- | Unconditional Image Generation | 从高斯噪声中生成图片 | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) |
56
- | Text-Guided Image Generation | 给定文本提示生成图像 | [conditional_image_generation](./using-diffusers/conditional_image_generation) |
57
- | Text-Guided Image-to-Image Translation | 在文本提示的指导下调整图像 | [img2img](./using-diffusers/img2img) |
58
- | Text-Guided Image-Inpainting | 给出图像、遮罩和文本提示,填充图像的遮罩部分 | [inpaint](./using-diffusers/inpaint) |
59
- | Text-Guided Depth-to-Image Translation | 在文本提示的指导下调整图像的部分内容,同时通过深度估计保留其结构 | [depth2img](./using-diffusers/depth2img) |
60
-
61
- 首先创建一个[`DiffusionPipeline`]的实例,并指定要下载的pipeline检查点。
62
- 你可以使用存储在Hugging Face Hub上的任何[`DiffusionPipeline`][检查点](https://huggingface.co/models?library=diffusers&sort=downloads)。
63
- 在教程中,你将加载[`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5)检查点,用于文本到图像的生成。
64
-
65
- 首先创建一个[DiffusionPipeline]实例,并指定要下载的管道检查点。
66
- 您可以在Hugging Face Hub上使用[DiffusionPipeline]的任何检查点。
67
- 在本快速入门中,您将加载stable-diffusion-v1-5检查点,用于文本到图像生成。
68
-
69
- <Tip warning={true}>。
70
-
71
- 对于[Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion)模型,在运行该模型之前,请先仔细阅读[许可证](https://huggingface.co/spaces/CompVis/stable-diffusion-license)。🧨 Diffusers实现了一个[`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py),以防止有攻击性的或有害的内容,但Stable Diffusion模型改进图像的生成能力仍有可能产生潜在的有害内容。
72
-
73
- </Tip>
74
-
75
- 用[`~DiffusionPipeline.from_pretrained`]方法加载模型。
76
-
77
- ```python
78
- >>> from diffusers import DiffusionPipeline
79
-
80
- >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
81
- ```
82
- [`DiffusionPipeline`]会下载并缓存所有的建模、标记化和调度组件。你可以看到Stable Diffusion的pipeline是由[`UNet2DConditionModel`]和[`PNDMScheduler`]等组件组成的:
83
-
84
- ```py
85
- >>> pipeline
86
- StableDiffusionPipeline {
87
- "_class_name": "StableDiffusionPipeline",
88
- "_diffusers_version": "0.13.1",
89
- ...,
90
- "scheduler": [
91
- "diffusers",
92
- "PNDMScheduler"
93
- ],
94
- ...,
95
- "unet": [
96
- "diffusers",
97
- "UNet2DConditionModel"
98
- ],
99
- "vae": [
100
- "diffusers",
101
- "AutoencoderKL"
102
- ]
103
- }
104
- ```
105
-
106
- 我们强烈建议你在GPU上运行这个pipeline,因为该模型由大约14亿个参数组成。
107
-
108
- 你可以像在Pytorch里那样把生成器对象移到GPU上:
109
-
110
- ```python
111
- >>> pipeline.to("cuda")
112
- ```
113
-
114
- 现在你可以向`pipeline`传递一个文本提示来生成图像,然后获得去噪的图像。默认情况下,图像输出被放在一个[`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class)对象中。
115
-
116
- ```python
117
- >>> image = pipeline("An image of a squirrel in Picasso style").images[0]
118
- >>> image
119
- ```
120
-
121
- <div class="flex justify-center">
122
- <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/>
123
- </div>
124
-
125
-
126
- 调用`save`保存图像:
127
-
128
- ```python
129
- >>> image.save("image_of_squirrel_painting.png")
130
- ```
131
-
132
- ### 本地管道
133
-
134
- 你也可以在本地使用管道。唯一的区别是你需提前下载权重:
135
-
136
- ```
137
- git lfs install
138
- git clone https://huggingface.co/runwayml/stable-diffusion-v1-5
139
- ```
140
-
141
- 将下载好的权重加载到管道中:
142
-
143
- ```python
144
- >>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5")
145
- ```
146
-
147
- 现在你可以像上一节中那样运行管道了。
148
-
149
- ### 更换调度器
150
-
151
- 不同的调度器对去噪速度和质量的权衡是不同的。要想知道哪种调度器最适合你,最好的办法就是试用一下。🧨 Diffusers的主要特点之一是允许你轻松切换不同的调度器。例如,要用[`EulerDiscreteScheduler`]替换默认的[`PNDMScheduler`],用[`~diffusers.ConfigMixin.from_config`]方法加载即可:
152
-
153
- ```py
154
- >>> from diffusers import EulerDiscreteScheduler
155
-
156
- >>> pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
157
- >>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
158
- ```
159
-
160
-
161
- 试着用新的调度器生成一个图像,看看你能否发现不同之处。
162
-
163
- 在下一节中,你将仔细观察组成[`DiffusionPipeline`]的组件——模型和调度器,并学习如何使用这些组件来生成猫咪的图像。
164
-
165
- ## 模型
166
-
167
- 大多数模型取一个噪声样本,在每个时间点预测*噪声残差*(其他模型则直接学习预测前一个样本或速度或[`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)),即噪声较小的图像与输入图像的差异。你可以混搭模型创建其他扩散系统。
168
-
169
- 模型是用[`~ModelMixin.from_pretrained`]方法启动的,该方法还在本地缓存了模型权重,所以下次加载模型时更快。对于快速入门,你默认加载的是[`UNet2DModel`],这是一个基础的无条件图像生成模型,该模型有一个在猫咪图像上训练的检查点:
170
-
171
-
172
- ```py
173
- >>> from diffusers import UNet2DModel
174
-
175
- >>> repo_id = "google/ddpm-cat-256"
176
- >>> model = UNet2DModel.from_pretrained(repo_id)
177
- ```
178
-
179
- 想知道模型的参数,调用 `model.config`:
180
-
181
- ```py
182
- >>> model.config
183
- ```
184
-
185
- 模型配置是一个🧊冻结的🧊字典,意思是这些参数在模型创建后就不变了。这是特意设置的,确保在开始时用于定义模型架构的参数保持不变,其他参数仍然可以在推理过程中进行调整。
186
-
187
- 一些最重要的参数:
188
-
189
- * `sample_size`:输入样本的高度和宽度尺寸。
190
- * `in_channels`:输入样本的输入通道数。
191
- * `down_block_types`和`up_block_types`:用于创建U-Net架构的下采样和上采样块的类型。
192
- * `block_out_channels`:下采样块的输出通道数;也以相反的顺序用于上采样块的输入通道数。
193
- * `layers_per_block`:每个U-Net块中存在的ResNet块的数量。
194
-
195
- 为了使用该模型进行推理,用随机高斯噪声生成图像形状。它应该有一个`batch`轴,因为模型可以接收多个随机噪声,一个`channel`轴,对应于输入通道的数量,以及一个`sample_size`轴,对应图像的高度和宽度。
196
-
197
-
198
- ```py
199
- >>> import torch
200
-
201
- >>> torch.manual_seed(0)
202
-
203
- >>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
204
- >>> noisy_sample.shape
205
- torch.Size([1, 3, 256, 256])
206
- ```
207
-
208
- 对于推理,将噪声图像和一个`timestep`传递给模型。`timestep` 表示输入图像的噪声程度,开始时噪声更多,结束时噪声更少。这有助于模型确定其在扩散过程中的位置,是更接近开始还是结束。使用 `sample` 获得模型输出:
209
-
210
-
211
- ```py
212
- >>> with torch.no_grad():
213
- ... noisy_residual = model(sample=noisy_sample, timestep=2).sample
214
- ```
215
-
216
- 想生成实际的样本,你需要一个调度器指导去噪过程。在下一节中,你将学习如何把模型与调度器结合起来。
217
-
218
- ## 调度器
219
-
220
- 调度器管理一个噪声样本到一个噪声较小的样本的处理过程,给出模型输出 —— 在这种情况下,它是`noisy_residual`。
221
-
222
-
223
-
224
- <Tip>
225
-
226
- 🧨 Diffusers是一个用于构建扩散系统的工具箱。预定义好的扩散系统[`DiffusionPipeline`]能方便你快速试用,你也可以单独选择自己的模型和调度器组件来建立一个自定义的扩散系统。
227
-
228
- </Tip>
229
-
230
- 在快速入门教程中,你将用它的[`~diffusers.ConfigMixin.from_config`]方法实例化[`DDPMScheduler`]:
231
-
232
- ```py
233
- >>> from diffusers import DDPMScheduler
234
-
235
- >>> scheduler = DDPMScheduler.from_config(repo_id)
236
- >>> scheduler
237
- DDPMScheduler {
238
- "_class_name": "DDPMScheduler",
239
- "_diffusers_version": "0.13.1",
240
- "beta_end": 0.02,
241
- "beta_schedule": "linear",
242
- "beta_start": 0.0001,
243
- "clip_sample": true,
244
- "clip_sample_range": 1.0,
245
- "num_train_timesteps": 1000,
246
- "prediction_type": "epsilon",
247
- "trained_betas": null,
248
- "variance_type": "fixed_small"
249
- }
250
- ```
251
-
252
- <Tip>
253
-
254
-
255
- 💡 注意调度器是如何从配置中实例化的。与模型不同,调度器没有可训练的权重,而且是无参数的。
256
-
257
- </Tip>
258
-
259
- * `num_train_timesteps`:去噪过程的长度,或者换句话说,将随机高斯噪声处理成数据样本所需的时间步数。
260
- * `beta_schedule`:用于推理和训练的噪声表。
261
- * `beta_start`和`beta_end`:噪声表的开始和结束噪声值。
262
-
263
- 要预测一个噪音稍小的图像,请将 模型输出、`timestep`和当前`sample` 传递给调度器的[`~diffusers.DDPMScheduler.step`]方法:
264
-
265
-
266
- ```py
267
- >>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample
268
- >>> less_noisy_sample.shape
269
- ```
270
-
271
- 这个 `less_noisy_sample` 去噪样本 可以被传递到下一个`timestep` ,处理后会将变得噪声更小。现在让我们把所有步骤合起来,可视化整个去噪过程。
272
-
273
- 首先,创建一个函数,对去噪后的图像进行后处理并显示为`PIL.Image`:
274
-
275
- ```py
276
- >>> import PIL.Image
277
- >>> import numpy as np
278
-
279
-
280
- >>> def display_sample(sample, i):
281
- ... image_processed = sample.cpu().permute(0, 2, 3, 1)
282
- ... image_processed = (image_processed + 1.0) * 127.5
283
- ... image_processed = image_processed.numpy().astype(np.uint8)
284
-
285
- ... image_pil = PIL.Image.fromarray(image_processed[0])
286
- ... display(f"Image at step {i}")
287
- ... display(image_pil)
288
- ```
289
-
290
- 将输入和模型移到GPU上加速去噪过程:
291
-
292
- ```py
293
- >>> model.to("cuda")
294
- >>> noisy_sample = noisy_sample.to("cuda")
295
- ```
296
-
297
- 现在创建一个去噪循环,该循环预测噪声较少样本的残差,并使用调度程序计算噪声较少的样本:
298
-
299
- ```py
300
- >>> import tqdm
301
-
302
- >>> sample = noisy_sample
303
-
304
- >>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)):
305
- ... # 1. predict noise residual
306
- ... with torch.no_grad():
307
- ... residual = model(sample, t).sample
308
-
309
- ... # 2. compute less noisy image and set x_t -> x_t-1
310
- ... sample = scheduler.step(residual, t, sample).prev_sample
311
-
312
- ... # 3. optionally look at image
313
- ... if (i + 1) % 50 == 0:
314
- ... display_sample(sample, i + 1)
315
- ```
316
-
317
- 看!这样就从噪声中生成出一只猫了!😻
318
-
319
- <div class="flex justify-center">
320
- <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/>
321
- </div>
322
-
323
- ## 下一步
324
-
325
- 希望你在这次快速入门教程中用🧨Diffuser 生成了一些很酷的图像! 下一步你可以:
326
-
327
- * 在[训练](./tutorials/basic_training)教程中训练或微调一个模型来生成你自己的图像。
328
- * 查看官方和社区的[训练或微调脚本](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples)的例子,了解更多使用情况。
329
- * 在[使用不同的调度器](./using-diffusers/schedulers)指南中了解更多关于加载、访问、更改和比较调度器的信息。
330
- * 在[Stable Diffusion](./stable_diffusion)教程中探索提示工程、速度和内存优化,以及生成更高质量图像的技巧。
331
- * 通过[在GPU上优化PyTorch](./optimization/fp16)指南,以及运行[Apple (M1/M2)上的Stable Diffusion](./optimization/mps)和[ONNX Runtime](./optimization/onnx)的教程,更深入地了解如何加速🧨Diffuser。
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/transformer_temporal.py DELETED
@@ -1,179 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- from dataclasses import dataclass
15
- from typing import Optional
16
-
17
- import torch
18
- from torch import nn
19
-
20
- from ..configuration_utils import ConfigMixin, register_to_config
21
- from ..utils import BaseOutput
22
- from .attention import BasicTransformerBlock
23
- from .modeling_utils import ModelMixin
24
-
25
-
26
- @dataclass
27
- class TransformerTemporalModelOutput(BaseOutput):
28
- """
29
- The output of [`TransformerTemporalModel`].
30
-
31
- Args:
32
- sample (`torch.FloatTensor` of shape `(batch_size x num_frames, num_channels, height, width)`):
33
- The hidden states output conditioned on `encoder_hidden_states` input.
34
- """
35
-
36
- sample: torch.FloatTensor
37
-
38
-
39
- class TransformerTemporalModel(ModelMixin, ConfigMixin):
40
- """
41
- A Transformer model for video-like data.
42
-
43
- Parameters:
44
- num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
45
- attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
46
- in_channels (`int`, *optional*):
47
- The number of channels in the input and output (specify if the input is **continuous**).
48
- num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
49
- dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
50
- cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
51
- sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).
52
- This is fixed during training since it is used to learn a number of position embeddings.
53
- activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward.
54
- attention_bias (`bool`, *optional*):
55
- Configure if the `TransformerBlock` attention should contain a bias parameter.
56
- double_self_attention (`bool`, *optional*):
57
- Configure if each `TransformerBlock` should contain two self-attention layers.
58
- """
59
-
60
- @register_to_config
61
- def __init__(
62
- self,
63
- num_attention_heads: int = 16,
64
- attention_head_dim: int = 88,
65
- in_channels: Optional[int] = None,
66
- out_channels: Optional[int] = None,
67
- num_layers: int = 1,
68
- dropout: float = 0.0,
69
- norm_num_groups: int = 32,
70
- cross_attention_dim: Optional[int] = None,
71
- attention_bias: bool = False,
72
- sample_size: Optional[int] = None,
73
- activation_fn: str = "geglu",
74
- norm_elementwise_affine: bool = True,
75
- double_self_attention: bool = True,
76
- ):
77
- super().__init__()
78
- self.num_attention_heads = num_attention_heads
79
- self.attention_head_dim = attention_head_dim
80
- inner_dim = num_attention_heads * attention_head_dim
81
-
82
- self.in_channels = in_channels
83
-
84
- self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
85
- self.proj_in = nn.Linear(in_channels, inner_dim)
86
-
87
- # 3. Define transformers blocks
88
- self.transformer_blocks = nn.ModuleList(
89
- [
90
- BasicTransformerBlock(
91
- inner_dim,
92
- num_attention_heads,
93
- attention_head_dim,
94
- dropout=dropout,
95
- cross_attention_dim=cross_attention_dim,
96
- activation_fn=activation_fn,
97
- attention_bias=attention_bias,
98
- double_self_attention=double_self_attention,
99
- norm_elementwise_affine=norm_elementwise_affine,
100
- )
101
- for d in range(num_layers)
102
- ]
103
- )
104
-
105
- self.proj_out = nn.Linear(inner_dim, in_channels)
106
-
107
- def forward(
108
- self,
109
- hidden_states,
110
- encoder_hidden_states=None,
111
- timestep=None,
112
- class_labels=None,
113
- num_frames=1,
114
- cross_attention_kwargs=None,
115
- return_dict: bool = True,
116
- ):
117
- """
118
- The [`TransformerTemporal`] forward method.
119
-
120
- Args:
121
- hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):
122
- Input hidden_states.
123
- encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):
124
- Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
125
- self-attention.
126
- timestep ( `torch.long`, *optional*):
127
- Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
128
- class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
129
- Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
130
- `AdaLayerZeroNorm`.
131
- return_dict (`bool`, *optional*, defaults to `True`):
132
- Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
133
- tuple.
134
-
135
- Returns:
136
- [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:
137
- If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is
138
- returned, otherwise a `tuple` where the first element is the sample tensor.
139
- """
140
- # 1. Input
141
- batch_frames, channel, height, width = hidden_states.shape
142
- batch_size = batch_frames // num_frames
143
-
144
- residual = hidden_states
145
-
146
- hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
147
- hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
148
-
149
- hidden_states = self.norm(hidden_states)
150
- hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
151
-
152
- hidden_states = self.proj_in(hidden_states)
153
-
154
- # 2. Blocks
155
- for block in self.transformer_blocks:
156
- hidden_states = block(
157
- hidden_states,
158
- encoder_hidden_states=encoder_hidden_states,
159
- timestep=timestep,
160
- cross_attention_kwargs=cross_attention_kwargs,
161
- class_labels=class_labels,
162
- )
163
-
164
- # 3. Output
165
- hidden_states = self.proj_out(hidden_states)
166
- hidden_states = (
167
- hidden_states[None, None, :]
168
- .reshape(batch_size, height, width, channel, num_frames)
169
- .permute(0, 3, 4, 1, 2)
170
- .contiguous()
171
- )
172
- hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
173
-
174
- output = hidden_states + residual
175
-
176
- if not return_dict:
177
- return (output,)
178
-
179
- return TransformerTemporalModelOutput(sample=output)
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/versatile_diffusion/modeling_text_unet.py DELETED
@@ -1,1932 +0,0 @@
1
- from typing import Any, Dict, List, Optional, Tuple, Union
2
-
3
- import numpy as np
4
- import torch
5
- import torch.nn as nn
6
- import torch.nn.functional as F
7
-
8
- from ...configuration_utils import ConfigMixin, register_to_config
9
- from ...models import ModelMixin
10
- from ...models.activations import get_activation
11
- from ...models.attention import Attention
12
- from ...models.attention_processor import (
13
- AttentionProcessor,
14
- AttnAddedKVProcessor,
15
- AttnAddedKVProcessor2_0,
16
- AttnProcessor,
17
- )
18
- from ...models.dual_transformer_2d import DualTransformer2DModel
19
- from ...models.embeddings import (
20
- GaussianFourierProjection,
21
- ImageHintTimeEmbedding,
22
- ImageProjection,
23
- ImageTimeEmbedding,
24
- TextImageProjection,
25
- TextImageTimeEmbedding,
26
- TextTimeEmbedding,
27
- TimestepEmbedding,
28
- Timesteps,
29
- )
30
- from ...models.transformer_2d import Transformer2DModel
31
- from ...models.unet_2d_condition import UNet2DConditionOutput
32
- from ...utils import is_torch_version, logging
33
-
34
-
35
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
36
-
37
-
38
- def get_down_block(
39
- down_block_type,
40
- num_layers,
41
- in_channels,
42
- out_channels,
43
- temb_channels,
44
- add_downsample,
45
- resnet_eps,
46
- resnet_act_fn,
47
- num_attention_heads,
48
- resnet_groups=None,
49
- cross_attention_dim=None,
50
- downsample_padding=None,
51
- dual_cross_attention=False,
52
- use_linear_projection=False,
53
- only_cross_attention=False,
54
- upcast_attention=False,
55
- resnet_time_scale_shift="default",
56
- resnet_skip_time_act=False,
57
- resnet_out_scale_factor=1.0,
58
- cross_attention_norm=None,
59
- ):
60
- down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
61
- if down_block_type == "DownBlockFlat":
62
- return DownBlockFlat(
63
- num_layers=num_layers,
64
- in_channels=in_channels,
65
- out_channels=out_channels,
66
- temb_channels=temb_channels,
67
- add_downsample=add_downsample,
68
- resnet_eps=resnet_eps,
69
- resnet_act_fn=resnet_act_fn,
70
- resnet_groups=resnet_groups,
71
- downsample_padding=downsample_padding,
72
- resnet_time_scale_shift=resnet_time_scale_shift,
73
- )
74
- elif down_block_type == "CrossAttnDownBlockFlat":
75
- if cross_attention_dim is None:
76
- raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlockFlat")
77
- return CrossAttnDownBlockFlat(
78
- num_layers=num_layers,
79
- in_channels=in_channels,
80
- out_channels=out_channels,
81
- temb_channels=temb_channels,
82
- add_downsample=add_downsample,
83
- resnet_eps=resnet_eps,
84
- resnet_act_fn=resnet_act_fn,
85
- resnet_groups=resnet_groups,
86
- downsample_padding=downsample_padding,
87
- cross_attention_dim=cross_attention_dim,
88
- num_attention_heads=num_attention_heads,
89
- dual_cross_attention=dual_cross_attention,
90
- use_linear_projection=use_linear_projection,
91
- only_cross_attention=only_cross_attention,
92
- resnet_time_scale_shift=resnet_time_scale_shift,
93
- )
94
- raise ValueError(f"{down_block_type} is not supported.")
95
-
96
-
97
- def get_up_block(
98
- up_block_type,
99
- num_layers,
100
- in_channels,
101
- out_channels,
102
- prev_output_channel,
103
- temb_channels,
104
- add_upsample,
105
- resnet_eps,
106
- resnet_act_fn,
107
- num_attention_heads,
108
- resnet_groups=None,
109
- cross_attention_dim=None,
110
- dual_cross_attention=False,
111
- use_linear_projection=False,
112
- only_cross_attention=False,
113
- upcast_attention=False,
114
- resnet_time_scale_shift="default",
115
- resnet_skip_time_act=False,
116
- resnet_out_scale_factor=1.0,
117
- cross_attention_norm=None,
118
- ):
119
- up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
120
- if up_block_type == "UpBlockFlat":
121
- return UpBlockFlat(
122
- num_layers=num_layers,
123
- in_channels=in_channels,
124
- out_channels=out_channels,
125
- prev_output_channel=prev_output_channel,
126
- temb_channels=temb_channels,
127
- add_upsample=add_upsample,
128
- resnet_eps=resnet_eps,
129
- resnet_act_fn=resnet_act_fn,
130
- resnet_groups=resnet_groups,
131
- resnet_time_scale_shift=resnet_time_scale_shift,
132
- )
133
- elif up_block_type == "CrossAttnUpBlockFlat":
134
- if cross_attention_dim is None:
135
- raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockFlat")
136
- return CrossAttnUpBlockFlat(
137
- num_layers=num_layers,
138
- in_channels=in_channels,
139
- out_channels=out_channels,
140
- prev_output_channel=prev_output_channel,
141
- temb_channels=temb_channels,
142
- add_upsample=add_upsample,
143
- resnet_eps=resnet_eps,
144
- resnet_act_fn=resnet_act_fn,
145
- resnet_groups=resnet_groups,
146
- cross_attention_dim=cross_attention_dim,
147
- num_attention_heads=num_attention_heads,
148
- dual_cross_attention=dual_cross_attention,
149
- use_linear_projection=use_linear_projection,
150
- only_cross_attention=only_cross_attention,
151
- resnet_time_scale_shift=resnet_time_scale_shift,
152
- )
153
- raise ValueError(f"{up_block_type} is not supported.")
154
-
155
-
156
- # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel with UNet2DConditionModel->UNetFlatConditionModel, nn.Conv2d->LinearMultiDim, Block2D->BlockFlat
157
- class UNetFlatConditionModel(ModelMixin, ConfigMixin):
158
- r"""
159
- A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
160
- shaped output.
161
-
162
- This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
163
- for all models (such as downloading or saving).
164
-
165
- Parameters:
166
- sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
167
- Height and width of input/output sample.
168
- in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
169
- out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
170
- center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
171
- flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
172
- Whether to flip the sin to cos in the time embedding.
173
- freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
174
- down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlockFlat", "CrossAttnDownBlockFlat", "CrossAttnDownBlockFlat", "DownBlockFlat")`):
175
- The tuple of downsample blocks to use.
176
- mid_block_type (`str`, *optional*, defaults to `"UNetMidBlockFlatCrossAttn"`):
177
- Block type for middle of UNet, it can be either `UNetMidBlockFlatCrossAttn` or
178
- `UNetMidBlockFlatSimpleCrossAttn`. If `None`, the mid block layer is skipped.
179
- up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlockFlat", "CrossAttnUpBlockFlat", "CrossAttnUpBlockFlat", "CrossAttnUpBlockFlat")`):
180
- The tuple of upsample blocks to use.
181
- only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`):
182
- Whether to include self-attention in the basic transformer blocks, see
183
- [`~models.attention.BasicTransformerBlock`].
184
- block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
185
- The tuple of output channels for each block.
186
- layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
187
- downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
188
- mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
189
- act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
190
- norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
191
- If `None`, normalization and activation layers are skipped in post-processing.
192
- norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
193
- cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
194
- The dimension of the cross attention features.
195
- transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):
196
- The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
197
- [`~models.unet_2d_blocks.CrossAttnDownBlockFlat`], [`~models.unet_2d_blocks.CrossAttnUpBlockFlat`],
198
- [`~models.unet_2d_blocks.UNetMidBlockFlatCrossAttn`].
199
- encoder_hid_dim (`int`, *optional*, defaults to None):
200
- If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
201
- dimension to `cross_attention_dim`.
202
- encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
203
- If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
204
- embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
205
- attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
206
- num_attention_heads (`int`, *optional*):
207
- The number of attention heads. If not defined, defaults to `attention_head_dim`
208
- resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
209
- for ResNet blocks (see [`~models.resnet.ResnetBlockFlat`]). Choose from `default` or `scale_shift`.
210
- class_embed_type (`str`, *optional*, defaults to `None`):
211
- The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
212
- `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
213
- addition_embed_type (`str`, *optional*, defaults to `None`):
214
- Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
215
- "text". "text" will use the `TextTimeEmbedding` layer.
216
- addition_time_embed_dim: (`int`, *optional*, defaults to `None`):
217
- Dimension for the timestep embeddings.
218
- num_class_embeds (`int`, *optional*, defaults to `None`):
219
- Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
220
- class conditioning with `class_embed_type` equal to `None`.
221
- time_embedding_type (`str`, *optional*, defaults to `positional`):
222
- The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
223
- time_embedding_dim (`int`, *optional*, defaults to `None`):
224
- An optional override for the dimension of the projected time embedding.
225
- time_embedding_act_fn (`str`, *optional*, defaults to `None`):
226
- Optional activation function to use only once on the time embeddings before they are passed to the rest of
227
- the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
228
- timestep_post_act (`str`, *optional*, defaults to `None`):
229
- The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
230
- time_cond_proj_dim (`int`, *optional*, defaults to `None`):
231
- The dimension of `cond_proj` layer in the timestep embedding.
232
- conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer.
233
- conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer.
234
- projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
235
- `class_embed_type="projection"`. Required when `class_embed_type="projection"`.
236
- class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
237
- embeddings with the class embeddings.
238
- mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
239
- Whether to use cross attention with the mid block when using the `UNetMidBlockFlatSimpleCrossAttn`. If
240
- `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
241
- `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Defaults to `False`
242
- otherwise.
243
- """
244
-
245
- _supports_gradient_checkpointing = True
246
-
247
- @register_to_config
248
- def __init__(
249
- self,
250
- sample_size: Optional[int] = None,
251
- in_channels: int = 4,
252
- out_channels: int = 4,
253
- center_input_sample: bool = False,
254
- flip_sin_to_cos: bool = True,
255
- freq_shift: int = 0,
256
- down_block_types: Tuple[str] = (
257
- "CrossAttnDownBlockFlat",
258
- "CrossAttnDownBlockFlat",
259
- "CrossAttnDownBlockFlat",
260
- "DownBlockFlat",
261
- ),
262
- mid_block_type: Optional[str] = "UNetMidBlockFlatCrossAttn",
263
- up_block_types: Tuple[str] = (
264
- "UpBlockFlat",
265
- "CrossAttnUpBlockFlat",
266
- "CrossAttnUpBlockFlat",
267
- "CrossAttnUpBlockFlat",
268
- ),
269
- only_cross_attention: Union[bool, Tuple[bool]] = False,
270
- block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
271
- layers_per_block: Union[int, Tuple[int]] = 2,
272
- downsample_padding: int = 1,
273
- mid_block_scale_factor: float = 1,
274
- act_fn: str = "silu",
275
- norm_num_groups: Optional[int] = 32,
276
- norm_eps: float = 1e-5,
277
- cross_attention_dim: Union[int, Tuple[int]] = 1280,
278
- transformer_layers_per_block: Union[int, Tuple[int]] = 1,
279
- encoder_hid_dim: Optional[int] = None,
280
- encoder_hid_dim_type: Optional[str] = None,
281
- attention_head_dim: Union[int, Tuple[int]] = 8,
282
- num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
283
- dual_cross_attention: bool = False,
284
- use_linear_projection: bool = False,
285
- class_embed_type: Optional[str] = None,
286
- addition_embed_type: Optional[str] = None,
287
- addition_time_embed_dim: Optional[int] = None,
288
- num_class_embeds: Optional[int] = None,
289
- upcast_attention: bool = False,
290
- resnet_time_scale_shift: str = "default",
291
- resnet_skip_time_act: bool = False,
292
- resnet_out_scale_factor: int = 1.0,
293
- time_embedding_type: str = "positional",
294
- time_embedding_dim: Optional[int] = None,
295
- time_embedding_act_fn: Optional[str] = None,
296
- timestep_post_act: Optional[str] = None,
297
- time_cond_proj_dim: Optional[int] = None,
298
- conv_in_kernel: int = 3,
299
- conv_out_kernel: int = 3,
300
- projection_class_embeddings_input_dim: Optional[int] = None,
301
- class_embeddings_concat: bool = False,
302
- mid_block_only_cross_attention: Optional[bool] = None,
303
- cross_attention_norm: Optional[str] = None,
304
- addition_embed_type_num_heads=64,
305
- ):
306
- super().__init__()
307
-
308
- self.sample_size = sample_size
309
-
310
- if num_attention_heads is not None:
311
- raise ValueError(
312
- "At the moment it is not possible to define the number of attention heads via `num_attention_heads`"
313
- " because of a naming issue as described in"
314
- " https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing"
315
- " `num_attention_heads` will only be supported in diffusers v0.19."
316
- )
317
-
318
- # If `num_attention_heads` is not defined (which is the case for most models)
319
- # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
320
- # The reason for this behavior is to correct for incorrectly named variables that were introduced
321
- # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
322
- # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
323
- # which is why we correct for the naming here.
324
- num_attention_heads = num_attention_heads or attention_head_dim
325
-
326
- # Check inputs
327
- if len(down_block_types) != len(up_block_types):
328
- raise ValueError(
329
- "Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`:"
330
- f" {down_block_types}. `up_block_types`: {up_block_types}."
331
- )
332
-
333
- if len(block_out_channels) != len(down_block_types):
334
- raise ValueError(
335
- "Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`:"
336
- f" {block_out_channels}. `down_block_types`: {down_block_types}."
337
- )
338
-
339
- if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
340
- raise ValueError(
341
- "Must provide the same number of `only_cross_attention` as `down_block_types`."
342
- f" `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
343
- )
344
-
345
- if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
346
- raise ValueError(
347
- "Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`:"
348
- f" {num_attention_heads}. `down_block_types`: {down_block_types}."
349
- )
350
-
351
- if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
352
- raise ValueError(
353
- "Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`:"
354
- f" {attention_head_dim}. `down_block_types`: {down_block_types}."
355
- )
356
-
357
- if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
358
- raise ValueError(
359
- "Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`:"
360
- f" {cross_attention_dim}. `down_block_types`: {down_block_types}."
361
- )
362
-
363
- if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
364
- raise ValueError(
365
- "Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`:"
366
- f" {layers_per_block}. `down_block_types`: {down_block_types}."
367
- )
368
-
369
- # input
370
- conv_in_padding = (conv_in_kernel - 1) // 2
371
- self.conv_in = LinearMultiDim(
372
- in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
373
- )
374
-
375
- # time
376
- if time_embedding_type == "fourier":
377
- time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
378
- if time_embed_dim % 2 != 0:
379
- raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
380
- self.time_proj = GaussianFourierProjection(
381
- time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
382
- )
383
- timestep_input_dim = time_embed_dim
384
- elif time_embedding_type == "positional":
385
- time_embed_dim = time_embedding_dim or block_out_channels[0] * 4
386
-
387
- self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
388
- timestep_input_dim = block_out_channels[0]
389
- else:
390
- raise ValueError(
391
- f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
392
- )
393
-
394
- self.time_embedding = TimestepEmbedding(
395
- timestep_input_dim,
396
- time_embed_dim,
397
- act_fn=act_fn,
398
- post_act_fn=timestep_post_act,
399
- cond_proj_dim=time_cond_proj_dim,
400
- )
401
-
402
- if encoder_hid_dim_type is None and encoder_hid_dim is not None:
403
- encoder_hid_dim_type = "text_proj"
404
- self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
405
- logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
406
-
407
- if encoder_hid_dim is None and encoder_hid_dim_type is not None:
408
- raise ValueError(
409
- f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
410
- )
411
-
412
- if encoder_hid_dim_type == "text_proj":
413
- self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
414
- elif encoder_hid_dim_type == "text_image_proj":
415
- # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
416
- # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
417
- # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1).
418
- self.encoder_hid_proj = TextImageProjection(
419
- text_embed_dim=encoder_hid_dim,
420
- image_embed_dim=cross_attention_dim,
421
- cross_attention_dim=cross_attention_dim,
422
- )
423
- elif encoder_hid_dim_type == "image_proj":
424
- # Kandinsky 2.2
425
- self.encoder_hid_proj = ImageProjection(
426
- image_embed_dim=encoder_hid_dim,
427
- cross_attention_dim=cross_attention_dim,
428
- )
429
- elif encoder_hid_dim_type is not None:
430
- raise ValueError(
431
- f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj', 'text_image_proj' or 'image_proj'."
432
- )
433
- else:
434
- self.encoder_hid_proj = None
435
-
436
- # class embedding
437
- if class_embed_type is None and num_class_embeds is not None:
438
- self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
439
- elif class_embed_type == "timestep":
440
- self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
441
- elif class_embed_type == "identity":
442
- self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
443
- elif class_embed_type == "projection":
444
- if projection_class_embeddings_input_dim is None:
445
- raise ValueError(
446
- "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
447
- )
448
- # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
449
- # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
450
- # 2. it projects from an arbitrary input dimension.
451
- #
452
- # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
453
- # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
454
- # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
455
- self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
456
- elif class_embed_type == "simple_projection":
457
- if projection_class_embeddings_input_dim is None:
458
- raise ValueError(
459
- "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
460
- )
461
- self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
462
- else:
463
- self.class_embedding = None
464
-
465
- if addition_embed_type == "text":
466
- if encoder_hid_dim is not None:
467
- text_time_embedding_from_dim = encoder_hid_dim
468
- else:
469
- text_time_embedding_from_dim = cross_attention_dim
470
-
471
- self.add_embedding = TextTimeEmbedding(
472
- text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
473
- )
474
- elif addition_embed_type == "text_image":
475
- # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
476
- # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
477
- # case when `addition_embed_type == "text_image"` (Kandinsky 2.1).
478
- self.add_embedding = TextImageTimeEmbedding(
479
- text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
480
- )
481
- elif addition_embed_type == "text_time":
482
- self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
483
- self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
484
- elif addition_embed_type == "image":
485
- # Kandinsky 2.2
486
- self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
487
- elif addition_embed_type == "image_hint":
488
- # Kandinsky 2.2 ControlNet
489
- self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
490
- elif addition_embed_type is not None:
491
- raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text', 'text_image', 'text_time', 'image' or 'image_hint'.")
492
-
493
- if time_embedding_act_fn is None:
494
- self.time_embed_act = None
495
- else:
496
- self.time_embed_act = get_activation(time_embedding_act_fn)
497
-
498
- self.down_blocks = nn.ModuleList([])
499
- self.up_blocks = nn.ModuleList([])
500
-
501
- if isinstance(only_cross_attention, bool):
502
- if mid_block_only_cross_attention is None:
503
- mid_block_only_cross_attention = only_cross_attention
504
-
505
- only_cross_attention = [only_cross_attention] * len(down_block_types)
506
-
507
- if mid_block_only_cross_attention is None:
508
- mid_block_only_cross_attention = False
509
-
510
- if isinstance(num_attention_heads, int):
511
- num_attention_heads = (num_attention_heads,) * len(down_block_types)
512
-
513
- if isinstance(attention_head_dim, int):
514
- attention_head_dim = (attention_head_dim,) * len(down_block_types)
515
-
516
- if isinstance(cross_attention_dim, int):
517
- cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
518
-
519
- if isinstance(layers_per_block, int):
520
- layers_per_block = [layers_per_block] * len(down_block_types)
521
-
522
- if isinstance(transformer_layers_per_block, int):
523
- transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
524
-
525
- if class_embeddings_concat:
526
- # The time embeddings are concatenated with the class embeddings. The dimension of the
527
- # time embeddings passed to the down, middle, and up blocks is twice the dimension of the
528
- # regular time embeddings
529
- blocks_time_embed_dim = time_embed_dim * 2
530
- else:
531
- blocks_time_embed_dim = time_embed_dim
532
-
533
- # down
534
- output_channel = block_out_channels[0]
535
- for i, down_block_type in enumerate(down_block_types):
536
- input_channel = output_channel
537
- output_channel = block_out_channels[i]
538
- is_final_block = i == len(block_out_channels) - 1
539
-
540
- down_block = get_down_block(
541
- down_block_type,
542
- num_layers=layers_per_block[i],
543
- transformer_layers_per_block=transformer_layers_per_block[i],
544
- in_channels=input_channel,
545
- out_channels=output_channel,
546
- temb_channels=blocks_time_embed_dim,
547
- add_downsample=not is_final_block,
548
- resnet_eps=norm_eps,
549
- resnet_act_fn=act_fn,
550
- resnet_groups=norm_num_groups,
551
- cross_attention_dim=cross_attention_dim[i],
552
- num_attention_heads=num_attention_heads[i],
553
- downsample_padding=downsample_padding,
554
- dual_cross_attention=dual_cross_attention,
555
- use_linear_projection=use_linear_projection,
556
- only_cross_attention=only_cross_attention[i],
557
- upcast_attention=upcast_attention,
558
- resnet_time_scale_shift=resnet_time_scale_shift,
559
- resnet_skip_time_act=resnet_skip_time_act,
560
- resnet_out_scale_factor=resnet_out_scale_factor,
561
- cross_attention_norm=cross_attention_norm,
562
- attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
563
- )
564
- self.down_blocks.append(down_block)
565
-
566
- # mid
567
- if mid_block_type == "UNetMidBlockFlatCrossAttn":
568
- self.mid_block = UNetMidBlockFlatCrossAttn(
569
- transformer_layers_per_block=transformer_layers_per_block[-1],
570
- in_channels=block_out_channels[-1],
571
- temb_channels=blocks_time_embed_dim,
572
- resnet_eps=norm_eps,
573
- resnet_act_fn=act_fn,
574
- output_scale_factor=mid_block_scale_factor,
575
- resnet_time_scale_shift=resnet_time_scale_shift,
576
- cross_attention_dim=cross_attention_dim[-1],
577
- num_attention_heads=num_attention_heads[-1],
578
- resnet_groups=norm_num_groups,
579
- dual_cross_attention=dual_cross_attention,
580
- use_linear_projection=use_linear_projection,
581
- upcast_attention=upcast_attention,
582
- )
583
- elif mid_block_type == "UNetMidBlockFlatSimpleCrossAttn":
584
- self.mid_block = UNetMidBlockFlatSimpleCrossAttn(
585
- in_channels=block_out_channels[-1],
586
- temb_channels=blocks_time_embed_dim,
587
- resnet_eps=norm_eps,
588
- resnet_act_fn=act_fn,
589
- output_scale_factor=mid_block_scale_factor,
590
- cross_attention_dim=cross_attention_dim[-1],
591
- attention_head_dim=attention_head_dim[-1],
592
- resnet_groups=norm_num_groups,
593
- resnet_time_scale_shift=resnet_time_scale_shift,
594
- skip_time_act=resnet_skip_time_act,
595
- only_cross_attention=mid_block_only_cross_attention,
596
- cross_attention_norm=cross_attention_norm,
597
- )
598
- elif mid_block_type is None:
599
- self.mid_block = None
600
- else:
601
- raise ValueError(f"unknown mid_block_type : {mid_block_type}")
602
-
603
- # count how many layers upsample the images
604
- self.num_upsamplers = 0
605
-
606
- # up
607
- reversed_block_out_channels = list(reversed(block_out_channels))
608
- reversed_num_attention_heads = list(reversed(num_attention_heads))
609
- reversed_layers_per_block = list(reversed(layers_per_block))
610
- reversed_cross_attention_dim = list(reversed(cross_attention_dim))
611
- reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block))
612
- only_cross_attention = list(reversed(only_cross_attention))
613
-
614
- output_channel = reversed_block_out_channels[0]
615
- for i, up_block_type in enumerate(up_block_types):
616
- is_final_block = i == len(block_out_channels) - 1
617
-
618
- prev_output_channel = output_channel
619
- output_channel = reversed_block_out_channels[i]
620
- input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
621
-
622
- # add upsample block for all BUT final layer
623
- if not is_final_block:
624
- add_upsample = True
625
- self.num_upsamplers += 1
626
- else:
627
- add_upsample = False
628
-
629
- up_block = get_up_block(
630
- up_block_type,
631
- num_layers=reversed_layers_per_block[i] + 1,
632
- transformer_layers_per_block=reversed_transformer_layers_per_block[i],
633
- in_channels=input_channel,
634
- out_channels=output_channel,
635
- prev_output_channel=prev_output_channel,
636
- temb_channels=blocks_time_embed_dim,
637
- add_upsample=add_upsample,
638
- resnet_eps=norm_eps,
639
- resnet_act_fn=act_fn,
640
- resnet_groups=norm_num_groups,
641
- cross_attention_dim=reversed_cross_attention_dim[i],
642
- num_attention_heads=reversed_num_attention_heads[i],
643
- dual_cross_attention=dual_cross_attention,
644
- use_linear_projection=use_linear_projection,
645
- only_cross_attention=only_cross_attention[i],
646
- upcast_attention=upcast_attention,
647
- resnet_time_scale_shift=resnet_time_scale_shift,
648
- resnet_skip_time_act=resnet_skip_time_act,
649
- resnet_out_scale_factor=resnet_out_scale_factor,
650
- cross_attention_norm=cross_attention_norm,
651
- attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
652
- )
653
- self.up_blocks.append(up_block)
654
- prev_output_channel = output_channel
655
-
656
- # out
657
- if norm_num_groups is not None:
658
- self.conv_norm_out = nn.GroupNorm(
659
- num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
660
- )
661
-
662
- self.conv_act = get_activation(act_fn)
663
-
664
- else:
665
- self.conv_norm_out = None
666
- self.conv_act = None
667
-
668
- conv_out_padding = (conv_out_kernel - 1) // 2
669
- self.conv_out = LinearMultiDim(
670
- block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
671
- )
672
-
673
- @property
674
- def attn_processors(self) -> Dict[str, AttentionProcessor]:
675
- r"""
676
- Returns:
677
- `dict` of attention processors: A dictionary containing all attention processors used in the model,
- indexed by their weight names.
679
- """
680
- # set recursively
681
- processors = {}
682
-
683
- def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
684
- if hasattr(module, "set_processor"):
685
- processors[f"{name}.processor"] = module.processor
686
-
687
- for sub_name, child in module.named_children():
688
- fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
689
-
690
- return processors
691
-
692
- for name, module in self.named_children():
693
- fn_recursive_add_processors(name, module, processors)
694
-
695
- return processors
696
-
697
- def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
698
- r"""
699
- Sets the attention processor to use to compute attention.
700
-
701
- Parameters:
702
- processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
703
- The instantiated processor class or a dictionary of processor classes that will be set as the processor
704
- for **all** `Attention` layers.
705
-
706
- If `processor` is a dict, the key needs to define the path to the corresponding cross attention
707
- processor. This is strongly recommended when setting trainable attention processors.
708
-
709
- """
710
- count = len(self.attn_processors.keys())
711
-
712
- if isinstance(processor, dict) and len(processor) != count:
713
- raise ValueError(
714
- f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
715
- f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
716
- )
717
-
718
- def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
719
- if hasattr(module, "set_processor"):
720
- if not isinstance(processor, dict):
721
- module.set_processor(processor)
722
- else:
723
- module.set_processor(processor.pop(f"{name}.processor"))
724
-
725
- for sub_name, child in module.named_children():
726
- fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
727
-
728
- for name, module in self.named_children():
729
- fn_recursive_attn_processor(name, module, processor)
730
-
731
- def set_default_attn_processor(self):
732
- """
733
- Disables custom attention processors and sets the default attention implementation.
734
- """
735
- self.set_attn_processor(AttnProcessor())
736
-
737
- def set_attention_slice(self, slice_size):
738
- r"""
739
- Enable sliced attention computation.
740
-
741
- When this option is enabled, the attention module splits the input tensor in slices to compute attention in
742
- several steps. This is useful for saving some memory in exchange for a small decrease in speed.
743
-
744
- Args:
745
- slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
746
- When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
747
- `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
748
- provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
749
- must be a multiple of `slice_size`.
750
- """
751
- sliceable_head_dims = []
752
-
753
- def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
754
- if hasattr(module, "set_attention_slice"):
755
- sliceable_head_dims.append(module.sliceable_head_dim)
756
-
757
- for child in module.children():
758
- fn_recursive_retrieve_sliceable_dims(child)
759
-
760
- # retrieve number of attention layers
761
- for module in self.children():
762
- fn_recursive_retrieve_sliceable_dims(module)
763
-
764
- num_sliceable_layers = len(sliceable_head_dims)
765
-
766
- if slice_size == "auto":
767
- # half the attention head size is usually a good trade-off between
768
- # speed and memory
769
- slice_size = [dim // 2 for dim in sliceable_head_dims]
770
- elif slice_size == "max":
771
- # make smallest slice possible
772
- slice_size = num_sliceable_layers * [1]
773
-
774
- slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
775
-
776
- if len(slice_size) != len(sliceable_head_dims):
777
- raise ValueError(
778
- f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
779
- f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
780
- )
781
-
782
- for i in range(len(slice_size)):
783
- size = slice_size[i]
784
- dim = sliceable_head_dims[i]
785
- if size is not None and size > dim:
786
- raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
787
-
788
- # Recursively walk through all the children.
789
- # Any children which exposes the set_attention_slice method
790
- # gets the message
791
- def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
792
- if hasattr(module, "set_attention_slice"):
793
- module.set_attention_slice(slice_size.pop())
794
-
795
- for child in module.children():
796
- fn_recursive_set_attention_slice(child, slice_size)
797
-
798
- reversed_slice_size = list(reversed(slice_size))
799
- for module in self.children():
800
- fn_recursive_set_attention_slice(module, reversed_slice_size)
801
-
802
- def _set_gradient_checkpointing(self, module, value=False):
803
- if isinstance(module, (CrossAttnDownBlockFlat, DownBlockFlat, CrossAttnUpBlockFlat, UpBlockFlat)):
804
- module.gradient_checkpointing = value
805
-
806
- def forward(
807
- self,
808
- sample: torch.FloatTensor,
809
- timestep: Union[torch.Tensor, float, int],
810
- encoder_hidden_states: torch.Tensor,
811
- class_labels: Optional[torch.Tensor] = None,
812
- timestep_cond: Optional[torch.Tensor] = None,
813
- attention_mask: Optional[torch.Tensor] = None,
814
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
815
- added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
816
- down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
817
- mid_block_additional_residual: Optional[torch.Tensor] = None,
818
- encoder_attention_mask: Optional[torch.Tensor] = None,
819
- return_dict: bool = True,
820
- ) -> Union[UNet2DConditionOutput, Tuple]:
821
- r"""
822
- The [`UNetFlatConditionModel`] forward method.
823
-
824
- Args:
825
- sample (`torch.FloatTensor`):
826
- The noisy input tensor with the following shape `(batch, channel, height, width)`.
827
- timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
828
- encoder_hidden_states (`torch.FloatTensor`):
829
- The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
830
- encoder_attention_mask (`torch.Tensor`):
831
- A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
832
- `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
833
- which adds large negative values to the attention scores corresponding to "discard" tokens.
834
- return_dict (`bool`, *optional*, defaults to `True`):
835
- Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
836
- tuple.
837
- cross_attention_kwargs (`dict`, *optional*):
838
- A kwargs dictionary that if specified is passed along to the [`AttnProcessor`].
839
- added_cond_kwargs: (`dict`, *optional*):
840
- A kwargs dictionary containing additional embeddings that, if specified, are added to the embeddings that
841
- are passed along to the UNet blocks.
842
-
843
- Returns:
844
- [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
845
- If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise
846
- a `tuple` is returned where the first element is the sample tensor.
847
- """
848
- # By default samples have to be at least a multiple of the overall upsampling factor.
849
- # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
850
- # However, the upsampling interpolation output size can be forced to fit any upsampling size
851
- # on the fly if necessary.
852
- default_overall_up_factor = 2**self.num_upsamplers
853
-
854
- # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
855
- forward_upsample_size = False
856
- upsample_size = None
857
-
858
- if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
859
- logger.info("Forward upsample size to force interpolation output size.")
860
- forward_upsample_size = True
861
-
862
- # ensure attention_mask is a bias, and give it a singleton query_tokens dimension
863
- # expects mask of shape:
864
- # [batch, key_tokens]
865
- # adds singleton query_tokens dimension:
866
- # [batch, 1, key_tokens]
867
- # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
868
- # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
869
- # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
870
- if attention_mask is not None:
871
- # assume that mask is expressed as:
872
- # (1 = keep, 0 = discard)
873
- # convert mask into a bias that can be added to attention scores:
874
- # (keep = +0, discard = -10000.0)
875
- attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
876
- attention_mask = attention_mask.unsqueeze(1)
877
-
878
- # convert encoder_attention_mask to a bias the same way we do for attention_mask
879
- if encoder_attention_mask is not None:
880
- encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
881
- encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
882
-
883
- # 0. center input if necessary
884
- if self.config.center_input_sample:
885
- sample = 2 * sample - 1.0
886
-
887
- # 1. time
888
- timesteps = timestep
889
- if not torch.is_tensor(timesteps):
890
- # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
891
- # This would be a good case for the `match` statement (Python 3.10+)
892
- is_mps = sample.device.type == "mps"
893
- if isinstance(timestep, float):
894
- dtype = torch.float32 if is_mps else torch.float64
895
- else:
896
- dtype = torch.int32 if is_mps else torch.int64
897
- timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
898
- elif len(timesteps.shape) == 0:
899
- timesteps = timesteps[None].to(sample.device)
900
-
901
- # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
902
- timesteps = timesteps.expand(sample.shape[0])
903
-
904
- t_emb = self.time_proj(timesteps)
905
-
906
- # `Timesteps` does not contain any weights and will always return f32 tensors
907
- # but time_embedding might actually be running in fp16. so we need to cast here.
908
- # there might be better ways to encapsulate this.
909
- t_emb = t_emb.to(dtype=sample.dtype)
910
-
911
- emb = self.time_embedding(t_emb, timestep_cond)
912
- aug_emb = None
913
-
914
- if self.class_embedding is not None:
915
- if class_labels is None:
916
- raise ValueError("class_labels should be provided when num_class_embeds > 0")
917
-
918
- if self.config.class_embed_type == "timestep":
919
- class_labels = self.time_proj(class_labels)
920
-
921
- # `Timesteps` does not contain any weights and will always return f32 tensors
922
- # there might be better ways to encapsulate this.
923
- class_labels = class_labels.to(dtype=sample.dtype)
924
-
925
- class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
926
-
927
- if self.config.class_embeddings_concat:
928
- emb = torch.cat([emb, class_emb], dim=-1)
929
- else:
930
- emb = emb + class_emb
931
-
932
- if self.config.addition_embed_type == "text":
933
- aug_emb = self.add_embedding(encoder_hidden_states)
934
- elif self.config.addition_embed_type == "text_image":
935
- # Kandinsky 2.1 - style
936
- if "image_embeds" not in added_cond_kwargs:
937
- raise ValueError(
938
- f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires"
939
- " the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
940
- )
941
-
942
- image_embs = added_cond_kwargs.get("image_embeds")
943
- text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
944
- aug_emb = self.add_embedding(text_embs, image_embs)
945
- elif self.config.addition_embed_type == "text_time":
946
- # SDXL - style
947
- if "text_embeds" not in added_cond_kwargs:
948
- raise ValueError(
949
- f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires"
950
- " the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
951
- )
952
- text_embeds = added_cond_kwargs.get("text_embeds")
953
- if "time_ids" not in added_cond_kwargs:
954
- raise ValueError(
955
- f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires"
956
- " the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
957
- )
958
- time_ids = added_cond_kwargs.get("time_ids")
959
- time_embeds = self.add_time_proj(time_ids.flatten())
960
- time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
961
-
962
- add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
963
- add_embeds = add_embeds.to(emb.dtype)
964
- aug_emb = self.add_embedding(add_embeds)
965
- elif self.config.addition_embed_type == "image":
966
- # Kandinsky 2.2 - style
967
- if "image_embeds" not in added_cond_kwargs:
968
- raise ValueError(
969
- f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the"
970
- " keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
971
- )
972
- image_embs = added_cond_kwargs.get("image_embeds")
973
- aug_emb = self.add_embedding(image_embs)
974
- elif self.config.addition_embed_type == "image_hint":
975
- # Kandinsky 2.2 - style
976
- if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs:
977
- raise ValueError(
978
- f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires"
979
- " the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
980
- )
981
- image_embs = added_cond_kwargs.get("image_embeds")
982
- hint = added_cond_kwargs.get("hint")
983
- aug_emb, hint = self.add_embedding(image_embs, hint)
984
- sample = torch.cat([sample, hint], dim=1)
985
-
986
- emb = emb + aug_emb if aug_emb is not None else emb
987
-
988
- if self.time_embed_act is not None:
989
- emb = self.time_embed_act(emb)
990
-
991
- if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
992
- encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
993
- elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
994
- # Kandinsky 2.1 - style
995
- if "image_embeds" not in added_cond_kwargs:
996
- raise ValueError(
997
- f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which"
998
- " requires the keyword argument `image_embeds` to be passed in `added_conditions`"
999
- )
1000
-
1001
- image_embeds = added_cond_kwargs.get("image_embeds")
1002
- encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
1003
- elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj":
1004
- # Kandinsky 2.2 - style
1005
- if "image_embeds" not in added_cond_kwargs:
1006
- raise ValueError(
1007
- f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires"
1008
- " the keyword argument `image_embeds` to be passed in `added_conditions`"
1009
- )
1010
- image_embeds = added_cond_kwargs.get("image_embeds")
1011
- encoder_hidden_states = self.encoder_hid_proj(image_embeds)
1012
- # 2. pre-process
1013
- sample = self.conv_in(sample)
1014
-
1015
- # 3. down
1016
-
1017
- is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
1018
- is_adapter = mid_block_additional_residual is None and down_block_additional_residuals is not None
1019
-
1020
- down_block_res_samples = (sample,)
1021
- for downsample_block in self.down_blocks:
1022
- if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
1023
- # For t2i-adapter CrossAttnDownBlockFlat
1024
- additional_residuals = {}
1025
- if is_adapter and len(down_block_additional_residuals) > 0:
1026
- additional_residuals["additional_residuals"] = down_block_additional_residuals.pop(0)
1027
-
1028
- sample, res_samples = downsample_block(
1029
- hidden_states=sample,
1030
- temb=emb,
1031
- encoder_hidden_states=encoder_hidden_states,
1032
- attention_mask=attention_mask,
1033
- cross_attention_kwargs=cross_attention_kwargs,
1034
- encoder_attention_mask=encoder_attention_mask,
1035
- **additional_residuals,
1036
- )
1037
- else:
1038
- sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
1039
-
1040
- if is_adapter and len(down_block_additional_residuals) > 0:
1041
- sample += down_block_additional_residuals.pop(0)
1042
-
1043
- down_block_res_samples += res_samples
1044
-
1045
- if is_controlnet:
1046
- new_down_block_res_samples = ()
1047
-
1048
- for down_block_res_sample, down_block_additional_residual in zip(
1049
- down_block_res_samples, down_block_additional_residuals
1050
- ):
1051
- down_block_res_sample = down_block_res_sample + down_block_additional_residual
1052
- new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
1053
-
1054
- down_block_res_samples = new_down_block_res_samples
1055
-
1056
- # 4. mid
1057
- if self.mid_block is not None:
1058
- sample = self.mid_block(
1059
- sample,
1060
- emb,
1061
- encoder_hidden_states=encoder_hidden_states,
1062
- attention_mask=attention_mask,
1063
- cross_attention_kwargs=cross_attention_kwargs,
1064
- encoder_attention_mask=encoder_attention_mask,
1065
- )
1066
-
1067
- if is_controlnet:
1068
- sample = sample + mid_block_additional_residual
1069
-
1070
- # 5. up
1071
- for i, upsample_block in enumerate(self.up_blocks):
1072
- is_final_block = i == len(self.up_blocks) - 1
1073
-
1074
- res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
1075
- down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
1076
-
1077
- # if we have not reached the final block and need to forward the
1078
- # upsample size, we do it here
1079
- if not is_final_block and forward_upsample_size:
1080
- upsample_size = down_block_res_samples[-1].shape[2:]
1081
-
1082
- if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
1083
- sample = upsample_block(
1084
- hidden_states=sample,
1085
- temb=emb,
1086
- res_hidden_states_tuple=res_samples,
1087
- encoder_hidden_states=encoder_hidden_states,
1088
- cross_attention_kwargs=cross_attention_kwargs,
1089
- upsample_size=upsample_size,
1090
- attention_mask=attention_mask,
1091
- encoder_attention_mask=encoder_attention_mask,
1092
- )
1093
- else:
1094
- sample = upsample_block(
1095
- hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
1096
- )
1097
-
1098
- # 6. post-process
1099
- if self.conv_norm_out:
1100
- sample = self.conv_norm_out(sample)
1101
- sample = self.conv_act(sample)
1102
- sample = self.conv_out(sample)
1103
-
1104
- if not return_dict:
1105
- return (sample,)
1106
-
1107
- return UNet2DConditionOutput(sample=sample)
1108
-
1109
-
1110
- class LinearMultiDim(nn.Linear):
1111
- def __init__(self, in_features, out_features=None, second_dim=4, *args, **kwargs):
1112
- in_features = [in_features, second_dim, 1] if isinstance(in_features, int) else list(in_features)
1113
- if out_features is None:
1114
- out_features = in_features
1115
- out_features = [out_features, second_dim, 1] if isinstance(out_features, int) else list(out_features)
1116
- self.in_features_multidim = in_features
1117
- self.out_features_multidim = out_features
1118
- super().__init__(np.array(in_features).prod(), np.array(out_features).prod())
1119
-
1120
- def forward(self, input_tensor, *args, **kwargs):
1121
- shape = input_tensor.shape
1122
- n_dim = len(self.in_features_multidim)
1123
- input_tensor = input_tensor.reshape(*shape[0:-n_dim], self.in_features)
1124
- output_tensor = super().forward(input_tensor)
1125
- output_tensor = output_tensor.view(*shape[0:-n_dim], *self.out_features_multidim)
1126
- return output_tensor
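# Usage sketch (illustrative shapes, not taken from the deleted file; relies on the torch
# import at the top of this file): with the default second_dim=4, LinearMultiDim(320, 640)
# flattens the trailing (320, 4, 1) features into a single axis of 1280, applies
# nn.Linear(1280, 2560), and reshapes back to (..., 640, 4, 1) — this is how the "flat"
# UNet stands in linear layers for the Conv2d layers of the regular UNet.
layer = LinearMultiDim(320, 640)
x = torch.randn(2, 77, 320, 4, 1)  # (batch, sequence, channels, second_dim, 1)
print(layer(x).shape)  # torch.Size([2, 77, 640, 4, 1])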
1127
-
1128
-
1129
- class ResnetBlockFlat(nn.Module):
1130
- def __init__(
1131
- self,
1132
- *,
1133
- in_channels,
1134
- out_channels=None,
1135
- dropout=0.0,
1136
- temb_channels=512,
1137
- groups=32,
1138
- groups_out=None,
1139
- pre_norm=True,
1140
- eps=1e-6,
1141
- time_embedding_norm="default",
1142
- use_in_shortcut=None,
1143
- second_dim=4,
1144
- **kwargs,
1145
- ):
1146
- super().__init__()
1147
- self.pre_norm = pre_norm
1148
- self.pre_norm = True
1149
-
1150
- in_channels = [in_channels, second_dim, 1] if isinstance(in_channels, int) else list(in_channels)
1151
- self.in_channels_prod = np.array(in_channels).prod()
1152
- self.channels_multidim = in_channels
1153
-
1154
- if out_channels is not None:
1155
- out_channels = [out_channels, second_dim, 1] if isinstance(out_channels, int) else list(out_channels)
1156
- out_channels_prod = np.array(out_channels).prod()
1157
- self.out_channels_multidim = out_channels
1158
- else:
1159
- out_channels_prod = self.in_channels_prod
1160
- self.out_channels_multidim = self.channels_multidim
1161
- self.time_embedding_norm = time_embedding_norm
1162
-
1163
- if groups_out is None:
1164
- groups_out = groups
1165
-
1166
- self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=self.in_channels_prod, eps=eps, affine=True)
1167
- self.conv1 = torch.nn.Conv2d(self.in_channels_prod, out_channels_prod, kernel_size=1, padding=0)
1168
-
1169
- if temb_channels is not None:
1170
- self.time_emb_proj = torch.nn.Linear(temb_channels, out_channels_prod)
1171
- else:
1172
- self.time_emb_proj = None
1173
-
1174
- self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels_prod, eps=eps, affine=True)
1175
- self.dropout = torch.nn.Dropout(dropout)
1176
- self.conv2 = torch.nn.Conv2d(out_channels_prod, out_channels_prod, kernel_size=1, padding=0)
1177
-
1178
- self.nonlinearity = nn.SiLU()
1179
-
1180
- self.use_in_shortcut = (
1181
- self.in_channels_prod != out_channels_prod if use_in_shortcut is None else use_in_shortcut
1182
- )
1183
-
1184
- self.conv_shortcut = None
1185
- if self.use_in_shortcut:
1186
- self.conv_shortcut = torch.nn.Conv2d(
1187
- self.in_channels_prod, out_channels_prod, kernel_size=1, stride=1, padding=0
1188
- )
1189
-
1190
- def forward(self, input_tensor, temb):
1191
- shape = input_tensor.shape
1192
- n_dim = len(self.channels_multidim)
1193
- input_tensor = input_tensor.reshape(*shape[0:-n_dim], self.in_channels_prod, 1, 1)
1194
- input_tensor = input_tensor.view(-1, self.in_channels_prod, 1, 1)
1195
-
1196
- hidden_states = input_tensor
1197
-
1198
- hidden_states = self.norm1(hidden_states)
1199
- hidden_states = self.nonlinearity(hidden_states)
1200
- hidden_states = self.conv1(hidden_states)
1201
-
1202
- if temb is not None:
1203
- temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None]
1204
- hidden_states = hidden_states + temb
1205
-
1206
- hidden_states = self.norm2(hidden_states)
1207
- hidden_states = self.nonlinearity(hidden_states)
1208
-
1209
- hidden_states = self.dropout(hidden_states)
1210
- hidden_states = self.conv2(hidden_states)
1211
-
1212
- if self.conv_shortcut is not None:
1213
- input_tensor = self.conv_shortcut(input_tensor)
1214
-
1215
- output_tensor = input_tensor + hidden_states
1216
-
1217
- output_tensor = output_tensor.view(*shape[0:-n_dim], -1)
1218
- output_tensor = output_tensor.view(*shape[0:-n_dim], *self.out_channels_multidim)
1219
-
1220
- return output_tensor
1221
-
1222
-
1223
- # Copied from diffusers.models.unet_2d_blocks.DownBlock2D with DownBlock2D->DownBlockFlat, ResnetBlock2D->ResnetBlockFlat, Downsample2D->LinearMultiDim
1224
- class DownBlockFlat(nn.Module):
1225
- def __init__(
1226
- self,
1227
- in_channels: int,
1228
- out_channels: int,
1229
- temb_channels: int,
1230
- dropout: float = 0.0,
1231
- num_layers: int = 1,
1232
- resnet_eps: float = 1e-6,
1233
- resnet_time_scale_shift: str = "default",
1234
- resnet_act_fn: str = "swish",
1235
- resnet_groups: int = 32,
1236
- resnet_pre_norm: bool = True,
1237
- output_scale_factor=1.0,
1238
- add_downsample=True,
1239
- downsample_padding=1,
1240
- ):
1241
- super().__init__()
1242
- resnets = []
1243
-
1244
- for i in range(num_layers):
1245
- in_channels = in_channels if i == 0 else out_channels
1246
- resnets.append(
1247
- ResnetBlockFlat(
1248
- in_channels=in_channels,
1249
- out_channels=out_channels,
1250
- temb_channels=temb_channels,
1251
- eps=resnet_eps,
1252
- groups=resnet_groups,
1253
- dropout=dropout,
1254
- time_embedding_norm=resnet_time_scale_shift,
1255
- non_linearity=resnet_act_fn,
1256
- output_scale_factor=output_scale_factor,
1257
- pre_norm=resnet_pre_norm,
1258
- )
1259
- )
1260
-
1261
- self.resnets = nn.ModuleList(resnets)
1262
-
1263
- if add_downsample:
1264
- self.downsamplers = nn.ModuleList(
1265
- [
1266
- LinearMultiDim(
1267
- out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
1268
- )
1269
- ]
1270
- )
1271
- else:
1272
- self.downsamplers = None
1273
-
1274
- self.gradient_checkpointing = False
1275
-
1276
- def forward(self, hidden_states, temb=None):
1277
- output_states = ()
1278
-
1279
- for resnet in self.resnets:
1280
- if self.training and self.gradient_checkpointing:
1281
-
1282
- def create_custom_forward(module):
1283
- def custom_forward(*inputs):
1284
- return module(*inputs)
1285
-
1286
- return custom_forward
1287
-
1288
- if is_torch_version(">=", "1.11.0"):
1289
- hidden_states = torch.utils.checkpoint.checkpoint(
1290
- create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
1291
- )
1292
- else:
1293
- hidden_states = torch.utils.checkpoint.checkpoint(
1294
- create_custom_forward(resnet), hidden_states, temb
1295
- )
1296
- else:
1297
- hidden_states = resnet(hidden_states, temb)
1298
-
1299
- output_states = output_states + (hidden_states,)
1300
-
1301
- if self.downsamplers is not None:
1302
- for downsampler in self.downsamplers:
1303
- hidden_states = downsampler(hidden_states)
1304
-
1305
- output_states = output_states + (hidden_states,)
1306
-
1307
- return hidden_states, output_states
1308
-
1309
-
1310
- # Copied from diffusers.models.unet_2d_blocks.CrossAttnDownBlock2D with CrossAttnDownBlock2D->CrossAttnDownBlockFlat, ResnetBlock2D->ResnetBlockFlat, Downsample2D->LinearMultiDim
1311
- class CrossAttnDownBlockFlat(nn.Module):
1312
- def __init__(
1313
- self,
1314
- in_channels: int,
1315
- out_channels: int,
1316
- temb_channels: int,
1317
- dropout: float = 0.0,
1318
- num_layers: int = 1,
1319
- transformer_layers_per_block: int = 1,
1320
- resnet_eps: float = 1e-6,
1321
- resnet_time_scale_shift: str = "default",
1322
- resnet_act_fn: str = "swish",
1323
- resnet_groups: int = 32,
1324
- resnet_pre_norm: bool = True,
1325
- num_attention_heads=1,
1326
- cross_attention_dim=1280,
1327
- output_scale_factor=1.0,
1328
- downsample_padding=1,
1329
- add_downsample=True,
1330
- dual_cross_attention=False,
1331
- use_linear_projection=False,
1332
- only_cross_attention=False,
1333
- upcast_attention=False,
1334
- ):
1335
- super().__init__()
1336
- resnets = []
1337
- attentions = []
1338
-
1339
- self.has_cross_attention = True
1340
- self.num_attention_heads = num_attention_heads
1341
-
1342
- for i in range(num_layers):
1343
- in_channels = in_channels if i == 0 else out_channels
1344
- resnets.append(
1345
- ResnetBlockFlat(
1346
- in_channels=in_channels,
1347
- out_channels=out_channels,
1348
- temb_channels=temb_channels,
1349
- eps=resnet_eps,
1350
- groups=resnet_groups,
1351
- dropout=dropout,
1352
- time_embedding_norm=resnet_time_scale_shift,
1353
- non_linearity=resnet_act_fn,
1354
- output_scale_factor=output_scale_factor,
1355
- pre_norm=resnet_pre_norm,
1356
- )
1357
- )
1358
- if not dual_cross_attention:
1359
- attentions.append(
1360
- Transformer2DModel(
1361
- num_attention_heads,
1362
- out_channels // num_attention_heads,
1363
- in_channels=out_channels,
1364
- num_layers=transformer_layers_per_block,
1365
- cross_attention_dim=cross_attention_dim,
1366
- norm_num_groups=resnet_groups,
1367
- use_linear_projection=use_linear_projection,
1368
- only_cross_attention=only_cross_attention,
1369
- upcast_attention=upcast_attention,
1370
- )
1371
- )
1372
- else:
1373
- attentions.append(
1374
- DualTransformer2DModel(
1375
- num_attention_heads,
1376
- out_channels // num_attention_heads,
1377
- in_channels=out_channels,
1378
- num_layers=1,
1379
- cross_attention_dim=cross_attention_dim,
1380
- norm_num_groups=resnet_groups,
1381
- )
1382
- )
1383
- self.attentions = nn.ModuleList(attentions)
1384
- self.resnets = nn.ModuleList(resnets)
1385
-
1386
- if add_downsample:
1387
- self.downsamplers = nn.ModuleList(
1388
- [
1389
- LinearMultiDim(
1390
- out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
1391
- )
1392
- ]
1393
- )
1394
- else:
1395
- self.downsamplers = None
1396
-
1397
- self.gradient_checkpointing = False
1398
-
1399
- def forward(
1400
- self,
1401
- hidden_states: torch.FloatTensor,
1402
- temb: Optional[torch.FloatTensor] = None,
1403
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
1404
- attention_mask: Optional[torch.FloatTensor] = None,
1405
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1406
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
1407
- additional_residuals=None,
1408
- ):
1409
- output_states = ()
1410
-
1411
- blocks = list(zip(self.resnets, self.attentions))
1412
-
1413
- for i, (resnet, attn) in enumerate(blocks):
1414
- if self.training and self.gradient_checkpointing:
1415
-
1416
- def create_custom_forward(module, return_dict=None):
1417
- def custom_forward(*inputs):
1418
- if return_dict is not None:
1419
- return module(*inputs, return_dict=return_dict)
1420
- else:
1421
- return module(*inputs)
1422
-
1423
- return custom_forward
1424
-
1425
- ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
1426
- hidden_states = torch.utils.checkpoint.checkpoint(
1427
- create_custom_forward(resnet),
1428
- hidden_states,
1429
- temb,
1430
- **ckpt_kwargs,
1431
- )
1432
- hidden_states = torch.utils.checkpoint.checkpoint(
1433
- create_custom_forward(attn, return_dict=False),
1434
- hidden_states,
1435
- encoder_hidden_states,
1436
- None, # timestep
1437
- None, # class_labels
1438
- cross_attention_kwargs,
1439
- attention_mask,
1440
- encoder_attention_mask,
1441
- **ckpt_kwargs,
1442
- )[0]
1443
- else:
1444
- hidden_states = resnet(hidden_states, temb)
1445
- hidden_states = attn(
1446
- hidden_states,
1447
- encoder_hidden_states=encoder_hidden_states,
1448
- cross_attention_kwargs=cross_attention_kwargs,
1449
- attention_mask=attention_mask,
1450
- encoder_attention_mask=encoder_attention_mask,
1451
- return_dict=False,
1452
- )[0]
1453
-
1454
- # apply additional residuals to the output of the last pair of resnet and attention blocks
1455
- if i == len(blocks) - 1 and additional_residuals is not None:
1456
- hidden_states = hidden_states + additional_residuals
1457
-
1458
- output_states = output_states + (hidden_states,)
1459
-
1460
- if self.downsamplers is not None:
1461
- for downsampler in self.downsamplers:
1462
- hidden_states = downsampler(hidden_states)
1463
-
1464
- output_states = output_states + (hidden_states,)
1465
-
1466
- return hidden_states, output_states
1467
-
1468
-
1469
- # Copied from diffusers.models.unet_2d_blocks.UpBlock2D with UpBlock2D->UpBlockFlat, ResnetBlock2D->ResnetBlockFlat, Upsample2D->LinearMultiDim
1470
- class UpBlockFlat(nn.Module):
1471
- def __init__(
1472
- self,
1473
- in_channels: int,
1474
- prev_output_channel: int,
1475
- out_channels: int,
1476
- temb_channels: int,
1477
- dropout: float = 0.0,
1478
- num_layers: int = 1,
1479
- resnet_eps: float = 1e-6,
1480
- resnet_time_scale_shift: str = "default",
1481
- resnet_act_fn: str = "swish",
1482
- resnet_groups: int = 32,
1483
- resnet_pre_norm: bool = True,
1484
- output_scale_factor=1.0,
1485
- add_upsample=True,
1486
- ):
1487
- super().__init__()
1488
- resnets = []
1489
-
1490
- for i in range(num_layers):
1491
- res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
1492
- resnet_in_channels = prev_output_channel if i == 0 else out_channels
1493
-
1494
- resnets.append(
1495
- ResnetBlockFlat(
1496
- in_channels=resnet_in_channels + res_skip_channels,
1497
- out_channels=out_channels,
1498
- temb_channels=temb_channels,
1499
- eps=resnet_eps,
1500
- groups=resnet_groups,
1501
- dropout=dropout,
1502
- time_embedding_norm=resnet_time_scale_shift,
1503
- non_linearity=resnet_act_fn,
1504
- output_scale_factor=output_scale_factor,
1505
- pre_norm=resnet_pre_norm,
1506
- )
1507
- )
1508
-
1509
- self.resnets = nn.ModuleList(resnets)
1510
-
1511
- if add_upsample:
1512
- self.upsamplers = nn.ModuleList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels)])
1513
- else:
1514
- self.upsamplers = None
1515
-
1516
- self.gradient_checkpointing = False
1517
-
1518
- def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
1519
- for resnet in self.resnets:
1520
- # pop res hidden states
1521
- res_hidden_states = res_hidden_states_tuple[-1]
1522
- res_hidden_states_tuple = res_hidden_states_tuple[:-1]
1523
- hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
1524
-
1525
- if self.training and self.gradient_checkpointing:
1526
-
1527
- def create_custom_forward(module):
1528
- def custom_forward(*inputs):
1529
- return module(*inputs)
1530
-
1531
- return custom_forward
1532
-
1533
- if is_torch_version(">=", "1.11.0"):
1534
- hidden_states = torch.utils.checkpoint.checkpoint(
1535
- create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
1536
- )
1537
- else:
1538
- hidden_states = torch.utils.checkpoint.checkpoint(
1539
- create_custom_forward(resnet), hidden_states, temb
1540
- )
1541
- else:
1542
- hidden_states = resnet(hidden_states, temb)
1543
-
1544
- if self.upsamplers is not None:
1545
- for upsampler in self.upsamplers:
1546
- hidden_states = upsampler(hidden_states, upsample_size)
1547
-
1548
- return hidden_states
1549
-
1550
-
1551
- # Copied from diffusers.models.unet_2d_blocks.CrossAttnUpBlock2D with CrossAttnUpBlock2D->CrossAttnUpBlockFlat, ResnetBlock2D->ResnetBlockFlat, Upsample2D->LinearMultiDim
1552
- class CrossAttnUpBlockFlat(nn.Module):
1553
- def __init__(
1554
- self,
1555
- in_channels: int,
1556
- out_channels: int,
1557
- prev_output_channel: int,
1558
- temb_channels: int,
1559
- dropout: float = 0.0,
1560
- num_layers: int = 1,
1561
- transformer_layers_per_block: int = 1,
1562
- resnet_eps: float = 1e-6,
1563
- resnet_time_scale_shift: str = "default",
1564
- resnet_act_fn: str = "swish",
1565
- resnet_groups: int = 32,
1566
- resnet_pre_norm: bool = True,
1567
- num_attention_heads=1,
1568
- cross_attention_dim=1280,
1569
- output_scale_factor=1.0,
1570
- add_upsample=True,
1571
- dual_cross_attention=False,
1572
- use_linear_projection=False,
1573
- only_cross_attention=False,
1574
- upcast_attention=False,
1575
- ):
1576
- super().__init__()
1577
- resnets = []
1578
- attentions = []
1579
-
1580
- self.has_cross_attention = True
1581
- self.num_attention_heads = num_attention_heads
1582
-
1583
- for i in range(num_layers):
1584
- res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
1585
- resnet_in_channels = prev_output_channel if i == 0 else out_channels
1586
-
1587
- resnets.append(
1588
- ResnetBlockFlat(
1589
- in_channels=resnet_in_channels + res_skip_channels,
1590
- out_channels=out_channels,
1591
- temb_channels=temb_channels,
1592
- eps=resnet_eps,
1593
- groups=resnet_groups,
1594
- dropout=dropout,
1595
- time_embedding_norm=resnet_time_scale_shift,
1596
- non_linearity=resnet_act_fn,
1597
- output_scale_factor=output_scale_factor,
1598
- pre_norm=resnet_pre_norm,
1599
- )
1600
- )
1601
- if not dual_cross_attention:
1602
- attentions.append(
1603
- Transformer2DModel(
1604
- num_attention_heads,
1605
- out_channels // num_attention_heads,
1606
- in_channels=out_channels,
1607
- num_layers=transformer_layers_per_block,
1608
- cross_attention_dim=cross_attention_dim,
1609
- norm_num_groups=resnet_groups,
1610
- use_linear_projection=use_linear_projection,
1611
- only_cross_attention=only_cross_attention,
1612
- upcast_attention=upcast_attention,
1613
- )
1614
- )
1615
- else:
1616
- attentions.append(
1617
- DualTransformer2DModel(
1618
- num_attention_heads,
1619
- out_channels // num_attention_heads,
1620
- in_channels=out_channels,
1621
- num_layers=1,
1622
- cross_attention_dim=cross_attention_dim,
1623
- norm_num_groups=resnet_groups,
1624
- )
1625
- )
1626
- self.attentions = nn.ModuleList(attentions)
1627
- self.resnets = nn.ModuleList(resnets)
1628
-
1629
- if add_upsample:
1630
- self.upsamplers = nn.ModuleList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels)])
1631
- else:
1632
- self.upsamplers = None
1633
-
1634
- self.gradient_checkpointing = False
1635
-
1636
- def forward(
1637
- self,
1638
- hidden_states: torch.FloatTensor,
1639
- res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
1640
- temb: Optional[torch.FloatTensor] = None,
1641
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
1642
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1643
- upsample_size: Optional[int] = None,
1644
- attention_mask: Optional[torch.FloatTensor] = None,
1645
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
1646
- ):
1647
- for resnet, attn in zip(self.resnets, self.attentions):
1648
- # pop res hidden states
1649
- res_hidden_states = res_hidden_states_tuple[-1]
1650
- res_hidden_states_tuple = res_hidden_states_tuple[:-1]
1651
- hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
1652
-
1653
- if self.training and self.gradient_checkpointing:
1654
-
1655
- def create_custom_forward(module, return_dict=None):
1656
- def custom_forward(*inputs):
1657
- if return_dict is not None:
1658
- return module(*inputs, return_dict=return_dict)
1659
- else:
1660
- return module(*inputs)
1661
-
1662
- return custom_forward
1663
-
1664
- ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
1665
- hidden_states = torch.utils.checkpoint.checkpoint(
1666
- create_custom_forward(resnet),
1667
- hidden_states,
1668
- temb,
1669
- **ckpt_kwargs,
1670
- )
1671
- hidden_states = torch.utils.checkpoint.checkpoint(
1672
- create_custom_forward(attn, return_dict=False),
1673
- hidden_states,
1674
- encoder_hidden_states,
1675
- None, # timestep
1676
- None, # class_labels
1677
- cross_attention_kwargs,
1678
- attention_mask,
1679
- encoder_attention_mask,
1680
- **ckpt_kwargs,
1681
- )[0]
1682
- else:
1683
- hidden_states = resnet(hidden_states, temb)
1684
- hidden_states = attn(
1685
- hidden_states,
1686
- encoder_hidden_states=encoder_hidden_states,
1687
- cross_attention_kwargs=cross_attention_kwargs,
1688
- attention_mask=attention_mask,
1689
- encoder_attention_mask=encoder_attention_mask,
1690
- return_dict=False,
1691
- )[0]
1692
-
1693
- if self.upsamplers is not None:
1694
- for upsampler in self.upsamplers:
1695
- hidden_states = upsampler(hidden_states, upsample_size)
1696
-
1697
- return hidden_states
1698
-
1699
-
1700
- # Copied from diffusers.models.unet_2d_blocks.UNetMidBlock2DCrossAttn with UNetMidBlock2DCrossAttn->UNetMidBlockFlatCrossAttn, ResnetBlock2D->ResnetBlockFlat
1701
- class UNetMidBlockFlatCrossAttn(nn.Module):
1702
- def __init__(
1703
- self,
1704
- in_channels: int,
1705
- temb_channels: int,
1706
- dropout: float = 0.0,
1707
- num_layers: int = 1,
1708
- transformer_layers_per_block: int = 1,
1709
- resnet_eps: float = 1e-6,
1710
- resnet_time_scale_shift: str = "default",
1711
- resnet_act_fn: str = "swish",
1712
- resnet_groups: int = 32,
1713
- resnet_pre_norm: bool = True,
1714
- num_attention_heads=1,
1715
- output_scale_factor=1.0,
1716
- cross_attention_dim=1280,
1717
- dual_cross_attention=False,
1718
- use_linear_projection=False,
1719
- upcast_attention=False,
1720
- ):
1721
- super().__init__()
1722
-
1723
- self.has_cross_attention = True
1724
- self.num_attention_heads = num_attention_heads
1725
- resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
1726
-
1727
- # there is always at least one resnet
1728
- resnets = [
1729
- ResnetBlockFlat(
1730
- in_channels=in_channels,
1731
- out_channels=in_channels,
1732
- temb_channels=temb_channels,
1733
- eps=resnet_eps,
1734
- groups=resnet_groups,
1735
- dropout=dropout,
1736
- time_embedding_norm=resnet_time_scale_shift,
1737
- non_linearity=resnet_act_fn,
1738
- output_scale_factor=output_scale_factor,
1739
- pre_norm=resnet_pre_norm,
1740
- )
1741
- ]
1742
- attentions = []
1743
-
1744
- for _ in range(num_layers):
1745
- if not dual_cross_attention:
1746
- attentions.append(
1747
- Transformer2DModel(
1748
- num_attention_heads,
1749
- in_channels // num_attention_heads,
1750
- in_channels=in_channels,
1751
- num_layers=transformer_layers_per_block,
1752
- cross_attention_dim=cross_attention_dim,
1753
- norm_num_groups=resnet_groups,
1754
- use_linear_projection=use_linear_projection,
1755
- upcast_attention=upcast_attention,
1756
- )
1757
- )
1758
- else:
1759
- attentions.append(
1760
- DualTransformer2DModel(
1761
- num_attention_heads,
1762
- in_channels // num_attention_heads,
1763
- in_channels=in_channels,
1764
- num_layers=1,
1765
- cross_attention_dim=cross_attention_dim,
1766
- norm_num_groups=resnet_groups,
1767
- )
1768
- )
1769
- resnets.append(
1770
- ResnetBlockFlat(
1771
- in_channels=in_channels,
1772
- out_channels=in_channels,
1773
- temb_channels=temb_channels,
1774
- eps=resnet_eps,
1775
- groups=resnet_groups,
1776
- dropout=dropout,
1777
- time_embedding_norm=resnet_time_scale_shift,
1778
- non_linearity=resnet_act_fn,
1779
- output_scale_factor=output_scale_factor,
1780
- pre_norm=resnet_pre_norm,
1781
- )
1782
- )
1783
-
1784
- self.attentions = nn.ModuleList(attentions)
1785
- self.resnets = nn.ModuleList(resnets)
1786
-
1787
- def forward(
1788
- self,
1789
- hidden_states: torch.FloatTensor,
1790
- temb: Optional[torch.FloatTensor] = None,
1791
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
1792
- attention_mask: Optional[torch.FloatTensor] = None,
1793
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1794
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
1795
- ) -> torch.FloatTensor:
1796
- hidden_states = self.resnets[0](hidden_states, temb)
1797
- for attn, resnet in zip(self.attentions, self.resnets[1:]):
1798
- hidden_states = attn(
1799
- hidden_states,
1800
- encoder_hidden_states=encoder_hidden_states,
1801
- cross_attention_kwargs=cross_attention_kwargs,
1802
- attention_mask=attention_mask,
1803
- encoder_attention_mask=encoder_attention_mask,
1804
- return_dict=False,
1805
- )[0]
1806
- hidden_states = resnet(hidden_states, temb)
1807
-
1808
- return hidden_states
1809
-
1810
-
1811
- # Copied from diffusers.models.unet_2d_blocks.UNetMidBlock2DSimpleCrossAttn with UNetMidBlock2DSimpleCrossAttn->UNetMidBlockFlatSimpleCrossAttn, ResnetBlock2D->ResnetBlockFlat
1812
- class UNetMidBlockFlatSimpleCrossAttn(nn.Module):
1813
- def __init__(
1814
- self,
1815
- in_channels: int,
1816
- temb_channels: int,
1817
- dropout: float = 0.0,
1818
- num_layers: int = 1,
1819
- resnet_eps: float = 1e-6,
1820
- resnet_time_scale_shift: str = "default",
1821
- resnet_act_fn: str = "swish",
1822
- resnet_groups: int = 32,
1823
- resnet_pre_norm: bool = True,
1824
- attention_head_dim=1,
1825
- output_scale_factor=1.0,
1826
- cross_attention_dim=1280,
1827
- skip_time_act=False,
1828
- only_cross_attention=False,
1829
- cross_attention_norm=None,
1830
- ):
1831
- super().__init__()
1832
-
1833
- self.has_cross_attention = True
1834
-
1835
- self.attention_head_dim = attention_head_dim
1836
- resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
1837
-
1838
- self.num_heads = in_channels // self.attention_head_dim
1839
-
1840
- # there is always at least one resnet
1841
- resnets = [
1842
- ResnetBlockFlat(
1843
- in_channels=in_channels,
1844
- out_channels=in_channels,
1845
- temb_channels=temb_channels,
1846
- eps=resnet_eps,
1847
- groups=resnet_groups,
1848
- dropout=dropout,
1849
- time_embedding_norm=resnet_time_scale_shift,
1850
- non_linearity=resnet_act_fn,
1851
- output_scale_factor=output_scale_factor,
1852
- pre_norm=resnet_pre_norm,
1853
- skip_time_act=skip_time_act,
1854
- )
1855
- ]
1856
- attentions = []
1857
-
1858
- for _ in range(num_layers):
1859
- processor = (
1860
- AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor()
1861
- )
1862
-
1863
- attentions.append(
1864
- Attention(
1865
- query_dim=in_channels,
1866
- cross_attention_dim=in_channels,
1867
- heads=self.num_heads,
1868
- dim_head=self.attention_head_dim,
1869
- added_kv_proj_dim=cross_attention_dim,
1870
- norm_num_groups=resnet_groups,
1871
- bias=True,
1872
- upcast_softmax=True,
1873
- only_cross_attention=only_cross_attention,
1874
- cross_attention_norm=cross_attention_norm,
1875
- processor=processor,
1876
- )
1877
- )
1878
- resnets.append(
1879
- ResnetBlockFlat(
1880
- in_channels=in_channels,
1881
- out_channels=in_channels,
1882
- temb_channels=temb_channels,
1883
- eps=resnet_eps,
1884
- groups=resnet_groups,
1885
- dropout=dropout,
1886
- time_embedding_norm=resnet_time_scale_shift,
1887
- non_linearity=resnet_act_fn,
1888
- output_scale_factor=output_scale_factor,
1889
- pre_norm=resnet_pre_norm,
1890
- skip_time_act=skip_time_act,
1891
- )
1892
- )
1893
-
1894
- self.attentions = nn.ModuleList(attentions)
1895
- self.resnets = nn.ModuleList(resnets)
1896
-
1897
- def forward(
1898
- self,
1899
- hidden_states: torch.FloatTensor,
1900
- temb: Optional[torch.FloatTensor] = None,
1901
- encoder_hidden_states: Optional[torch.FloatTensor] = None,
1902
- attention_mask: Optional[torch.FloatTensor] = None,
1903
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1904
- encoder_attention_mask: Optional[torch.FloatTensor] = None,
1905
- ):
1906
- cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
1907
-
1908
- if attention_mask is None:
1909
- # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask.
1910
- mask = None if encoder_hidden_states is None else encoder_attention_mask
1911
- else:
1912
- # when attention_mask is defined: we don't even check for encoder_attention_mask.
1913
- # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks.
1914
- # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask.
1915
- # then we can simplify this whole if/else block to:
1916
- # mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask
1917
- mask = attention_mask
1918
-
1919
- hidden_states = self.resnets[0](hidden_states, temb)
1920
- for attn, resnet in zip(self.attentions, self.resnets[1:]):
1921
- # attn
1922
- hidden_states = attn(
1923
- hidden_states,
1924
- encoder_hidden_states=encoder_hidden_states,
1925
- attention_mask=mask,
1926
- **cross_attention_kwargs,
1927
- )
1928
-
1929
- # resnet
1930
- hidden_states = resnet(hidden_states, temb)
1931
-
1932
- return hidden_states
 
spaces/Andy1621/uniformer_image_detection/configs/vfnet/vfnet_r50_fpn_1x_coco.py DELETED
@@ -1,108 +0,0 @@
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
    type='VFNet',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        extra_convs_on_inputs=False,  # use P5
        num_outs=5,
        relu_before_extra_convs=True),
    bbox_head=dict(
        type='VFNetHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=3,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        center_sampling=False,
        dcn_on_last_conv=False,
        use_atss=True,
        use_vfl=True,
        loss_cls=dict(
            type='VarifocalLoss',
            use_sigmoid=True,
            alpha=0.75,
            gamma=2.0,
            iou_weighted=True,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
        loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

# data setting
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='DefaultFormatBundle'),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))

# optimizer
optimizer = dict(
    lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.1,
    step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
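A minimal usage sketch for a config like the one above, assuming an MMDetection 2.x environment with the file at the path shown; the snippet is illustrative and not part of the deleted repository:

# Sketch only: load the config with mmcv and build the detector it describes.
# Assumes MMDetection 2.x and that the _base_ configs referenced above exist on disk.
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/vfnet/vfnet_r50_fpn_1x_coco.py')
print(cfg.model.bbox_head.type)   # 'VFNetHead'
model = build_detector(cfg.model)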
 
spaces/Andy1621/uniformer_image_detection/mmdet/datasets/voc.py DELETED
@@ -1,93 +0,0 @@
from collections import OrderedDict

from mmcv.utils import print_log

from mmdet.core import eval_map, eval_recalls
from .builder import DATASETS
from .xml_style import XMLDataset


@DATASETS.register_module()
class VOCDataset(XMLDataset):

    CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
               'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
               'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
               'tvmonitor')

    def __init__(self, **kwargs):
        super(VOCDataset, self).__init__(**kwargs)
        if 'VOC2007' in self.img_prefix:
            self.year = 2007
        elif 'VOC2012' in self.img_prefix:
            self.year = 2012
        else:
            raise ValueError('Cannot infer dataset year from img_prefix')

    def evaluate(self,
                 results,
                 metric='mAP',
                 logger=None,
                 proposal_nums=(100, 300, 1000),
                 iou_thr=0.5,
                 scale_ranges=None):
        """Evaluate in VOC protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'mAP', 'recall'.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Default: None.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thr (float | list[float]): IoU threshold. Default: 0.5.
            scale_ranges (list[tuple], optional): Scale ranges for evaluating
                mAP. If not specified, all bounding boxes would be included in
                evaluation. Default: None.

        Returns:
            dict[str, float]: AP/recall metrics.
        """

        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
        if metric == 'mAP':
            assert isinstance(iou_thrs, list)
            if self.year == 2007:
                ds_name = 'voc07'
            else:
                ds_name = self.CLASSES
            mean_aps = []
            for iou_thr in iou_thrs:
                print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
                mean_ap, _ = eval_map(
                    results,
                    annotations,
                    scale_ranges=None,
                    iou_thr=iou_thr,
                    dataset=ds_name,
                    logger=logger)
                mean_aps.append(mean_ap)
                eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
            eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            recalls = eval_recalls(
                gt_bboxes, results, proposal_nums, iou_thr, logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thr):
                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]
            if recalls.shape[1] > 1:
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
        return eval_results
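A hedged sketch of how this evaluate method is driven; `dataset` and `results` below are placeholders (a built VOCDataset and the per-image detection lists that MMDetection's test scripts produce), not values defined in the deleted file:

# Sketch only: evaluate VOC-style detections at IoU 0.5.
# `dataset` is assumed to be an already-built VOCDataset; `results` is the
# list of per-image, per-class detection arrays from tools/test.py.
metrics = dataset.evaluate(results, metric='mAP', iou_thr=0.5)
print(metrics['AP50'], metrics['mAP'])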
 
spaces/Andy1621/uniformer_image_segmentation/configs/danet/danet_r50-d8_512x512_160k_ade20k.py DELETED
@@ -1,6 +0,0 @@
_base_ = [
    '../_base_/models/danet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
 
spaces/Andy1621/uniformer_image_segmentation/configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
_base_ = './encnet_r50-d8_769x769_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
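Both of the segmentation configs above rely on mmcv's `_base_` inheritance: the two-line file only overrides fields of its base config. A minimal sketch of resolving it, assuming an MMSegmentation checkout where the referenced base configs exist:

# Sketch only: resolve the inherited config and inspect the overridden fields.
from mmcv import Config

cfg = Config.fromfile('configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py')
print(cfg.model.backbone.depth)   # 101, overridden by this file
print(cfg.model.pretrained)       # 'open-mmlab://resnet101_v1c'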
 
spaces/Andy1621/uniformerv2_demo/transforms.py DELETED
@@ -1,443 +0,0 @@
1
- import torchvision
2
- import random
3
- from PIL import Image, ImageOps
4
- import numpy as np
5
- import numbers
6
- import math
7
- import torch
8
-
9
-
10
- class GroupRandomCrop(object):
11
- def __init__(self, size):
12
- if isinstance(size, numbers.Number):
13
- self.size = (int(size), int(size))
14
- else:
15
- self.size = size
16
-
17
- def __call__(self, img_group):
18
-
19
- w, h = img_group[0].size
20
- th, tw = self.size
21
-
22
- out_images = list()
23
-
24
- x1 = random.randint(0, w - tw)
25
- y1 = random.randint(0, h - th)
26
-
27
- for img in img_group:
28
- assert(img.size[0] == w and img.size[1] == h)
29
- if w == tw and h == th:
30
- out_images.append(img)
31
- else:
32
- out_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
33
-
34
- return out_images
35
-
36
-
37
- class MultiGroupRandomCrop(object):
38
- def __init__(self, size, groups=1):
39
- if isinstance(size, numbers.Number):
40
- self.size = (int(size), int(size))
41
- else:
42
- self.size = size
43
- self.groups = groups
44
-
45
- def __call__(self, img_group):
46
-
47
- w, h = img_group[0].size
48
- th, tw = self.size
49
-
50
- out_images = list()
51
-
52
- for i in range(self.groups):
53
- x1 = random.randint(0, w - tw)
54
- y1 = random.randint(0, h - th)
55
-
56
- for img in img_group:
57
- assert(img.size[0] == w and img.size[1] == h)
58
- if w == tw and h == th:
59
- out_images.append(img)
60
- else:
61
- out_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
62
-
63
- return out_images
64
-
65
-
66
- class GroupCenterCrop(object):
67
- def __init__(self, size):
68
- self.worker = torchvision.transforms.CenterCrop(size)
69
-
70
- def __call__(self, img_group):
71
- return [self.worker(img) for img in img_group]
72
-
73
-
74
- class GroupRandomHorizontalFlip(object):
75
- """Randomly horizontally flips the given PIL.Image with a probability of 0.5
76
- """
77
-
78
- def __init__(self, is_flow=False):
79
- self.is_flow = is_flow
80
-
81
- def __call__(self, img_group, is_flow=False):
82
- v = random.random()
83
- if v < 0.5:
84
- ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group]
85
- if self.is_flow:
86
- for i in range(0, len(ret), 2):
87
- # invert flow pixel values when flipping
88
- ret[i] = ImageOps.invert(ret[i])
89
- return ret
90
- else:
91
- return img_group
92
-
93
-
94
- class GroupNormalize(object):
95
- def __init__(self, mean, std):
96
- self.mean = mean
97
- self.std = std
98
-
99
- def __call__(self, tensor):
100
- rep_mean = self.mean * (tensor.size()[0] // len(self.mean))
101
- rep_std = self.std * (tensor.size()[0] // len(self.std))
102
-
103
- # TODO: make efficient
104
- for t, m, s in zip(tensor, rep_mean, rep_std):
105
- t.sub_(m).div_(s)
106
-
107
- return tensor
108
-
109
-
110
- class GroupScale(object):
111
- """ Rescales the input PIL.Image to the given 'size'.
112
- 'size' will be the size of the smaller edge.
113
- For example, if height > width, then image will be
114
- rescaled to (size * height / width, size)
115
- size: size of the smaller edge
116
- interpolation: Default: PIL.Image.BILINEAR
117
- """
118
-
119
- def __init__(self, size, interpolation=Image.BILINEAR):
120
- self.worker = torchvision.transforms.Resize(size, interpolation)
121
-
122
- def __call__(self, img_group):
123
- return [self.worker(img) for img in img_group]
124
-
125
-
126
- class GroupOverSample(object):
127
- def __init__(self, crop_size, scale_size=None, flip=True):
128
- self.crop_size = crop_size if not isinstance(
129
- crop_size, int) else (crop_size, crop_size)
130
-
131
- if scale_size is not None:
132
- self.scale_worker = GroupScale(scale_size)
133
- else:
134
- self.scale_worker = None
135
- self.flip = flip
136
-
137
- def __call__(self, img_group):
138
-
139
- if self.scale_worker is not None:
140
- img_group = self.scale_worker(img_group)
141
-
142
- image_w, image_h = img_group[0].size
143
- crop_w, crop_h = self.crop_size
144
-
145
- offsets = GroupMultiScaleCrop.fill_fix_offset(
146
- False, image_w, image_h, crop_w, crop_h)
147
- oversample_group = list()
148
- for o_w, o_h in offsets:
149
- normal_group = list()
150
- flip_group = list()
151
- for i, img in enumerate(img_group):
152
- crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
153
- normal_group.append(crop)
154
- flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)
155
-
156
- if img.mode == 'L' and i % 2 == 0:
157
- flip_group.append(ImageOps.invert(flip_crop))
158
- else:
159
- flip_group.append(flip_crop)
160
-
161
- oversample_group.extend(normal_group)
162
- if self.flip:
163
- oversample_group.extend(flip_group)
164
- return oversample_group
165
-
166
-
167
- class GroupFullResSample(object):
168
- def __init__(self, crop_size, scale_size=None, flip=True):
169
- self.crop_size = crop_size if not isinstance(
170
- crop_size, int) else (crop_size, crop_size)
171
-
172
- if scale_size is not None:
173
- self.scale_worker = GroupScale(scale_size)
174
- else:
175
- self.scale_worker = None
176
- self.flip = flip
177
-
178
- def __call__(self, img_group):
179
-
180
- if self.scale_worker is not None:
181
- img_group = self.scale_worker(img_group)
182
-
183
- image_w, image_h = img_group[0].size
184
- crop_w, crop_h = self.crop_size
185
-
186
- w_step = (image_w - crop_w) // 4
187
- h_step = (image_h - crop_h) // 4
188
-
189
- offsets = list()
190
- offsets.append((0 * w_step, 2 * h_step)) # left
191
- offsets.append((4 * w_step, 2 * h_step)) # right
192
- offsets.append((2 * w_step, 2 * h_step)) # center
193
-
194
- oversample_group = list()
195
- for o_w, o_h in offsets:
196
- normal_group = list()
197
- flip_group = list()
198
- for i, img in enumerate(img_group):
199
- crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
200
- normal_group.append(crop)
201
- if self.flip:
202
- flip_crop = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)
203
-
204
- if img.mode == 'L' and i % 2 == 0:
205
- flip_group.append(ImageOps.invert(flip_crop))
206
- else:
207
- flip_group.append(flip_crop)
208
-
209
- oversample_group.extend(normal_group)
210
- oversample_group.extend(flip_group)
211
- return oversample_group
212
-
213
-
214
- class GroupMultiScaleCrop(object):
215
-
216
- def __init__(self, input_size, scales=None, max_distort=1,
217
- fix_crop=True, more_fix_crop=True):
218
- self.scales = scales if scales is not None else [1, .875, .75, .66]
219
- self.max_distort = max_distort
220
- self.fix_crop = fix_crop
221
- self.more_fix_crop = more_fix_crop
222
- self.input_size = input_size if not isinstance(input_size, int) else [
223
- input_size, input_size]
224
- self.interpolation = Image.BILINEAR
225
-
226
- def __call__(self, img_group):
227
-
228
- im_size = img_group[0].size
229
-
230
- crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size)
231
- crop_img_group = [
232
- img.crop(
233
- (offset_w,
234
- offset_h,
235
- offset_w +
236
- crop_w,
237
- offset_h +
238
- crop_h)) for img in img_group]
239
- ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation)
240
- for img in crop_img_group]
241
- return ret_img_group
242
-
243
- def _sample_crop_size(self, im_size):
244
- image_w, image_h = im_size[0], im_size[1]
245
-
246
- # find a crop size
247
- base_size = min(image_w, image_h)
248
- crop_sizes = [int(base_size * x) for x in self.scales]
249
- crop_h = [
250
- self.input_size[1] if abs(
251
- x - self.input_size[1]) < 3 else x for x in crop_sizes]
252
- crop_w = [
253
- self.input_size[0] if abs(
254
- x - self.input_size[0]) < 3 else x for x in crop_sizes]
255
-
256
- pairs = []
257
- for i, h in enumerate(crop_h):
258
- for j, w in enumerate(crop_w):
259
- if abs(i - j) <= self.max_distort:
260
- pairs.append((w, h))
261
-
262
- crop_pair = random.choice(pairs)
263
- if not self.fix_crop:
264
- w_offset = random.randint(0, image_w - crop_pair[0])
265
- h_offset = random.randint(0, image_h - crop_pair[1])
266
- else:
267
- w_offset, h_offset = self._sample_fix_offset(
268
- image_w, image_h, crop_pair[0], crop_pair[1])
269
-
270
- return crop_pair[0], crop_pair[1], w_offset, h_offset
271
-
272
- def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h):
273
- offsets = self.fill_fix_offset(
274
- self.more_fix_crop, image_w, image_h, crop_w, crop_h)
275
- return random.choice(offsets)
276
-
277
- @staticmethod
278
- def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h):
279
- w_step = (image_w - crop_w) // 4
280
- h_step = (image_h - crop_h) // 4
281
-
282
- ret = list()
283
- ret.append((0, 0)) # upper left
284
- ret.append((4 * w_step, 0)) # upper right
285
- ret.append((0, 4 * h_step)) # lower left
286
- ret.append((4 * w_step, 4 * h_step)) # lower right
287
- ret.append((2 * w_step, 2 * h_step)) # center
288
-
289
- if more_fix_crop:
290
- ret.append((0, 2 * h_step)) # center left
291
- ret.append((4 * w_step, 2 * h_step)) # center right
292
- ret.append((2 * w_step, 4 * h_step)) # lower center
293
- ret.append((2 * w_step, 0 * h_step)) # upper center
294
-
295
- ret.append((1 * w_step, 1 * h_step)) # upper left quarter
296
- ret.append((3 * w_step, 1 * h_step)) # upper right quarter
297
- ret.append((1 * w_step, 3 * h_step)) # lower left quarter
298
- ret.append((3 * w_step, 3 * h_step)) # lower righ quarter
299
-
300
- return ret
301
-
302
-
303
- class GroupRandomSizedCrop(object):
304
- """Random crop the given PIL.Image to a random size of (0.08 to 1.0) of the original size
305
- and and a random aspect ratio of 3/4 to 4/3 of the original aspect ratio
306
- This is popularly used to train the Inception networks
307
- size: size of the smaller edge
308
- interpolation: Default: PIL.Image.BILINEAR
309
- """
310
-
311
- def __init__(self, size, interpolation=Image.BILINEAR):
312
- self.size = size
313
- self.interpolation = interpolation
314
-
315
- def __call__(self, img_group):
316
- for attempt in range(10):
317
- area = img_group[0].size[0] * img_group[0].size[1]
318
- target_area = random.uniform(0.08, 1.0) * area
319
- aspect_ratio = random.uniform(3. / 4, 4. / 3)
320
-
321
- w = int(round(math.sqrt(target_area * aspect_ratio)))
322
- h = int(round(math.sqrt(target_area / aspect_ratio)))
323
-
324
- if random.random() < 0.5:
325
- w, h = h, w
326
-
327
- if w <= img_group[0].size[0] and h <= img_group[0].size[1]:
328
- x1 = random.randint(0, img_group[0].size[0] - w)
329
- y1 = random.randint(0, img_group[0].size[1] - h)
330
- found = True
331
- break
332
- else:
333
- found = False
334
- x1 = 0
335
- y1 = 0
336
-
337
- if found:
338
- out_group = list()
339
- for img in img_group:
340
- img = img.crop((x1, y1, x1 + w, y1 + h))
341
- assert(img.size == (w, h))
342
- out_group.append(
343
- img.resize(
344
- (self.size, self.size), self.interpolation))
345
- return out_group
346
- else:
347
- # Fallback
348
- scale = GroupScale(self.size, interpolation=self.interpolation)
349
- crop = GroupRandomCrop(self.size)
350
- return crop(scale(img_group))
351
-
352
-
353
- class ConvertDataFormat(object):
354
- def __init__(self, model_type):
355
- self.model_type = model_type
356
-
357
- def __call__(self, images):
358
- if self.model_type == '2D':
359
- return images
360
- tc, h, w = images.size()
361
- t = tc // 3
362
- images = images.view(t, 3, h, w)
363
- images = images.permute(1, 0, 2, 3)
364
- return images
365
-
366
-
367
- class Stack(object):
368
-
369
- def __init__(self, roll=False):
370
- self.roll = roll
371
-
372
- def __call__(self, img_group):
373
- if img_group[0].mode == 'L':
374
- return np.concatenate([np.expand_dims(x, 2)
375
- for x in img_group], axis=2)
376
- elif img_group[0].mode == 'RGB':
377
- if self.roll:
378
- return np.concatenate([np.array(x)[:, :, ::-1]
379
- for x in img_group], axis=2)
380
- else:
381
- #print(np.concatenate(img_group, axis=2).shape)
382
- # print(img_group[0].shape)
383
- return np.concatenate(img_group, axis=2)
384
-
385
-
386
- class ToTorchFormatTensor(object):
387
- """ Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255]
388
- to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """
389
-
390
- def __init__(self, div=True):
391
- self.div = div
392
-
393
- def __call__(self, pic):
394
- if isinstance(pic, np.ndarray):
395
- # handle numpy array
396
- img = torch.from_numpy(pic).permute(2, 0, 1).contiguous()
397
- else:
398
- # handle PIL Image
399
- img = torch.ByteTensor(
400
- torch.ByteStorage.from_buffer(
401
- pic.tobytes()))
402
- img = img.view(pic.size[1], pic.size[0], len(pic.mode))
403
- # put it from HWC to CHW format
404
- # yikes, this transpose takes 80% of the loading time/CPU
405
- img = img.transpose(0, 1).transpose(0, 2).contiguous()
406
- return img.float().div(255) if self.div else img.float()
407
-
408
-
409
- class IdentityTransform(object):
410
-
411
- def __call__(self, data):
412
- return data
413
-
414
-
415
- if __name__ == "__main__":
416
- trans = torchvision.transforms.Compose([
417
- GroupScale(256),
418
- GroupRandomCrop(224),
419
- Stack(),
420
- ToTorchFormatTensor(),
421
- GroupNormalize(
422
- mean=[.485, .456, .406],
423
- std=[.229, .224, .225]
424
- )]
425
- )
426
-
427
- im = Image.open('../tensorflow-model-zoo.torch/lena_299.png')
428
-
429
- color_group = [im] * 3
430
- rst = trans(color_group)
431
-
432
- gray_group = [im.convert('L')] * 9
433
- gray_rst = trans(gray_group)
434
-
435
- trans2 = torchvision.transforms.Compose([
436
- GroupRandomSizedCrop(256),
437
- Stack(),
438
- ToTorchFormatTensor(),
439
- GroupNormalize(
440
- mean=[.485, .456, .406],
441
- std=[.229, .224, .225])
442
- ])
443
- print(trans2(color_group))
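Beyond the `__main__` demo at the end of the file, a sketch of a test-time oversampling pipeline built only from the classes above; the image path and frame count are placeholders, not values from the deleted file:

# Sketch only: 10-crop style test-time augmentation with the group transforms above.
# 'frames.jpg' is a placeholder path; any list of RGB PIL images works.
from PIL import Image
import torchvision

tta = torchvision.transforms.Compose([
    GroupOverSample(crop_size=224, scale_size=256),   # 5 fixed crops + flips per frame
    Stack(roll=False),
    ToTorchFormatTensor(div=True),
    GroupNormalize(mean=[.485, .456, .406], std=[.229, .224, .225]),
])

frames = [Image.open('frames.jpg').convert('RGB')] * 8   # a fake 8-frame clip
clip_tensor = tta(frames)
print(clip_tensor.shape)   # (3 * 8 * 10, 224, 224)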
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/start_wsl.bat DELETED
@@ -1,11 +0,0 @@
@echo off

cd /D "%~dp0"

set PATH=%PATH%;%SystemRoot%\system32

@rem sed -i 's/\x0D$//' ./wsl.sh converts newlines to unix format in the wsl script
call wsl -e bash -lic "sed -i 's/\x0D$//' ./wsl.sh; source ./wsl.sh %*"

:end
pause
 
spaces/Arnx/MusicGenXvAKN/tests/common_utils/wav_utils.py DELETED
@@ -1,32 +0,0 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from pathlib import Path
import typing as tp

import torch
import torchaudio


def get_white_noise(chs: int = 1, num_frames: int = 1):
    wav = torch.randn(chs, num_frames)
    return wav


def get_batch_white_noise(bs: int = 1, chs: int = 1, num_frames: int = 1):
    wav = torch.randn(bs, chs, num_frames)
    return wav


def save_wav(path: str, wav: torch.Tensor, sample_rate: int):
    fp = Path(path)
    kwargs: tp.Dict[str, tp.Any] = {}
    if fp.suffix == '.wav':
        kwargs['encoding'] = 'PCM_S'
        kwargs['bits_per_sample'] = 16
    elif fp.suffix == '.mp3':
        kwargs['compression'] = 320
    torchaudio.save(str(fp), wav, sample_rate, **kwargs)
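A short usage sketch of the helpers above; the output path is a placeholder and the sample rate is only an example:

# Sketch only: generate one second of stereo white noise and write it as 16-bit PCM.
noise = get_white_noise(chs=2, num_frames=16000)
batch = get_batch_white_noise(bs=4, chs=1, num_frames=8000)
save_wav('/tmp/white_noise.wav', noise, sample_rate=16000)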
 
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/tuneavideo/tuneavideo_text2video.py DELETED
@@ -1,153 +0,0 @@
1
- import gradio as gr
2
- import torch
3
-
4
- from video_diffusion.tuneavideo.models.unet import UNet3DConditionModel
5
- from video_diffusion.tuneavideo.pipelines.pipeline_tuneavideo import TuneAVideoPipeline
6
- from video_diffusion.tuneavideo.util import save_videos_grid
7
- from video_diffusion.utils.model_list import stable_model_list
8
-
9
- video_diffusion_model_list = [
10
- "Tune-A-Video-library/a-man-is-surfing",
11
- "Tune-A-Video-library/mo-di-bear-guitar",
12
- "Tune-A-Video-library/redshift-man-skiing",
13
- ]
14
-
15
-
16
- class TunaVideoText2VideoGenerator:
17
- def __init__(self):
18
- self.pipe = None
19
- self.unet = None
20
-
21
- def load_model(self, video_diffusion_model_list, stable_model_list):
22
- if self.pipe is None:
23
- if self.unet is None:
24
- self.unet = UNet3DConditionModel.from_pretrained(
25
- video_diffusion_model_list, subfolder="unet", torch_dtype=torch.float16
26
- ).to("cuda")
27
-
28
- self.pipe = TuneAVideoPipeline.from_pretrained(
29
- stable_model_list, unet=self.unet, torch_dtype=torch.float16
30
- )
31
- self.pipe.to("cuda")
32
- self.pipe.enable_xformers_memory_efficient_attention()
33
-
34
- return self.pipe
35
-
36
- def generate_video(
37
- self,
38
- video_diffusion_model: str,
39
- stable_model_list: str,
40
- prompt: str,
41
- negative_prompt: str,
42
- video_length: int,
43
- height: int,
44
- width: int,
45
- num_inference_steps: int,
46
- guidance_scale: int,
47
- fps: int,
48
- ):
49
- pipe = self.load_model(video_diffusion_model, stable_model_list)
50
- video = pipe(
51
- prompt,
52
- negative_prompt=negative_prompt,
53
- video_length=video_length,
54
- height=height,
55
- width=width,
56
- num_inference_steps=num_inference_steps,
57
- guidance_scale=guidance_scale,
58
- ).videos
59
-
60
- save_videos_grid(videos=video, path="output.gif", fps=fps)
61
- return "output.gif"
62
-
63
- def app():
64
- with gr.Blocks():
65
- with gr.Row():
66
- with gr.Column():
67
- tunevideo_video_diffusion_model_list = gr.Dropdown(
68
- choices=video_diffusion_model_list,
69
- label="Video Diffusion Model",
70
- value=video_diffusion_model_list[0],
71
- )
72
- tunevideo_stable_model_list = gr.Dropdown(
73
- choices=stable_model_list,
74
- label="Stable Model List",
75
- value=stable_model_list[0],
76
- )
77
- with gr.Row():
78
- with gr.Column():
79
- tunevideo_prompt = gr.Textbox(
80
- lines=1,
81
- placeholder="Prompt",
82
- show_label=False,
83
- )
84
- tunevideo_video_length = gr.Slider(
85
- minimum=1,
86
- maximum=100,
87
- step=1,
88
- value=10,
89
- label="Video Length",
90
- )
91
- tunevideo_num_inference_steps = gr.Slider(
92
- minimum=1,
93
- maximum=100,
94
- step=1,
95
- value=50,
96
- label="Num Inference Steps",
97
- )
98
- tunevideo_fps = gr.Slider(
99
- minimum=1,
100
- maximum=60,
101
- step=1,
102
- value=5,
103
- label="Fps",
104
- )
105
- with gr.Row():
106
- with gr.Column():
107
- tunevideo_negative_prompt = gr.Textbox(
108
- lines=1,
109
- placeholder="Negative Prompt",
110
- show_label=False,
111
- )
112
- tunevideo_guidance_scale = gr.Slider(
113
- minimum=1,
114
- maximum=15,
115
- step=1,
116
- value=7.5,
117
- label="Guidance Scale",
118
- )
119
- tunevideo_height = gr.Slider(
120
- minimum=1,
121
- maximum=1280,
122
- step=32,
123
- value=512,
124
- label="Height",
125
- )
126
- tunevideo_width = gr.Slider(
127
- minimum=1,
128
- maximum=1280,
129
- step=32,
130
- value=512,
131
- label="Width",
132
- )
133
- tunevideo_generate = gr.Button(value="Generator")
134
-
135
- with gr.Column():
136
- tunevideo_output = gr.Video(label="Output")
137
-
138
- tunevideo_generate.click(
139
- fn=TunaVideoText2VideoGenerator().generate_video,
140
- inputs=[
141
- tunevideo_video_diffusion_model_list,
142
- tunevideo_stable_model_list,
143
- tunevideo_prompt,
144
- tunevideo_negative_prompt,
145
- tunevideo_video_length,
146
- tunevideo_height,
147
- tunevideo_width,
148
- tunevideo_num_inference_steps,
149
- tunevideo_guidance_scale,
150
- tunevideo_fps,
151
- ],
152
- outputs=tunevideo_output,
153
- )
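A hedged sketch of driving this generator outside the Gradio UI. It assumes a CUDA GPU, and the Stable Diffusion model id below is an illustrative placeholder (the real choices come from `video_diffusion.utils.model_list`, which is not shown here):

# Sketch only: call generate_video directly with example settings.
# Assumes a CUDA GPU and downloadable checkpoints; prompts and the
# "runwayml/stable-diffusion-v1-5" id are illustrative assumptions.
generator = TunaVideoText2VideoGenerator()
gif_path = generator.generate_video(
    video_diffusion_model="Tune-A-Video-library/a-man-is-surfing",
    stable_model_list="runwayml/stable-diffusion-v1-5",
    prompt="a man is surfing at sunset",
    negative_prompt="low quality, blurry",
    video_length=8,
    height=512,
    width=512,
    num_inference_steps=50,
    guidance_scale=7.5,
    fps=4,
)
print(gif_path)  # 'output.gif'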
 
spaces/AsakuraMizu/moe-tts/text/mandarin.py DELETED
@@ -1,329 +0,0 @@
1
- import os
2
- import sys
3
- import re
4
- from pypinyin import lazy_pinyin, BOPOMOFO
5
- import jieba
6
- import cn2an
7
- import logging
8
-
9
- logging.getLogger('jieba').setLevel(logging.WARNING)
10
- jieba.initialize()
11
-
12
-
13
- # List of (Latin alphabet, bopomofo) pairs:
14
- _latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
15
- ('a', 'ㄟˉ'),
16
- ('b', 'ㄅㄧˋ'),
17
- ('c', 'ㄙㄧˉ'),
18
- ('d', 'ㄉㄧˋ'),
19
- ('e', 'ㄧˋ'),
20
- ('f', 'ㄝˊㄈㄨˋ'),
21
- ('g', 'ㄐㄧˋ'),
22
- ('h', 'ㄝˇㄑㄩˋ'),
23
- ('i', 'ㄞˋ'),
24
- ('j', 'ㄐㄟˋ'),
25
- ('k', 'ㄎㄟˋ'),
26
- ('l', 'ㄝˊㄛˋ'),
27
- ('m', 'ㄝˊㄇㄨˋ'),
28
- ('n', 'ㄣˉ'),
29
- ('o', 'ㄡˉ'),
30
- ('p', 'ㄆㄧˉ'),
31
- ('q', 'ㄎㄧㄡˉ'),
32
- ('r', 'ㄚˋ'),
33
- ('s', 'ㄝˊㄙˋ'),
34
- ('t', 'ㄊㄧˋ'),
35
- ('u', 'ㄧㄡˉ'),
36
- ('v', 'ㄨㄧˉ'),
37
- ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
38
- ('x', 'ㄝˉㄎㄨˋㄙˋ'),
39
- ('y', 'ㄨㄞˋ'),
40
- ('z', 'ㄗㄟˋ')
41
- ]]
42
-
43
- # List of (bopomofo, romaji) pairs:
44
- _bopomofo_to_romaji = [(re.compile('%s' % x[0]), x[1]) for x in [
45
- ('ㄅㄛ', 'p⁼wo'),
46
- ('ㄆㄛ', 'pʰwo'),
47
- ('ㄇㄛ', 'mwo'),
48
- ('ㄈㄛ', 'fwo'),
49
- ('ㄅ', 'p⁼'),
50
- ('ㄆ', 'pʰ'),
51
- ('ㄇ', 'm'),
52
- ('ㄈ', 'f'),
53
- ('ㄉ', 't⁼'),
54
- ('ㄊ', 'tʰ'),
55
- ('ㄋ', 'n'),
56
- ('ㄌ', 'l'),
57
- ('ㄍ', 'k⁼'),
58
- ('ㄎ', 'kʰ'),
59
- ('ㄏ', 'h'),
60
- ('ㄐ', 'ʧ⁼'),
61
- ('ㄑ', 'ʧʰ'),
62
- ('ㄒ', 'ʃ'),
63
- ('ㄓ', 'ʦ`⁼'),
64
- ('ㄔ', 'ʦ`ʰ'),
65
- ('ㄕ', 's`'),
66
- ('ㄖ', 'ɹ`'),
67
- ('ㄗ', 'ʦ⁼'),
68
- ('ㄘ', 'ʦʰ'),
69
- ('ㄙ', 's'),
70
- ('ㄚ', 'a'),
71
- ('ㄛ', 'o'),
72
- ('ㄜ', 'ə'),
73
- ('ㄝ', 'e'),
74
- ('ㄞ', 'ai'),
75
- ('ㄟ', 'ei'),
76
- ('ㄠ', 'au'),
77
- ('ㄡ', 'ou'),
78
- ('ㄧㄢ', 'yeNN'),
79
- ('ㄢ', 'aNN'),
80
- ('ㄧㄣ', 'iNN'),
81
- ('ㄣ', 'əNN'),
82
- ('ㄤ', 'aNg'),
83
- ('ㄧㄥ', 'iNg'),
84
- ('ㄨㄥ', 'uNg'),
85
- ('ㄩㄥ', 'yuNg'),
86
- ('ㄥ', 'əNg'),
87
- ('ㄦ', 'əɻ'),
88
- ('ㄧ', 'i'),
89
- ('ㄨ', 'u'),
90
- ('ㄩ', 'ɥ'),
91
- ('ˉ', '→'),
92
- ('ˊ', '↑'),
93
- ('ˇ', '↓↑'),
94
- ('ˋ', '↓'),
95
- ('˙', ''),
96
- (',', ','),
97
- ('。', '.'),
98
- ('!', '!'),
99
- ('?', '?'),
100
- ('—', '-')
101
- ]]
102
-
103
- # List of (romaji, ipa) pairs:
104
- _romaji_to_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
105
- ('ʃy', 'ʃ'),
106
- ('ʧʰy', 'ʧʰ'),
107
- ('ʧ⁼y', 'ʧ⁼'),
108
- ('NN', 'n'),
109
- ('Ng', 'ŋ'),
110
- ('y', 'j'),
111
- ('h', 'x')
112
- ]]
113
-
114
- # List of (bopomofo, ipa) pairs:
115
- _bopomofo_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
116
- ('ㄅㄛ', 'p⁼wo'),
117
- ('ㄆㄛ', 'pʰwo'),
118
- ('ㄇㄛ', 'mwo'),
119
- ('ㄈㄛ', 'fwo'),
120
- ('ㄅ', 'p⁼'),
121
- ('ㄆ', 'pʰ'),
122
- ('ㄇ', 'm'),
123
- ('ㄈ', 'f'),
124
- ('ㄉ', 't⁼'),
125
- ('ㄊ', 'tʰ'),
126
- ('ㄋ', 'n'),
127
- ('ㄌ', 'l'),
128
- ('ㄍ', 'k⁼'),
129
- ('ㄎ', 'kʰ'),
130
- ('ㄏ', 'x'),
131
- ('ㄐ', 'tʃ⁼'),
132
- ('ㄑ', 'tʃʰ'),
133
- ('ㄒ', 'ʃ'),
134
- ('ㄓ', 'ts`⁼'),
135
- ('ㄔ', 'ts`ʰ'),
136
- ('ㄕ', 's`'),
137
- ('ㄖ', 'ɹ`'),
138
- ('ㄗ', 'ts⁼'),
139
- ('ㄘ', 'tsʰ'),
140
- ('ㄙ', 's'),
141
- ('ㄚ', 'a'),
142
- ('ㄛ', 'o'),
143
- ('ㄜ', 'ə'),
144
- ('ㄝ', 'ɛ'),
145
- ('ㄞ', 'aɪ'),
146
- ('ㄟ', 'eɪ'),
147
- ('ㄠ', 'ɑʊ'),
148
- ('ㄡ', 'oʊ'),
149
- ('ㄧㄢ', 'jɛn'),
150
- ('ㄩㄢ', 'ɥæn'),
151
- ('ㄢ', 'an'),
152
- ('ㄧㄣ', 'in'),
153
- ('ㄩㄣ', 'ɥn'),
154
- ('ㄣ', 'ən'),
155
- ('ㄤ', 'ɑŋ'),
156
- ('ㄧㄥ', 'iŋ'),
157
- ('ㄨㄥ', 'ʊŋ'),
158
- ('ㄩㄥ', 'jʊŋ'),
159
- ('ㄥ', 'əŋ'),
160
- ('ㄦ', 'əɻ'),
161
- ('ㄧ', 'i'),
162
- ('ㄨ', 'u'),
163
- ('ㄩ', 'ɥ'),
164
- ('ˉ', '→'),
165
- ('ˊ', '↑'),
166
- ('ˇ', '↓↑'),
167
- ('ˋ', '↓'),
168
- ('˙', ''),
169
- (',', ','),
170
- ('。', '.'),
171
- ('!', '!'),
172
- ('?', '?'),
173
- ('—', '-')
174
- ]]
175
-
176
- # List of (bopomofo, ipa2) pairs:
177
- _bopomofo_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
178
- ('ㄅㄛ', 'pwo'),
179
- ('ㄆㄛ', 'pʰwo'),
180
- ('ㄇㄛ', 'mwo'),
181
- ('ㄈㄛ', 'fwo'),
182
- ('ㄅ', 'p'),
183
- ('ㄆ', 'pʰ'),
184
- ('ㄇ', 'm'),
185
- ('ㄈ', 'f'),
186
- ('ㄉ', 't'),
187
- ('ㄊ', 'tʰ'),
188
- ('ㄋ', 'n'),
189
- ('ㄌ', 'l'),
190
- ('ㄍ', 'k'),
191
- ('ㄎ', 'kʰ'),
192
- ('ㄏ', 'h'),
193
- ('ㄐ', 'tɕ'),
194
- ('ㄑ', 'tɕʰ'),
195
- ('ㄒ', 'ɕ'),
196
- ('ㄓ', 'tʂ'),
197
- ('ㄔ', 'tʂʰ'),
198
- ('ㄕ', 'ʂ'),
199
- ('ㄖ', 'ɻ'),
200
- ('ㄗ', 'ts'),
201
- ('ㄘ', 'tsʰ'),
202
- ('ㄙ', 's'),
203
- ('ㄚ', 'a'),
204
- ('ㄛ', 'o'),
205
- ('ㄜ', 'ɤ'),
206
- ('ㄝ', 'ɛ'),
207
- ('ㄞ', 'aɪ'),
208
- ('ㄟ', 'eɪ'),
209
- ('ㄠ', 'ɑʊ'),
210
- ('ㄡ', 'oʊ'),
211
- ('ㄧㄢ', 'jɛn'),
212
- ('ㄩㄢ', 'yæn'),
213
- ('ㄢ', 'an'),
214
- ('ㄧㄣ', 'in'),
215
- ('ㄩㄣ', 'yn'),
216
- ('ㄣ', 'ən'),
217
- ('ㄤ', 'ɑŋ'),
218
- ('ㄧㄥ', 'iŋ'),
219
- ('ㄨㄥ', 'ʊŋ'),
220
- ('ㄩㄥ', 'jʊŋ'),
221
- ('ㄥ', 'ɤŋ'),
222
- ('ㄦ', 'əɻ'),
223
- ('ㄧ', 'i'),
224
- ('ㄨ', 'u'),
225
- ('ㄩ', 'y'),
226
- ('ˉ', '˥'),
227
- ('ˊ', '˧˥'),
228
- ('ˇ', '˨˩˦'),
229
- ('ˋ', '˥˩'),
230
- ('˙', ''),
231
- (',', ','),
232
- ('。', '.'),
233
- ('!', '!'),
234
- ('?', '?'),
235
- ('—', '-')
236
- ]]
237
-
238
-
239
- def number_to_chinese(text):
240
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
241
- for number in numbers:
242
- text = text.replace(number, cn2an.an2cn(number), 1)
243
- return text
244
-
245
-
246
- def chinese_to_bopomofo(text):
247
- text = text.replace('、', ',').replace(';', ',').replace(':', ',')
248
- words = jieba.lcut(text, cut_all=False)
249
- text = ''
250
- for word in words:
251
- bopomofos = lazy_pinyin(word, BOPOMOFO)
252
- if not re.search('[\u4e00-\u9fff]', word):
253
- text += word
254
- continue
255
- for i in range(len(bopomofos)):
256
- bopomofos[i] = re.sub(r'([\u3105-\u3129])$', r'\1ˉ', bopomofos[i])
257
- if text != '':
258
- text += ' '
259
- text += ''.join(bopomofos)
260
- return text
261
-
262
-
263
- def latin_to_bopomofo(text):
264
- for regex, replacement in _latin_to_bopomofo:
265
- text = re.sub(regex, replacement, text)
266
- return text
267
-
268
-
269
- def bopomofo_to_romaji(text):
270
- for regex, replacement in _bopomofo_to_romaji:
271
- text = re.sub(regex, replacement, text)
272
- return text
273
-
274
-
275
- def bopomofo_to_ipa(text):
276
- for regex, replacement in _bopomofo_to_ipa:
277
- text = re.sub(regex, replacement, text)
278
- return text
279
-
280
-
281
- def bopomofo_to_ipa2(text):
282
- for regex, replacement in _bopomofo_to_ipa2:
283
- text = re.sub(regex, replacement, text)
284
- return text
285
-
286
-
287
- def chinese_to_romaji(text):
288
- text = number_to_chinese(text)
289
- text = chinese_to_bopomofo(text)
290
- text = latin_to_bopomofo(text)
291
- text = bopomofo_to_romaji(text)
292
- text = re.sub('i([aoe])', r'y\1', text)
293
- text = re.sub('u([aoəe])', r'w\1', text)
294
- text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑ ]+|$)',
295
- r'\1ɹ`\2', text).replace('ɻ', 'ɹ`')
296
- text = re.sub('([ʦs][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text)
297
- return text
298
-
299
-
300
- def chinese_to_lazy_ipa(text):
301
- text = chinese_to_romaji(text)
302
- for regex, replacement in _romaji_to_ipa:
303
- text = re.sub(regex, replacement, text)
304
- return text
305
-
306
-
307
- def chinese_to_ipa(text):
308
- text = number_to_chinese(text)
309
- text = chinese_to_bopomofo(text)
310
- text = latin_to_bopomofo(text)
311
- text = bopomofo_to_ipa(text)
312
- text = re.sub('i([aoe])', r'j\1', text)
313
- text = re.sub('u([aoəe])', r'w\1', text)
314
- text = re.sub('([sɹ]`[⁼ʰ]?)([→↓↑ ]+|$)',
315
- r'\1ɹ`\2', text).replace('ɻ', 'ɹ`')
316
- text = re.sub('([s][⁼ʰ]?)([→↓↑ ]+|$)', r'\1ɹ\2', text)
317
- return text
318
-
319
-
320
- def chinese_to_ipa2(text):
321
- text = number_to_chinese(text)
322
- text = chinese_to_bopomofo(text)
323
- text = latin_to_bopomofo(text)
324
- text = bopomofo_to_ipa2(text)
325
- text = re.sub(r'i([aoe])', r'j\1', text)
326
- text = re.sub(r'u([aoəe])', r'w\1', text)
327
- text = re.sub(r'([ʂɹ]ʰ?)([˩˨˧˦˥ ]+|$)', r'\1ʅ\2', text)
328
- text = re.sub(r'(sʰ?)([˩˨˧˦˥ ]+|$)', r'\1ɿ\2', text)
329
- return text
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/glibc.py DELETED
@@ -1,88 +0,0 @@
1
- # The following comment should be removed at some point in the future.
2
- # mypy: strict-optional=False
3
-
4
- import os
5
- import sys
6
- from typing import Optional, Tuple
7
-
8
-
9
- def glibc_version_string() -> Optional[str]:
10
- "Returns glibc version string, or None if not using glibc."
11
- return glibc_version_string_confstr() or glibc_version_string_ctypes()
12
-
13
-
14
- def glibc_version_string_confstr() -> Optional[str]:
15
- "Primary implementation of glibc_version_string using os.confstr."
16
- # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
17
- # to be broken or missing. This strategy is used in the standard library
18
- # platform module:
19
- # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183
20
- if sys.platform == "win32":
21
- return None
22
- try:
23
- # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17":
24
- _, version = os.confstr("CS_GNU_LIBC_VERSION").split()
25
- except (AttributeError, OSError, ValueError):
26
- # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
27
- return None
28
- return version
29
-
30
-
31
- def glibc_version_string_ctypes() -> Optional[str]:
32
- "Fallback implementation of glibc_version_string using ctypes."
33
-
34
- try:
35
- import ctypes
36
- except ImportError:
37
- return None
38
-
39
- # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
40
- # manpage says, "If filename is NULL, then the returned handle is for the
41
- # main program". This way we can let the linker do the work to figure out
42
- # which libc our process is actually using.
43
- process_namespace = ctypes.CDLL(None)
44
- try:
45
- gnu_get_libc_version = process_namespace.gnu_get_libc_version
46
- except AttributeError:
47
- # Symbol doesn't exist -> therefore, we are not linked to
48
- # glibc.
49
- return None
50
-
51
- # Call gnu_get_libc_version, which returns a string like "2.5"
52
- gnu_get_libc_version.restype = ctypes.c_char_p
53
- version_str = gnu_get_libc_version()
54
- # py2 / py3 compatibility:
55
- if not isinstance(version_str, str):
56
- version_str = version_str.decode("ascii")
57
-
58
- return version_str
59
-
60
-
61
- # platform.libc_ver regularly returns completely nonsensical glibc
62
- # versions. E.g. on my computer, platform says:
63
- #
64
- # ~$ python2.7 -c 'import platform; print(platform.libc_ver())'
65
- # ('glibc', '2.7')
66
- # ~$ python3.5 -c 'import platform; print(platform.libc_ver())'
67
- # ('glibc', '2.9')
68
- #
69
- # But the truth is:
70
- #
71
- # ~$ ldd --version
72
- # ldd (Debian GLIBC 2.22-11) 2.22
73
- #
74
- # This is unfortunate, because it means that the linehaul data on libc
75
- # versions that was generated by pip 8.1.2 and earlier is useless and
76
- # misleading. Solution: instead of using platform, use our code that actually
77
- # works.
78
- def libc_ver() -> Tuple[str, str]:
79
- """Try to determine the glibc version
80
-
81
- Returns a tuple of strings (lib, version) which default to empty strings
82
- in case the lookup fails.
83
- """
84
- glibc_version = glibc_version_string()
85
- if glibc_version is None:
86
- return ("", "")
87
- else:
88
- return ("glibc", glibc_version)
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/Misc/mmdet_mask_rcnn_R_50_FPN_1x.py DELETED
@@ -1,151 +0,0 @@
1
- # An example config to train a mmdetection model using detectron2.
2
-
3
- from ..common.data.coco import dataloader
4
- from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
5
- from ..common.optim import SGD as optimizer
6
- from ..common.train import train
7
-
8
- from detectron2.modeling.mmdet_wrapper import MMDetDetector
9
- from detectron2.config import LazyCall as L
10
-
11
- model = L(MMDetDetector)(
12
- detector=dict(
13
- type="MaskRCNN",
14
- pretrained="torchvision://resnet50",
15
- backbone=dict(
16
- type="ResNet",
17
- depth=50,
18
- num_stages=4,
19
- out_indices=(0, 1, 2, 3),
20
- frozen_stages=1,
21
- norm_cfg=dict(type="BN", requires_grad=True),
22
- norm_eval=True,
23
- style="pytorch",
24
- ),
25
- neck=dict(type="FPN", in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5),
26
- rpn_head=dict(
27
- type="RPNHead",
28
- in_channels=256,
29
- feat_channels=256,
30
- anchor_generator=dict(
31
- type="AnchorGenerator",
32
- scales=[8],
33
- ratios=[0.5, 1.0, 2.0],
34
- strides=[4, 8, 16, 32, 64],
35
- ),
36
- bbox_coder=dict(
37
- type="DeltaXYWHBBoxCoder",
38
- target_means=[0.0, 0.0, 0.0, 0.0],
39
- target_stds=[1.0, 1.0, 1.0, 1.0],
40
- ),
41
- loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=True, loss_weight=1.0),
42
- loss_bbox=dict(type="L1Loss", loss_weight=1.0),
43
- ),
44
- roi_head=dict(
45
- type="StandardRoIHead",
46
- bbox_roi_extractor=dict(
47
- type="SingleRoIExtractor",
48
- roi_layer=dict(type="RoIAlign", output_size=7, sampling_ratio=0),
49
- out_channels=256,
50
- featmap_strides=[4, 8, 16, 32],
51
- ),
52
- bbox_head=dict(
53
- type="Shared2FCBBoxHead",
54
- in_channels=256,
55
- fc_out_channels=1024,
56
- roi_feat_size=7,
57
- num_classes=80,
58
- bbox_coder=dict(
59
- type="DeltaXYWHBBoxCoder",
60
- target_means=[0.0, 0.0, 0.0, 0.0],
61
- target_stds=[0.1, 0.1, 0.2, 0.2],
62
- ),
63
- reg_class_agnostic=False,
64
- loss_cls=dict(type="CrossEntropyLoss", use_sigmoid=False, loss_weight=1.0),
65
- loss_bbox=dict(type="L1Loss", loss_weight=1.0),
66
- ),
67
- mask_roi_extractor=dict(
68
- type="SingleRoIExtractor",
69
- roi_layer=dict(type="RoIAlign", output_size=14, sampling_ratio=0),
70
- out_channels=256,
71
- featmap_strides=[4, 8, 16, 32],
72
- ),
73
- mask_head=dict(
74
- type="FCNMaskHead",
75
- num_convs=4,
76
- in_channels=256,
77
- conv_out_channels=256,
78
- num_classes=80,
79
- loss_mask=dict(type="CrossEntropyLoss", use_mask=True, loss_weight=1.0),
80
- ),
81
- ),
82
- # model training and testing settings
83
- train_cfg=dict(
84
- rpn=dict(
85
- assigner=dict(
86
- type="MaxIoUAssigner",
87
- pos_iou_thr=0.7,
88
- neg_iou_thr=0.3,
89
- min_pos_iou=0.3,
90
- match_low_quality=True,
91
- ignore_iof_thr=-1,
92
- ),
93
- sampler=dict(
94
- type="RandomSampler",
95
- num=256,
96
- pos_fraction=0.5,
97
- neg_pos_ub=-1,
98
- add_gt_as_proposals=False,
99
- ),
100
- allowed_border=-1,
101
- pos_weight=-1,
102
- debug=False,
103
- ),
104
- rpn_proposal=dict(
105
- nms_pre=2000,
106
- max_per_img=1000,
107
- nms=dict(type="nms", iou_threshold=0.7),
108
- min_bbox_size=0,
109
- ),
110
- rcnn=dict(
111
- assigner=dict(
112
- type="MaxIoUAssigner",
113
- pos_iou_thr=0.5,
114
- neg_iou_thr=0.5,
115
- min_pos_iou=0.5,
116
- match_low_quality=True,
117
- ignore_iof_thr=-1,
118
- ),
119
- sampler=dict(
120
- type="RandomSampler",
121
- num=512,
122
- pos_fraction=0.25,
123
- neg_pos_ub=-1,
124
- add_gt_as_proposals=True,
125
- ),
126
- mask_size=28,
127
- pos_weight=-1,
128
- debug=False,
129
- ),
130
- ),
131
- test_cfg=dict(
132
- rpn=dict(
133
- nms_pre=1000,
134
- max_per_img=1000,
135
- nms=dict(type="nms", iou_threshold=0.7),
136
- min_bbox_size=0,
137
- ),
138
- rcnn=dict(
139
- score_thr=0.05,
140
- nms=dict(type="nms", iou_threshold=0.5),
141
- max_per_img=100,
142
- mask_thr_binary=0.5,
143
- ),
144
- ),
145
- ),
146
- pixel_mean=[123.675, 116.280, 103.530],
147
- pixel_std=[58.395, 57.120, 57.375],
148
- )
149
-
150
- dataloader.train.mapper.image_format = "RGB" # torchvision pretrained model
151
- train.init_checkpoint = None # pretrained model is loaded inside backbone
 
spaces/Axolotlily/Interpolate/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Interpolate
3
- emoji: 🌖
4
- colorFrom: blue
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.0.17
8
- app_file: app.py
9
- pinned: false
10
- license: other
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/BasToTheMax/22h-vintedois-diffusion-v0-1/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: 22h Vintedois Diffusion V0 1
3
- emoji: 🦀
4
- colorFrom: yellow
5
- colorTo: blue
6
- sdk: gradio
7
- sdk_version: 3.19.1
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Benebene/Chat-question-answering/app.py DELETED
@@ -1,9 +0,0 @@
1
- from utils import Stuff
2
- from test import test, test_bench
3
- from interface import launch_gradio
4
-
5
- s = Stuff()
6
-
7
- #test(test_bench, s)
8
-
9
- launch_gradio(s)
 
spaces/Benson/text-generation/Examples/Buscar En La Lista De Miembros.md DELETED
@@ -1,82 +0,0 @@
1
-
2
- <h1>Ghost Rider 3 Dawn of Darkness: Todo lo que necesitas saber</h1>
3
- <p>Si usted es un fan del antihéroe de fuego Ghost Rider, es posible que se pregunte si hay una tercera película en las obras. La respuesta no es tan simple, ya que ha habido rumores, especulaciones y remolques hechos por fans sobre Ghost Rider 3 Dawn of Darkness, pero no hay confirmación oficial de Marvel Studios o cualquier otra compañía de producción. En este artículo, exploraremos todo lo que necesitas saber sobre Ghost Rider 3 Dawn of Darkness, incluyendo qué es, quién está en el reparto, cuál es la trama y cómo se relaciona con las películas anteriores de Ghost Rider y el Universo Cinematográfico de Marvel (MCU). </p>
4
- <h2>Buscar en la lista de miembros</h2><br /><p><b><b>Download File</b> &#127383; <a href="https://bltlly.com/2v6M7b">https://bltlly.com/2v6M7b</a></b></p><br /><br />
5
- <h2>Introducción</h2>
6
- <p>Ghost Rider es un personaje de cómics estadounidenses publicado por Marvel Comics. Es un ser sobrenatural que monta una motocicleta en llamas y tiene un cráneo por cabeza. También es conocido como el Espíritu de Venganza, ya que castiga a los malvados con sus poderes de fuego infernal. Ha habido varias versiones de Ghost Rider en los cómics, pero el más famoso es Johnny Blaze, un motociclista especialista que vendió su alma al diablo para salvar la vida de su padre. </p>
7
- <p>Ghost Rider ha aparecido en dos películas de acción en vivo hasta ahora, ambas protagonizadas por Nicolas Cage como Johnny Blaze. El primero fue lanzado en 2007 y fue dirigido por Mark Steven Johnson. El segundo fue lanzado en 2011 y fue dirigido por Mark Neveldine y Brian Taylor. Ambas películas recibieron críticas mixtas a negativas de críticos y fans, pero tuvieron éxito comercial, recaudando más de $400 millones en todo el mundo combinados. </p>
8
- <h3>¿Qué es Ghost Rider 3 Dawn of Darkness? </h3>
9
-
10
- <p>Uno de los trailers más populares de Ghost Rider 3 Dawn of Darkness fue subido a YouTube por Mega Movie Trailer en 2017. Presenta clips de varias películas y programas, como Blade, Constantine, Supernatural y Agents of S.H.I.E.L.D., para crear una historia mash-up que involucra a Wesley Snipes como Blade, Idris Elba como Moreau y Nicolas Cage como Johnny Blaze/ Ghost Rider. El tráiler tiene más de 2 millones de visitas y ha recibido comentarios positivos de los espectadores. </p>
11
- <p>Otro trailer hecho por fans para Ghost Rider 3 Dawn of Darkness fue subido a YouTube por End Of The Galaxy en 2020. Presenta clips de varias películas y programas, como Doctor Strange, Thor: Ragnarok, Avengers: Endgame y Lucifer, para crear una historia de mash-up que involucra a Benedict Cumberbatch como Doctor Strange, Chris Hemsworth como Thor, Tom Ellis como Lucifer Morningstar, y Nicolas Cage como Johnny Blaze/Ghost Rider. El trailer tiene más de 300 mil visitas y ha recibido comentarios positivos de los espectadores. </p>
12
- <h3>¿Quién está en el elenco de Ghost Rider 3 Dawn of Darkness? </h3>
13
- <p>Como Ghost Rider 3 Dawn of Darkness no es una película oficial, no hay un elenco oficial para ella. Sin embargo, en base a los trailers y carteles hechos por los fans, algunos de los actores que les gustaría ver en la película son:</p>
14
- <p></p>
15
- <ul>
16
- <li>Nicolas Cage como Johnny Blaze/ Ghost Rider: Cage jugó el papel en las dos primeras películas y ha expresado interés en repetirlo en el futuro. </li>
17
- <li>Wesley Snipes as Blade: Snipes jugó el papel en las tres primeras películas de Blade y está programado para regresar en el próximo reinicio del personaje en MCU. </li>
18
- <li>Idris Elba como Moreau: Elba jugó el papel en Ghost Rider: Spirit of Vengeance y también es conocido por sus papeles en el MCU, Luther y The Dark Tower.</li>
19
-
20
- <li>Chris Hemsworth como Thor: Hemsworth jugó el papel en Thor, Los Vengadores, Thor: El Mundo Oscuro, Avengers: Age of Ultron, Thor: Ragnarok, Avengers: Infinity War, Avengers: Endgame, y lo hará en Thor: Love and Thunder.<li>
21
- <li>Tom Ellis como Lucifer Morningstar: Ellis interpretó el papel en Lucifer, una serie de televisión basada en el personaje de DC Comics del mismo nombre. </li>
22
- </ul>
23
- <p>Por supuesto, estos son solo deseos de los fans y no miembros del reparto confirmados. Es poco probable que todos estos actores aparezcan en una película de Ghost Rider 3, especialmente porque algunos de ellos pertenecen a diferentes franquicias y estudios. Sin embargo, es divertido imaginar cómo sería una película de crossover como esta. </p>
24
- <h3>¿Cuál es la trama de Ghost Rider 3 Dawn of Darkness? </h3>
25
- <p>De nuevo, ya que Ghost Rider 3 Dawn of Darkness no es una película oficial, no hay ninguna trama oficial para ella. Sin embargo, sobre la base de los remolques y carteles hechos por fans, algunos de los posibles elementos de la trama son:</p>
26
- <ul>
27
- Johnny Blaze/Ghost Rider sigue huyendo de sus enemigos y su maldición. Es contactado por Moreau, un antiguo monje que lo ayudó en Ghost Rider: Spirit of Vengeance. Moreau le dice que hay una manera de terminar su sufrimiento y liberar su alma del diablo. </li>
28
- <li>La manera de hacer eso es encontrar y destruir el Libro de Cagliostro, un tomo antiguo que contiene oscuros secretos y hechizos. El libro está escondido en algún lugar de Europa y está custodiado por un culto de vampiros dirigido por Blade, un medio vampiro mitad humano que caza a su propia especie. </li>
29
- Johnny Blaze/ Ghost Rider se une a Moreau y otros aliados, como el Doctor Strange, Thor y Lucifer Morningstar, para encontrar el libro y enfrentar a Blade y sus secuaces. En el camino, se encuentran con varias amenazas y desafíos de fuerzas sobrenaturales y enemigos. </li>
30
-
31
- </ul>
32
- <p>Por supuesto, esto es solo una trama hecha por fans y no una historia oficial. Es poco probable que una película de Ghost Rider 3 siga esta historia exacta, especialmente porque involucra personajes y elementos de diferentes franquicias y estudios. Sin embargo, es divertido imaginar cómo sería una película de crossover como esta. </p>
33
- <h2>La historia de las películas de Ghost Rider</h2>
34
- <p>Antes de sumergirnos en el futuro de Ghost Rider en el MCU, echemos un vistazo a la historia de las películas de Ghost Rider. Aquí hay algunos breves resúmenes y reseñas de las dos primeras películas protagonizadas por Nicolas Cage como Johnny Blaze/Ghost Rider.</p>
35
- <h3>Jinete fantasma (2007)</h3>
36
- <h4>Sinopsis</h4>
37
- <p>Ghost Rider es una película de superhéroes de 2007 basada en el personaje de Marvel Comics del mismo nombre. Fue dirigida por Mark Steven Johnson y protagonizada por Nicolas Cage como Johnny Blaze/Ghost Rider, Eva Mendes como Roxanne Simpson, Wes Bentley como Blackheart, Sam Elliott como Carter Slade/Caretaker, Peter Fonda como Mephistopheles, y Donal Logue como Mack.</p>
38
- <p>La película cuenta la historia de origen de Johnny Blaze/ Ghost Rider, un motociclista especialista que vendió su alma a Mefistófeles para salvar la vida de su padre. Años más tarde, es llamado por Mefistófeles para detener a Blackheart, su hijo rebelde que planea desatar el infierno en la tierra. En el camino, se reúne con su amor de la infancia Roxanne Simpson, que ahora es periodista. </p>
39
- <h4>Recepción</h4>
40
- <p>Ghost Rider recibió críticas mixtas a negativas de críticos y fans. Tiene una calificación del 26% en Rotten Tomatoes basada en 173 comentarios. El consenso dice: "Ghost Rider es una mezcla amarga de triste, [asistente] (#mensaje) comedia y efectos especiales, y no puede estar a la altura de su material de origen." </p>
41
- <p>Algunas de las críticas de la película fueron su débil guion, mala actuación, diálogo cursi, falta de humor y tono inconsistente. Algunas de las alabanzas de la película fueron sus efectos visuales, escenas de acción y la actuación de Cage como Ghost Rider.</p>
42
-
43
- <h3>Jinete fantasma: Espíritu de venganza (2011)</h3>
44
- <h4>Sinopsis</h4>
45
- <p>Ghost Rider: Spirit of Vengeance es una película de superhéroes de 2011 basada en el personaje de Marvel Comics del mismo nombre. Fue dirigida por Mark Neveldine y Brian Taylor y protagonizada por Nicolas Cage como Johnny Blaze/Ghost Rider, Ciarán Hinds como Roarke/Mephisto, Violante Placido como Nadya Ketch, Johnny Whitworth como Ray Carrigan/Blackout, Christopher Lambert como Methodius, e Idris Elba como Moreau</p>p.
46
- <p>La película es una secuela de Ghost Rider, pero también un reinicio suave que ignora algunos de los eventos y personajes de la primera película. Sigue a Johnny Blaze/ Ghost Rider, que se esconde en Europa del Este y trata de controlar su maldición. Es reclutado por Moreau, un miembro de una orden religiosa secreta, para proteger a un joven llamado Danny Ketch de Roarke/ Mephisto, que quiere usarlo como un recipiente para su poder. </p>
47
- <h4>Recepción</h4>
48
- <p>Ghost Rider: Spirit of Vengeance recibió críticas negativas de críticos y fans. Tiene una calificación del 19% en Rotten Tomatoes basado en 121 comentarios. El consenso dice: "Con un guion débil, un trabajo de CG desigual, y una actuación de Nic Cage tan predeciblemente loco que ya no es divertido, Ghost Rider: Spirit of Vengeance tiene como objetivo ser diversión basura pero termina como basura." </p>
49
- <p>Algunas de las críticas de la película fueron su trama sin sentido, personajes sosos, acción aburrida, efectos baratos y violencia excesiva. Algunas de las alabanzas de la película fueron su tono más oscuro, su estilo más atrevido y el compromiso de Cage con el papel. </p>
50
- <p>La película fue un fracaso de taquilla, sin embargo, recaudando solo $ 132 millones en todo el mundo con un presupuesto de $ 57 millones. Fue una de las películas menos taquilleras basadas en un personaje de Marvel Comics. </p>
51
- <h2>El futuro de Ghost Rider en el MCU</h2>
52
-
53
- <p>Desde entonces, ha habido varios rumores y especulaciones sobre la participación de Ghost Rider en el MCU. Aquí están algunos de los más notables:</p>
54
- <h3>Ryan Gosling como Ghost Rider? </h3>
55
- <p>En 2016, hubo un rumor de que Ryan Gosling estaba en conversaciones para interpretar a Johnny Blaze/Ghost Rider en una nueva película que sería parte de la Fase 4 del UCM. El rumor afirmaba que Gosling estaba interesado en trabajar con Marvel Studios después de ver al Doctor Strange y que se había reunido con Kevin Feige para discutir el papel. El rumor también afirmaba que la película sería dirigida por Neil Marshall (The Descent) y que incluiría a Doctor Strange como personaje secundario. </p>
56
- <p>Sin embargo, este rumor nunca fue confirmado o negado por Marvel Studios o el propio Gosling. Es posible que solo fuera un deseo de los fans o un informe falso. A partir de ahora, no hay noticias oficiales o anuncio sobre Gosling jugando Ghost Rider en el MCU.</p>
57
- <h3>Cómo Ghost Rider podría caber en el MCU</h3>
58
- <p>Incluso si Gosling no está jugando Ghost Rider en el MCU, todavía hay otras formas en que el personaje podría encajar en la franquicia. Estos son algunos de ellos:</p>
59
- <ul>
60
- <li>Ghost Rider podría aparecer en Doctor Strange en el Multiverso de la Locura. Se espera que esta película explore diferentes realidades y dimensiones dentro del UCM, que podría incluir una donde exista Ghost Rider. Ghost Rider también podría tener una conexión con Scarlet Witch, que se confirma que aparece en la película y que tiene poderes de deformación de la realidad. </li>
61
- <li>Ghost Rider podría aparecer en Blade. Esta película está programada para reiniciar Blade como parte de la Fase 5 del UCM y la estrella Mahershala Ali como el cazador de vampiros titular. Ghost Rider podría tener un cameo o un papel secundario en esta película, ya que se ha cruzado con Blade en los cómics antes. Ghost Rider y Blade podrían unirse para luchar contra vampiros y otras amenazas sobrenaturales. </li>
62
-
63
- <li>Ghost Rider podría aparecer en su propia película en solitario o serie de televisión. Esta es la opción más obvia y deseada para muchos fans, ya que daría a Ghost Rider la oportunidad de explorar su origen, sus poderes, sus enemigos y sus aliados. Una película o serie de televisión en solitario también podría introducir una nueva versión de Ghost Rider, como Danny Ketch, Robbie Reyes o Alejandra Jones, que tienen diferentes antecedentes e historias de Johnny Blaze.</li>
64
- </ul>
65
- <p>Por supuesto, estas son solo algunas de las posibles formas en que Ghost Rider podría caber en el MCU. Hay muchos otros escenarios y conexiones potenciales que podrían ser explorados. Lo único seguro es que Ghost Rider es un personaje popular e icónico que merece la oportunidad de brillar en el MCU.</p>
66
- <h2>Conclusión</h2>
67
- <p>En conclusión, Ghost Rider 3 Dawn of Darkness no es una película oficial, sino un título y concepto hecho por fans que ha estado circulando en Internet durante años. No hay confirmación o anuncio de que tal película exista o esté en desarrollo. Sin embargo, hay muchos remolques y carteles hechos por fans que han creado algo de bombo y curiosidad entre los fans de Ghost Rider. </p>
68
- <p>Ghost Rider ha aparecido en dos películas de acción en vivo hasta ahora, ambas protagonizadas por Nicolas Cage como Johnny Blaze/ Ghost Rider. El primero fue lanzado en 2007 y el segundo en 2011. Ambas películas recibieron críticas mixtas a negativas de críticos y fans, pero tuvieron éxito comercial. </p>
69
- <p>Los derechos de Ghost Rider volvieron a Marvel Studios en 2013, abriendo nuevas posibilidades para el futuro del personaje. Ha habido varios rumores y especulaciones sobre la participación de Ghost Rider en el UCM, pero nada ha sido confirmado o anunciado todavía. Sin embargo, hay muchas maneras en que Ghost Rider podría caber en el MCU, ya sea como un cameo, un papel de apoyo, o una estrella en solitario. </p>
70
-
71
- <h2>Preguntas frecuentes</h2>
72
- <p>Aquí están algunas de las preguntas más frecuentes sobre Ghost Rider 3 Dawn of Darkness:</p>
73
- <ul>
74
- ¿Es Ghost Rider 3 Dawn of Darkness real? </b><br>No, Ghost Rider 3 Dawn of Darkness no es una película real, sino un título hecho por fans y un concepto que ha estado circulando en Internet durante años. No hay confirmación o anuncio de que tal película exista o esté en desarrollo. </li>
75
- <li><b> ¿Quién está jugando Ghost Rider en Ghost Rider 3 Dawn of Darkness? </b><br>Como Ghost Rider 3 Dawn of Darkness no es una película real, no hay un elenco oficial para ella. Sin embargo, basados en los trailers y carteles hechos por fans, algunos de los actores que los fans quisieran ver en la película son Nicolas Cage como Johnny Blaze/ Ghost Rider, Wesley Snipes como Blade, Idris Elba como Moreau, Benedict Cumberbatch como Doctor Strange, Chris Hemsworth como Thor, y Tom Ellis como Lucifer Morningstar.</li>
76
- ¿Cuál es la trama de Ghost Rider 3 Dawn of Darkness? </b><br>Como Ghost Rider 3 Dawn of Darkness no es una película real, no hay ningún argumento oficial para ello. Sin embargo, sobre la base de los trailers y carteles hechos por fans, algunos de los posibles elementos de la trama son Johnny Blaze/ Ghost Rider haciendo equipo con Moreau y otros aliados para encontrar y destruir el Libro de Cagliostro, un antiguo tomo que contiene oscuros secretos y hechizos; Johnny Blaze/ Ghost Rider frente a Blade y su culto de vampiros que guardan el libro; Johnny Blaze/ Ghost Rider destruyendo el libro y liberándose de las garras del diablo. </li>
77
- <li><b>¿Cuándo se lanzará Ghost Rider 3 Dawn of Darkness? </b><br>Como Ghost Rider 3 Dawn of Darkness no es una película real, no hay fecha oficial para su lanzamiento. Sin embargo, basado en los remolques y carteles hechos por fans, algunas de las posibles fechas de lanzamiento son 2023, 2024 o 2025. </li>
78
-
79
- </ul>
80
- <p>Espero que este artículo haya respondido a sus preguntas y satisfecho su curiosidad sobre Ghost Rider 3 Dawn of Darkness. Si eres un fan de Ghost Rider, también puedes ver los cómics, los programas de televisión, los videojuegos y la mercancía relacionada con el personaje. Gracias por leer y tener un gran día! </p> 64aa2da5cf<br />
81
- <br />
82
- <br />
 
spaces/Benson/text-generation/Examples/Choque Mini Descarga Pc.md DELETED
@@ -1,63 +0,0 @@
1
- <br />
2
- <h1>Clash Mini: Un juego de mesa divertido y estratégico en el universo de choque</h1>
3
- <p>¿Te encanta el universo Clash y sus personajes icónicos? ¿Te gustan los juegos de estrategia que desafían tu mente y ponen a prueba tus habilidades? Si es así, quizás quieras echar un vistazo a <strong>Clash Mini</strong>, un nuevo juego de Supercell, los creadores de <strong>Clash of Clans</strong> y <strong>Clash Royale</strong>. </p>
4
- <p>Clash Mini es un juego de mesa de estrategia que te permite recoger, convocar y actualizar tu ejército de Minis, que son versiones en miniatura de los personajes familiares del universo Clash. Puedes llevar a tu adorable ejército a la batalla junto a héroes legendarios como el Rey Bárbaro, la Doncella de Escudo, la Reina Arquera y más. También puedes liberar poderosas unidades como Pekka, magos y arqueros mágicos para cambiar la marea de la batalla. </p>
5
- <h2>choque mini descarga pc</h2><br /><p><b><b>DOWNLOAD</b> ::: <a href="https://bltlly.com/2v6Kw5">https://bltlly.com/2v6Kw5</a></b></p><br /><br />
6
- <p>En este artículo, le diremos todo lo que necesita saber sobre Clash Mini, incluyendo lo que es, cómo jugarlo en PC, cuándo se lanzará, y cómo registrarse para la versión beta. ¡Vamos a empezar! </p>
7
- <h2>¿Qué es Clash Mini? </h2>
8
- <p>Clash Mini es un juego de elecciones, duelo y retumbar, miniaturas, héroes y habilidades, y combinaciones dinámicas y un sinfín de posibilidades. Echemos un vistazo más de cerca a cada aspecto. </p>
9
- <h3>Un juego de elecciones, duelo y retumbar</h3>
10
- <p>En Clash Mini, puedes jugar en modo 1v1 o rumble contra otros 7 jugadores. En cada modo, tienes que predecir los movimientos de tu oponente y luego armar tu estrategia ganadora y formación. Puedes colocar tus Minis en un tablero al mismo tiempo que tu oponente, y luego verlos chocar automáticamente en tiempo real. </p>
11
- <p>Cada juego está lleno de acción y dura menos de 5 minutos. Puedes jugar casualmente por diversión o en partidos clasificados para aumentar tu posición en la liga. También puedes completar misiones para recoger minis y desbloquear nuevas habilidades. </p>
12
- <h3>Un juego de miniaturas, héroes y habilidades</h3>
13
-
14
- <p>También puedes elegir entre 8 héroes que pueden liderar tu ejército. Cada héroe tiene su propia habilidad especial que puede cambiar las tornas a tu favor. Por ejemplo, el Rey Bárbaro puede cargar hacia adelante y aturdir a los enemigos con su martillo, la Doncella de Escudo puede proteger a tus Minis con su muro de escudo, y la Reina Arquera puede disparar flechas que atraviesan múltiples objetivos. </p>
15
- <p>Puedes personalizar a tus héroes y Minis con pieles únicas que muestran tu individualidad y estilo en el campo de batalla. </p>
16
- <p></p>
17
- <h3>Un juego de combinaciones dinámicas y un sinfín de posibilidades</h3>
18
- <p>Uno de los aspectos más emocionantes de Clash Mini es la variedad de estrategias y combinaciones que puedes crear con tus Minis y héroes. Puedes experimentar con diferentes formaciones, sinergias, contadores y tácticas para encontrar la mejor manera de ganar. </p>
19
- <p>También puedes ajustar tu estrategia en el juego con tanques, cuerpo a cuerpo y Minis a distancia dependiendo de la situación. Puedes actualizar Minis durante la batalla para activar habilidades más fuertes o intercambiarlas entre rondas para adaptarlas a los movimientos de tu oponente. </p>
20
- <p>Con tantas opciones y variables, cada batalla en Clash Mini es diferente e impredecible. Tienes que ser creativo y flexible para superar a tus rivales y reclamar la victoria. </p>
21
- <h2>¿Cómo se juega Clash Mini en PC? </h2>
22
- <p>Clash Mini está diseñado para ser jugado en dispositivos móviles, pero es posible que se pregunte si se puede jugar en el PC, así. La respuesta es sí, se puede! Jugar Clash Mini en PC tiene varias ventajas, como una pantalla más grande, mejores gráficos, un rendimiento más rápido y controles más cómodos. Así es como puedes hacerlo. </p>
23
- <h3>¿Por qué jugar Clash Mini en PC? </h3>
24
- <p>Jugar Clash Mini en PC puede mejorar su experiencia de juego de muchas maneras. Aquí están algunos de los beneficios de jugar Clash Mini en PC:</p>
25
- <ul>
26
- <li>Puedes disfrutar de una vista más amplia y clara del tablero y los Minis, lo que puede ayudarte a planificar mejor tus movimientos y ver los detalles de las animaciones y efectos. </li>
27
-
28
- <li>Puedes usar el teclado y el ratón para controlar el juego, lo que puede ser más preciso y conveniente que usar los dedos en una pantalla táctil. </li>
29
- <li>Puede acceder a otras características y aplicaciones en su PC mientras juega Clash Mini, como chatear con sus amigos, navegar por la web o transmitir su juego. </li>
30
- </ul>
31
- <h3>¿Cómo descargar e instalar Clash Mini en el PC usando un emulador? </h3>
32
- <p>La forma más fácil de jugar Clash Mini en PC es usar un emulador. Un emulador es un software que le permite ejecutar aplicaciones Android o iOS en su PC. Hay muchos emuladores disponibles en línea, pero recomendamos usar <strong>BlueStacks</strong>, que es uno de los emuladores más populares y confiables para juegos. </p>
33
- <p>Aquí están los pasos para descargar e instalar Clash Mini en el PC usando BlueStacks:</p>
34
- <ol>
35
- <li>Descargar e instalar BlueStacks desde su sitio web oficial: <a href="">https://www.bluestacks.com/</a></li>
36
- <li>Inicie BlueStacks e inicie sesión con su cuenta de Google. </li>
37
- <li>Ir a la Google Play Store o la App Store en BlueStacks y buscar Clash Mini.</li>
38
- <li>Haga clic en el botón Instalar y espere a que el juego se descargue e instale. </li>
39
- <li>Una vez instalado el juego, haga clic en el botón Abrir o encuentre el icono del juego en la pantalla de inicio de BlueStacks. </li>
40
- <li>Disfruta jugando Clash Mini en PC! </li>
41
- </ol>
42
- <h3>¿Cómo se juega Clash Mini en el PC con el teclado y el ratón? </h3>
43
- <p>Una de las ventajas de jugar Clash Mini en PC es que puedes usar tu teclado y ratón para controlar el juego. Esto puede darle más precisión y comodidad que usar los dedos en una pantalla táctil. Sin embargo, es posible que necesite ajustar algunos ajustes y asignaciones de claves para optimizar su juego. </p>
44
- <p>Aquí hay algunos consejos para jugar Clash Mini en el PC con el teclado y el ratón:</p>
45
- <ul>
46
- <li> Puede utilizar el ratón para arrastrar y soltar sus Minis en el tablero, así como para seleccionar su héroe y habilidades. </li>
47
-
48
- <li> Puede utilizar el teclado para girar el tablero pulsando las teclas de flecha izquierda y derecha. </li>
49
- <li>Puedes usar el teclado para acceder al menú, chat, configuración, tienda, perfil, misiones, liga, clan y amigos presionando las teclas correspondientes. Puede comprobar las asignaciones de teclas haciendo clic en el icono del teclado en la esquina inferior derecha de BlueStacks.</li>
50
- <li>Puede personalizar las asignaciones de teclas haciendo clic en el icono del teclado y luego haciendo clic en Editar. Puede arrastrar y soltar diferentes teclas en diferentes funciones o crear nuevas según sus preferencias. </li>
51
- </ul>
52
- <h2>¿Cuándo es la fecha de lanzamiento de Clash Mini? </h2>
53
- <p>Si te emociona jugar a Clash Mini, es posible que te estés preguntando cuándo se lanzará. La respuesta no es tan simple, ya que hay diferentes fechas de lanzamiento para diferentes regiones y plataformas. Esto es lo que sabemos hasta ahora. </p>
54
- <h3>La versión beta de Clash Mini</h3>
55
- <p>La versión beta de Clash Mini es una versión de prueba del juego que permite a los jugadores probarlo antes de su lanzamiento oficial. La versión beta de Clash Mini está disponible actualmente en países seleccionados solo para dispositivos Android. Estos países son Finlandia, Suecia, Noruega, Dinamarca, Islandia, Nueva Zelanda, Australia, Canadá, Singapur, Filipinas, Malasia, Indonesia, India, Hong Kong SAR China.</p>
56
- <p>La versión beta de Clash Mini no es un producto final y puede contener errores, fallas o errores. La versión beta de Clash Mini también puede sufrir cambios o actualizaciones basadas en los comentarios de los jugadores. La versión beta de Clash Mini no representa la calidad ni las características del juego final. </p>
57
- <h3>El global para dispositivos Android e iOS. Sin embargo, el juego podría lanzarse en diferentes regiones en diferentes momentos, dependiendo de la retroalimentación y el rendimiento de la versión beta. </p>
58
- <h4>¿Cómo se juega Clash Mini en PC? </h4>
59
-
60
- <h4>¿Cómo registrarse para la versión beta de Clash Mini? </h4>
61
- <p>Puede registrarse para la versión beta de Clash Mini visitando el sitio web de Supercell e ingresando su dirección de correo electrónico. La versión beta de Clash Mini está abierta para cualquier persona que tenga un dispositivo Android y viva en uno de los siguientes países: Finlandia, Suecia, Noruega, Dinamarca, Islandia, Nueva Zelanda, Australia, Canadá, Singapur, Filipinas, Malasia, Indonesia, India, Hong Kong SAR China. Si cumple con estos criterios, recibirá un correo electrónico de Supercell con un enlace para descargar el juego desde la Google Play Store o la App Store.</p> 64aa2da5cf<br />
62
- <br />
63
- <br />
 
spaces/Benson/text-generation/Examples/Descargar Fonte Clash Royale.md DELETED
@@ -1,61 +0,0 @@
1
- <br />
2
- <h1>Descargar Clash Royale para Windows 10: Una guía completa</h1>
3
- <p>Si eres un fan de los juegos de estrategia en tiempo real, es posible que hayas oído hablar de <strong>Clash Royale</strong>, uno de los juegos más populares y adictivos del género. Clash Royale es un juego desarrollado por Supercell, la misma compañía detrás del exitoso juego <strong>Clash of Clans</strong>. En este juego, puedes recoger y actualizar docenas de cartas con tus personajes y hechizos favoritos de Clash, y usarlos para luchar contra otros jugadores en línea en partidas trepidantes y emocionantes. También puedes unirte o crear un clan, chatear con otros jugadores y participar en guerras de clanes para ganar recompensas y gloria. </p>
4
- <h2>descargar fonte clash royale</h2><br /><p><b><b>Download Zip</b> &#10001; &#10001; &#10001; <a href="https://bltlly.com/2v6JO9">https://bltlly.com/2v6JO9</a></b></p><br /><br />
5
- <p>Clash Royale está disponible para dispositivos Android e iOS, pero ¿qué pasa si desea reproducirlo en su PC con Windows 10? Bueno, hay dos maneras de hacer eso, y te mostraremos cómo en este artículo. Pero primero, echemos un vistazo a algunas de las características de Clash Royale que lo hacen tan divertido y atractivo. </p>
6
- <h2>¿Qué es Clash Royale? </h2>
7
- <p>Clash Royale es un juego multijugador en tiempo real que combina elementos de juegos de cartas, torre de defensa y MOBA (campo de batalla multijugador en línea). El juego se desarrolla en el mismo universo que Clash of Clans, pero con un estilo de juego diferente. El juego consta de dos modos: modo escalera y modo torneo. En el modo escalera, puedes jugar contra otros jugadores de nivel de habilidad similar y ganar trofeos, que determinan tu rango en la clasificación global. En el modo torneo, puedes unirte o crear torneos personalizados con diferentes reglas y premios. </p>
8
-
9
- <h3>Características de Clash Royale</h3>
10
- <p>Algunas de las características que hacen de Clash Royale un juego emocionante y adictivo son:</p>
11
- <ul>
12
- <li><strong>Duelistas de todo el mundo</strong>: Puedes desafiar a cualquiera en línea en tiempo real y mostrar tus habilidades y estrategias. También puedes ver las repeticiones de batallas de otros jugadores y aprender de sus movimientos. </li>
13
- <li><strong>Gana cofres para desbloquear recompensas</strong>: Cada vez que ganes una partida, recibirás un cofre que contenga cartas, oro, gemas u otros objetos. Puede utilizar estos recursos para actualizar sus tarjetas o comprar nuevas en la tienda. Hay diferentes tipos de cofres, como el de plata, oro, gigante, mágico, épico, legendario y de clan. </li>
14
- <li><strong>Recoge y actualiza docenas de cartas</strong>: Puedes recoger cartas de diferentes arenas, cada una con su propio tema y personajes. Hay cuatro rarezas de cartas: comunes, raras, épicas y legendarias. Puede actualizar sus tarjetas mediante el uso de oro y tarjetas duplicadas para aumentar su nivel y poder. </li>
15
- <li><strong>Crear o unirse a un clan</strong>: Puedes <p>Crear o unirse a un clan</strong>: Puedes unir fuerzas con otros jugadores y formar un clan, donde puedes chatear, donar cartas, solicitar cartas y participar en guerras de clanes. Las guerras de clanes son un modo especial donde puedes competir con otros clanes por la gloria y las recompensas. También puedes crear tu propio clan e invitar a tus amigos a unirse. </li>
16
- <li><strong>Progresa a través de múltiples arenas</strong>: A medida que ganes partidos y ganes trofeos, desbloquearás nuevas arenas, cada una con su propio tema y grupo de cartas. Hay 13 arenas en total, además de una arena legendaria especial para los mejores jugadores. Cada arena tiene sus propias recompensas y desafíos. </li>
17
-
18
- </ul>
19
- <h3>Cómo jugar Clash Royale en Windows 10</h3>
20
- <p>Ahora que sabe lo que es Clash Royale y lo que ofrece, es posible que se pregunte cómo jugarlo en su PC con Windows 10. Bueno, hay dos métodos que puedes usar para hacer eso: usar un emulador o usar un sitio web. Veamos cómo funciona cada método y cuáles son los pros y los contras de cada uno. </p>
21
- <h2>Cómo descargar Clash Royale para Windows 10</h2>
22
- <h3>Método 1: Usando el emulador de Bluestacks</h3>
23
- <p>El primer método es utilizar un emulador, que es un software que le permite ejecutar aplicaciones Android en su PC. Hay muchos emuladores disponibles en línea, pero uno de los más populares y confiables es <strong>Bluestacks</strong>. Bluestacks es un emulador gratuito que tiene una interfaz fácil de usar y es compatible con muchos juegos y aplicaciones de Android, incluyendo Clash Royale. Estos son los pasos que debe seguir para descargar Clash Royale para Windows 10 usando Bluestacks:</p>
24
- <h4>Paso 1: Descargar e instalar Bluestacks</h4>
25
- <p>Lo primero que tienes que hacer es descargar Bluestacks desde su sitio web oficial: <a href="">https://www.bluestacks.com/</a>. Verá un botón de descarga en la página de inicio que detectará automáticamente su sistema operativo y descargará la versión adecuada para usted. Una vez finalizada la descarga, ejecute el instalador y siga las instrucciones para instalar Bluestacks en su PC.</p>
26
- <h4>Paso 2: Inicie Bluestacks e inicie sesión con la cuenta de Google</h4>
27
- <p>Después de instalar Bluestacks, inicie desde su escritorio o menú de inicio. Verás una pantalla de bienvenida que te pedirá que inicies sesión con tu cuenta de Google. Esto es necesario porque usted necesita para acceder a la Google Play Store para descargar Clash Royale. Si no tienes una cuenta de Google, puedes crear una gratis. Una vez que inicie sesión, verá la pantalla de inicio de Bluestacks, que parece una tableta Android. </p>
28
- <h4>Paso 3: Buscar Clash Royale en la Play Store e instalarlo</h4>
29
-
30
- <h4>Paso 4: Disfruta jugando Clash Royale en tu PC</h4>
31
- <p>Una vez realizada la instalación, verá un botón "Abrir" en la página Clash Royale. Haga clic en él para iniciar Clash Royale en su PC. Verás la pantalla de carga del juego y el menú principal. Ahora puedes jugar a Clash Royale en tu PC con el ratón y el teclado. También puede ajustar la configuración, como sonido, gráficos, idioma, etc., haciendo clic en el icono de engranaje en la esquina superior derecha de la pantalla. </p>
32
- <p></p>
33
- <h3>Método 2: Usando Filehippo.com</h3>
34
- <p>El segundo método es utilizar un sitio web que ofrece descargas gratuitas de aplicaciones de Android para PC. Uno de estos sitios web es <strong>Filehippo.com</strong>, que tiene una gran colección de juegos y aplicaciones para Android que puedes descargar e instalar en tu PC sin usar un emulador. Estos son los pasos que debe seguir para descargar Clash Royale para Windows 10 usando Filehippo.com:</p>
35
- <h4>Paso 1: Vaya a Filehippo.com y busque Clash Royale</h4>
36
- <p>Lo primero que debe hacer es ir a Filehippo.com desde su navegador web: < a href=">https://filehippo.com/</a>. Verá una barra de búsqueda en la parte superior de la página principal. Escriba "Clash Royale" en la barra de búsqueda y pulse enter. Verás la aplicación Clash Royale entre los resultados de búsqueda. Haz clic en ella para abrir su página. </p>
37
- <h4>Paso 2: Haga clic en el botón de descarga y guarde el archivo</h4>
38
- <p>En la página Clash Royale, verá un botón verde "Descargar la última versión" en el lado derecho de la pantalla. Haga clic en él para comenzar a descargar el archivo Clash Royale. Verá una ventana emergente que le pedirá que guarde el archivo. Elija una ubicación en su PC donde desea guardar el archivo y haga clic en "Guardar". El tamaño del archivo es de aproximadamente 110 MB, por lo que podría tomar algún tiempo dependiendo de su velocidad de Internet. </p>
39
- <h4>Paso 3: Ejecute el archivo y siga las instrucciones para instalar Clash Royale</h4>
40
-
41
- <h4>Paso 4: Inicie Clash Royale y comience a jugar</h4>
42
- <p>Después de la instalación, verá un icono de acceso directo de Clash Royale en su escritorio o menú de inicio. Haga clic en él para lanzar Clash Royale en su PC. Verá la pantalla de carga del juego y luego el menú principal. Ahora puede jugar Clash Royale en su PC con el ratón y el teclado. También puede ajustar la configuración, como sonido, gráficos, idioma, etc., haciendo clic en el icono de engranaje en la esquina superior derecha de la pantalla. </p>
43
- <h2>Conclusión</h2>
44
- <p>Clash Royale es uno de los juegos de estrategia en tiempo real más populares y adictivos que puedes jugar en tu dispositivo Android o iOS. Pero si quieres disfrutarlo en una pantalla más grande y con mejores controles, también puedes reproducirlo en tu PC con Windows 10 usando uno de los dos métodos que te mostramos en este artículo: usando el emulador de Bluestacks o usando Filehippo.com. Ambos métodos son fáciles y gratuitos, y te permitirán descargar e instalar Clash Royale para Windows 10 en poco tiempo. Entonces, ¿qué estás esperando? Descargar Clash Royale para Windows 10 hoy y unirse a millones de jugadores de todo el mundo en batallas épicas y torneos! </p>
45
- <h2>Preguntas frecuentes</h2>
46
- <p>Aquí están algunas de las preguntas más frecuentes sobre Clash Royale para Windows 10:</p>
47
- <ul>
48
- <li><strong>Clash Royale es libre de jugar? </strong></li>
49
- <p>Sí, Clash Royale es gratis para jugar, pero también ofrece compras en la aplicación que pueden mejorar su experiencia de juego. Puedes comprar gemas, oro, cofres, tarjetas u otros artículos con dinero real. Sin embargo, estas compras son opcionales y no se requieren para jugar o progresar en el juego. </p>
50
- <li><strong>¿Es seguro descargar Clash Royale? </strong></li>
51
-
52
- <li><strong>¿Puedo jugar a Clash Royale sin conexión? </strong></li>
53
- <p>No, Clash Royale requiere una conexión a Internet para jugar, ya que es un juego multijugador que te conecta con otros jugadores en línea. Necesitas tener una conexión a Internet estable y rápida para jugar a Clash Royale sin ningún retraso o interrupción. </p>
54
- <li><strong>¿Puedo sincronizar mi progreso entre mi dispositivo y PC? </strong></li>
55
- <p>Sí, puedes sincronizar tu progreso entre tu dispositivo y PC usando tu cuenta de Google. Es necesario iniciar sesión con la misma cuenta de Google en su dispositivo y PC al jugar Clash Royale. De esta forma, puedes acceder a tus datos de juego, como tus cartas, oro, gemas, trofeos, clan, etc., en ambas plataformas. </p>
56
- <li><strong>¿Puedo jugar a Clash Royale con mis amigos? </strong></li>
57
- <p>Sí, puedes jugar a Clash Royale con tus amigos uniéndote o creando un clan. Un clan es un grupo de jugadores que pueden chatear, donar cartas, solicitar cartas y participar en guerras de clanes juntos. Puedes invitar a tus amigos a unirse a tu clan o unirse a su clan usando su nombre o etiqueta de clan. También puedes retar a tus amigos a batallas amistosas o ver sus partidos tocando su nombre en el chat del clan. </p>
58
- </ul>
59
- <p></p> 64aa2da5cf<br />
60
- <br />
61
- <br />
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/distlib/__init__.py DELETED
@@ -1,23 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- #
3
- # Copyright (C) 2012-2022 Vinay Sajip.
4
- # Licensed to the Python Software Foundation under a contributor agreement.
5
- # See LICENSE.txt and CONTRIBUTORS.txt.
6
- #
7
- import logging
8
-
9
- __version__ = '0.3.6'
10
-
11
- class DistlibException(Exception):
12
- pass
13
-
14
- try:
15
- from logging import NullHandler
16
- except ImportError: # pragma: no cover
17
- class NullHandler(logging.Handler):
18
- def handle(self, record): pass
19
- def emit(self, record): pass
20
- def createLock(self): self.lock = None
21
-
22
- logger = logging.getLogger(__name__)
23
- logger.addHandler(NullHandler())
 
spaces/CVPR/LIVE/pybind11/tools/check-style.sh DELETED
@@ -1,44 +0,0 @@
1
- #!/bin/bash
2
- #
3
- # Script to check include/test code for common pybind11 code style errors.
4
- #
5
- # This script currently checks for
6
- #
7
- # 1. missing space between keyword and parenthesis, e.g.: for(, if(, while(
8
- # 2. Missing space between right parenthesis and brace, e.g. 'for (...){'
9
- # 3. opening brace on its own line. It should always be on the same line as the
10
- # if/while/for/do statement.
11
- #
12
- # Invoke as: tools/check-style.sh <filenames>
13
- #
14
-
15
- check_style_errors=0
16
- IFS=$'\n'
17
-
18
-
19
- found="$(grep '\<\(if\|for\|while\|catch\)(\|){' $@ -rn --color=always)"
20
- if [ -n "$found" ]; then
21
- echo -e '\033[31;01mError: found the following coding style problems:\033[0m'
22
- check_style_errors=1
23
- echo "$found" | sed -e 's/^/ /'
24
- fi
25
-
26
- found="$(awk '
27
- function prefix(filename, lineno) {
28
- return " \033[35m" filename "\033[36m:\033[32m" lineno "\033[36m:\033[0m"
29
- }
30
- function mark(pattern, string) { sub(pattern, "\033[01;31m&\033[0m", string); return string }
31
- last && /^\s*{/ {
32
- print prefix(FILENAME, FNR-1) mark("\\)\\s*$", last)
33
- print prefix(FILENAME, FNR) mark("^\\s*{", $0)
34
- last=""
35
- }
36
- { last = /(if|for|while|catch|switch)\s*\(.*\)\s*$/ ? $0 : "" }
37
- ' $(find include -type f) $@)"
38
- if [ -n "$found" ]; then
39
- check_style_errors=1
40
- echo -e '\033[31;01mError: braces should occur on the same line as the if/while/.. statement. Found issues in the following files:\033[0m'
41
- echo "$found"
42
- fi
43
-
44
- exit $check_style_errors
 
spaces/CVPR/LIVE/thrust/thrust/system/cpp/detail/transform_reduce.h DELETED
@@ -1,22 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system has no special version of this algorithm
22
-
 
spaces/CVPR/LIVE/thrust/thrust/unique.h DELETED
@@ -1,968 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- /*! \file unique.h
19
- * \brief Move unique elements to the front of a range
20
- */
21
-
22
- #pragma once
23
-
24
- #include <thrust/detail/config.h>
25
- #include <thrust/detail/execution_policy.h>
26
- #include <thrust/pair.h>
27
-
28
- namespace thrust
29
- {
30
-
31
-
32
- /*! \addtogroup stream_compaction
33
- * \{
34
- */
35
-
36
-
37
- /*! For each group of consecutive elements in the range <tt>[first, last)</tt>
38
- * with the same value, \p unique removes all but the first element of
39
- * the group. The return value is an iterator \c new_last such that
40
- * no two consecutive elements in the range <tt>[first, new_last)</tt> are
41
- * equal. The iterators in the range <tt>[new_last, last)</tt> are all still
42
- * dereferenceable, but the elements that they point to are unspecified.
43
- * \p unique is stable, meaning that the relative order of elements that are
44
- * not removed is unchanged.
45
- *
46
- * This version of \p unique uses \c operator== to test for equality.
47
- *
48
- * The algorithm's execution is parallelized as determined by \p exec.
49
- *
50
- * \param exec The execution policy to use for parallelization.
51
- * \param first The beginning of the input range.
52
- * \param last The end of the input range.
53
- * \return The end of the unique range <tt>[first, new_last)</tt>.
54
- *
55
- * \tparam DerivedPolicy The name of the derived execution policy.
56
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
57
- * and \p ForwardIterator is mutable,
58
- * and \p ForwardIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>.
59
- *
60
- * The following code snippet demonstrates how to use \p unique to
61
- * compact a sequence of numbers to remove consecutive duplicates using the \p thrust::host execution policy
62
- * for parallelization:
63
- *
64
- * \code
65
- * #include <thrust/unique.h>
66
- * #include <thrust/execution_policy.h>
67
- * ...
68
- * const int N = 7;
69
- * int A[N] = {1, 3, 3, 3, 2, 2, 1};
70
- * int *new_end = thrust::unique(thrust::host, A, A + N);
71
- * // The first four values of A are now {1, 3, 2, 1}
72
- * // Values beyond new_end are unspecified.
73
- * \endcode
74
- *
75
- * \see http://www.sgi.com/tech/stl/unique.html
76
- * \see unique_copy
77
- */
78
- template<typename DerivedPolicy,
79
- typename ForwardIterator>
80
- __host__ __device__
81
- ForwardIterator unique(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
82
- ForwardIterator first,
83
- ForwardIterator last);
84
-
85
-
86
- /*! For each group of consecutive elements in the range <tt>[first, last)</tt>
87
- * with the same value, \p unique removes all but the first element of
88
- * the group. The return value is an iterator \c new_last such that
89
- * no two consecutive elements in the range <tt>[first, new_last)</tt> are
90
- * equal. The iterators in the range <tt>[new_last, last)</tt> are all still
91
- * dereferenceable, but the elements that they point to are unspecified.
92
- * \p unique is stable, meaning that the relative order of elements that are
93
- * not removed is unchanged.
94
- *
95
- * This version of \p unique uses \c operator== to test for equality.
96
- *
97
- * \param first The beginning of the input range.
98
- * \param last The end of the input range.
99
- * \return The end of the unique range <tt>[first, new_last)</tt>.
100
- *
101
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
102
- * and \p ForwardIterator is mutable,
103
- * and \p ForwardIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>.
104
- *
105
- * The following code snippet demonstrates how to use \p unique to
106
- * compact a sequence of numbers to remove consecutive duplicates.
107
- *
108
- * \code
109
- * #include <thrust/unique.h>
110
- * ...
111
- * const int N = 7;
112
- * int A[N] = {1, 3, 3, 3, 2, 2, 1};
113
- * int *new_end = thrust::unique(A, A + N);
114
- * // The first four values of A are now {1, 3, 2, 1}
115
- * // Values beyond new_end are unspecified.
116
- * \endcode
117
- *
118
- * \see http://www.sgi.com/tech/stl/unique.html
119
- * \see unique_copy
120
- */
121
- template<typename ForwardIterator>
122
- ForwardIterator unique(ForwardIterator first,
123
- ForwardIterator last);
124
-
125
-
126
- /*! For each group of consecutive elements in the range <tt>[first, last)</tt>
127
- * with the same value, \p unique removes all but the first element of
128
- * the group. The return value is an iterator \c new_last such that
129
- * no two consecutive elements in the range <tt>[first, new_last)</tt> are
130
- * equal. The iterators in the range <tt>[new_last, last)</tt> are all still
131
- * dereferenceable, but the elements that they point to are unspecified.
132
- * \p unique is stable, meaning that the relative order of elements that are
133
- * not removed is unchanged.
134
- *
135
- * This version of \p unique uses the function object \p binary_pred to test
136
- * for equality.
137
- *
138
- * The algorithm's execution is parallelized as determined by \p exec.
139
- *
140
- * \param exec The execution policy to use for parallelization.
141
- * \param first The beginning of the input range.
142
- * \param last The end of the input range.
143
- * \param binary_pred The binary predicate used to determine equality.
144
- * \return The end of the unique range <tt>[first, new_last)</tt>
145
- *
146
- * \tparam DerivedPolicy The name of the derived execution policy.
147
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
148
- * and \p ForwardIterator is mutable,
149
- * and \p ForwardIterator's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type and to \p BinaryPredicate's \c second_argument_type.
150
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
151
- *
152
- * The following code snippet demonstrates how to use \p unique to
153
- * compact a sequence of numbers to remove consecutive duplicates using the \p thrust::host execution policy
154
- * for parallelization:
155
- *
156
- * \code
157
- * #include <thrust/unique.h>
158
- * #include <thrust/execution_policy.h>
159
- * ...
160
- * const int N = 7;
161
- * int A[N] = {1, 3, 3, 3, 2, 2, 1};
162
- * int *new_end = thrust::unique(thrust::host, A, A + N, thrust::equal_to<int>());
163
- * // The first four values of A are now {1, 3, 2, 1}
164
- * // Values beyond new_end are unspecified.
165
- * \endcode
166
- *
167
- * \see http://www.sgi.com/tech/stl/unique.html
168
- * \see unique_copy
169
- */
170
- template<typename DerivedPolicy,
171
- typename ForwardIterator,
172
- typename BinaryPredicate>
173
- __host__ __device__
174
- ForwardIterator unique(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
175
- ForwardIterator first,
176
- ForwardIterator last,
177
- BinaryPredicate binary_pred);
178
-
179
-
180
- /*! For each group of consecutive elements in the range <tt>[first, last)</tt>
181
- * with the same value, \p unique removes all but the first element of
182
- * the group. The return value is an iterator \c new_last such that
183
- * no two consecutive elements in the range <tt>[first, new_last)</tt> are
184
- * equal. The iterators in the range <tt>[new_last, last)</tt> are all still
185
- * dereferenceable, but the elements that they point to are unspecified.
186
- * \p unique is stable, meaning that the relative order of elements that are
187
- * not removed is unchanged.
188
- *
189
- * This version of \p unique uses the function object \p binary_pred to test
190
- * for equality.
191
- *
192
- * \param first The beginning of the input range.
193
- * \param last The end of the input range.
194
- * \param binary_pred The binary predicate used to determine equality.
195
- * \return The end of the unique range <tt>[first, new_last)</tt>
196
- *
197
- * \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
198
- * and \p ForwardIterator is mutable,
199
- * and \p ForwardIterator's \c value_type is convertible to \p BinaryPredicate's \c first_argument_type and to \p BinaryPredicate's \c second_argument_type.
200
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
201
- *
202
- * The following code snippet demonstrates how to use \p unique to
203
- * compact a sequence of numbers to remove consecutive duplicates.
204
- *
205
- * \code
206
- * #include <thrust/unique.h>
207
- * ...
208
- * const int N = 7;
209
- * int A[N] = {1, 3, 3, 3, 2, 2, 1};
210
- * int *new_end = thrust::unique(A, A + N, thrust::equal_to<int>());
211
- * // The first four values of A are now {1, 3, 2, 1}
212
- * // Values beyond new_end are unspecified.
213
- * \endcode
214
- *
215
- * \see http://www.sgi.com/tech/stl/unique.html
216
- * \see unique_copy
217
- */
218
- template<typename ForwardIterator,
219
- typename BinaryPredicate>
220
- ForwardIterator unique(ForwardIterator first,
221
- ForwardIterator last,
222
- BinaryPredicate binary_pred);
223
-
224
-
225
- /*! \p unique_copy copies elements from the range <tt>[first, last)</tt>
226
- * to a range beginning with \p result, except that in a consecutive group
227
- * of duplicate elements only the first one is copied. The return value
228
- * is the end of the range to which the elements are copied.
229
- *
230
- * The reason there are two different versions of unique_copy is that there
231
- * are two different definitions of what it means for a consecutive group of
232
- * elements to be duplicates. In the first version, the test is simple
233
- * equality: the elements in a range <tt>[f, l)</tt> are duplicates if,
234
- * for every iterator \p i in the range, either <tt>i == f</tt> or else
235
- * <tt>*i == *(i-1)</tt>. In the second, the test is an arbitrary
236
- * \p BinaryPredicate \p binary_pred: the elements in <tt>[f, l)</tt> are
237
- * duplicates if, for every iterator \p i in the range, either <tt>i == f</tt>
238
- * or else <tt>binary_pred(*i, *(i-1))</tt> is \p true.
239
- *
240
- * This version of \p unique_copy uses \c operator== to test for equality.
241
- *
242
- * The algorithm's execution is parallelized as determined by \p exec.
243
- *
244
- * \param exec The execution policy to use for parallelization.
245
- * \param first The beginning of the input range.
246
- * \param last The end of the input range.
247
- * \param result The beginning of the output range.
248
- * \return The end of the unique range <tt>[result, result_end)</tt>.
249
- *
250
- * \tparam DerivedPolicy The name of the derived execution policy.
251
- * \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
252
- * and \p InputIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>.
253
- * \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a> and
254
- * and \p InputIterator's \c value_type is convertible to \c OutputIterator's \c value_type.
255
- *
256
- * \pre The range <tt>[first,last)</tt> and the range <tt>[result, result + (last - first))</tt> shall not overlap.
257
- *
258
- * The following code snippet demonstrates how to use \p unique_copy to
259
- * compact a sequence of numbers to remove consecutive duplicates using the \p thrust::host execution
260
- * policy for parallelization:
261
- *
262
- * \code
263
- * #include <thrust/unique.h>
264
- * #include <thrust/execution_policy.h>
265
- * ...
266
- * const int N = 7;
267
- * int A[N] = {1, 3, 3, 3, 2, 2, 1};
268
- * int B[N];
269
- * int *result_end = thrust::unique_copy(thrust::host, A, A + N, B);
270
- * // The first four values of B are now {1, 3, 2, 1} and (result_end - B) is 4
271
- * // Values beyond result_end are unspecified
272
- * \endcode
273
- *
274
- * \see unique
275
- * \see http://www.sgi.com/tech/stl/unique_copy.html
276
- */
277
- template<typename DerivedPolicy,
278
- typename InputIterator,
279
- typename OutputIterator>
280
- __host__ __device__
281
- OutputIterator unique_copy(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
282
- InputIterator first,
283
- InputIterator last,
284
- OutputIterator result);
285
-
286
-
287
- /*! \p unique_copy copies elements from the range <tt>[first, last)</tt>
288
- * to a range beginning with \p result, except that in a consecutive group
289
- * of duplicate elements only the first one is copied. The return value
290
- * is the end of the range to which the elements are copied.
291
- *
292
- * The reason there are two different versions of unique_copy is that there
293
- * are two different definitions of what it means for a consecutive group of
294
- * elements to be duplicates. In the first version, the test is simple
295
- * equality: the elements in a range <tt>[f, l)</tt> are duplicates if,
296
- * for every iterator \p i in the range, either <tt>i == f</tt> or else
297
- * <tt>*i == *(i-1)</tt>. In the second, the test is an arbitrary
298
- * \p BinaryPredicate \p binary_pred: the elements in <tt>[f, l)</tt> are
299
- * duplicates if, for every iterator \p i in the range, either <tt>i == f</tt>
300
- * or else <tt>binary_pred(*i, *(i-1))</tt> is \p true.
301
- *
302
- * This version of \p unique_copy uses \c operator== to test for equality.
303
- *
304
- * \param first The beginning of the input range.
305
- * \param last The end of the input range.
306
- * \param result The beginning of the output range.
307
- * \return The end of the unique range <tt>[result, result_end)</tt>.
308
- *
309
- * \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
310
- * and \p InputIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>.
311
- * \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a> and
312
- * and \p InputIterator's \c value_type is convertible to \c OutputIterator's \c value_type.
313
- *
314
- * \pre The range <tt>[first,last)</tt> and the range <tt>[result, result + (last - first))</tt> shall not overlap.
315
- *
316
- * The following code snippet demonstrates how to use \p unique_copy to
317
- * compact a sequence of numbers to remove consecutive duplicates.
318
- *
319
- * \code
320
- * #include <thrust/unique.h>
321
- * ...
322
- * const int N = 7;
323
- * int A[N] = {1, 3, 3, 3, 2, 2, 1};
324
- * int B[N];
325
- * int *result_end = thrust::unique_copy(A, A + N, B);
326
- * // The first four values of B are now {1, 3, 2, 1} and (result_end - B) is 4
327
- * // Values beyond result_end are unspecified
328
- * \endcode
329
- *
330
- * \see unique
331
- * \see http://www.sgi.com/tech/stl/unique_copy.html
332
- */
333
- template<typename InputIterator,
334
- typename OutputIterator>
335
- OutputIterator unique_copy(InputIterator first,
336
- InputIterator last,
337
- OutputIterator result);
338
-
339
-
340
- /*! \p unique_copy copies elements from the range <tt>[first, last)</tt>
341
- * to a range beginning with \p result, except that in a consecutive group
342
- * of duplicate elements only the first one is copied. The return value
343
- * is the end of the range to which the elements are copied.
344
- *
345
- * This version of \p unique_copy uses the function object \c binary_pred
346
- * to test for equality.
347
- *
348
- * The algorithm's execution is parallelized as determined by \p exec.
349
- *
350
- * \param exec The execution policy to use for parallelization.
351
- * \param first The beginning of the input range.
352
- * \param last The end of the input range.
353
- * \param result The beginning of the output range.
354
- * \param binary_pred The binary predicate used to determine equality.
355
- * \return The end of the unique range <tt>[result, result_end)</tt>.
356
- *
357
- * \tparam DerivedPolicy The name of the derived execution policy.
358
- * \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
359
- * and \p InputIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>.
360
- * \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a> and
361
- * and \p InputIterator's \c value_type is convertible to \c OutputIterator's \c value_type.
362
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
363
- *
364
- * \pre The range <tt>[first,last)</tt> and the range <tt>[result, result + (last - first))</tt> shall not overlap.
365
- *
366
- * The following code snippet demonstrates how to use \p unique_copy to
367
- * compact a sequence of numbers to remove consecutive duplicates using the \p thrust::host execution
368
- * policy for parallelization:
369
- *
370
- * \code
371
- * #include <thrust/unique.h>
372
- * #include <thrust/execution_policy.h>
373
- * ...
374
- * const int N = 7;
375
- * int A[N] = {1, 3, 3, 3, 2, 2, 1};
376
- * int B[N];
377
- * int *result_end = thrust::unique_copy(thrust::host, A, A + N, B, thrust::equal_to<int>());
378
- * // The first four values of B are now {1, 3, 2, 1} and (result_end - B) is 4
379
- * // Values beyond result_end are unspecified.
380
- * \endcode
381
- *
382
- * \see unique
383
- * \see http://www.sgi.com/tech/stl/unique_copy.html
384
- */
385
- template<typename DerivedPolicy,
386
- typename InputIterator,
387
- typename OutputIterator,
388
- typename BinaryPredicate>
389
- __host__ __device__
390
- OutputIterator unique_copy(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
391
- InputIterator first,
392
- InputIterator last,
393
- OutputIterator result,
394
- BinaryPredicate binary_pred);
395
-
396
-
397
- /*! \p unique_copy copies elements from the range <tt>[first, last)</tt>
398
- * to a range beginning with \p result, except that in a consecutive group
399
- * of duplicate elements only the first one is copied. The return value
400
- * is the end of the range to which the elements are copied.
401
- *
402
- * This version of \p unique_copy uses the function object \c binary_pred
403
- * to test for equality.
404
- *
405
- * \param first The beginning of the input range.
406
- * \param last The end of the input range.
407
- * \param result The beginning of the output range.
408
- * \param binary_pred The binary predicate used to determine equality.
409
- * \return The end of the unique range <tt>[result, result_end)</tt>.
410
- *
411
- * \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
412
- * and \p InputIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>.
413
- * \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a> and
414
- * and \p InputIterator's \c value_type is convertible to \c OutputIterator's \c value_type.
415
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
416
- *
417
- * \pre The range <tt>[first,last)</tt> and the range <tt>[result, result + (last - first))</tt> shall not overlap.
418
- *
419
- * The following code snippet demonstrates how to use \p unique_copy to
420
- * compact a sequence of numbers to remove consecutive duplicates.
421
- *
422
- * \code
423
- * #include <thrust/unique.h>
424
- * ...
425
- * const int N = 7;
426
- * int A[N] = {1, 3, 3, 3, 2, 2, 1};
427
- * int B[N];
428
- * int *result_end = thrust::unique_copy(A, A + N, B, thrust::equal_to<int>());
429
- * // The first four values of B are now {1, 3, 2, 1} and (result_end - B) is 4
430
- * // Values beyond result_end are unspecified.
431
- * \endcode
432
- *
433
- * \see unique
434
- * \see http://www.sgi.com/tech/stl/unique_copy.html
435
- */
436
- template<typename InputIterator,
437
- typename OutputIterator,
438
- typename BinaryPredicate>
439
- OutputIterator unique_copy(InputIterator first,
440
- InputIterator last,
441
- OutputIterator result,
442
- BinaryPredicate binary_pred);
443
-
444
-
445
- /*! \p unique_by_key is a generalization of \p unique to key-value pairs.
446
- * For each group of consecutive keys in the range <tt>[keys_first, keys_last)</tt>
447
- * that are equal, \p unique_by_key removes all but the first element of
448
- * the group. Similarly, the corresponding values in the range
449
- * <tt>[values_first, values_first + (keys_last - keys_first))</tt>
450
- * are also removed.
451
- *
452
- * The return value is a \p pair of iterators <tt>(new_keys_last,new_values_last)</tt>
453
- * such that no two consecutive elements in the range <tt>[keys_first, new_keys_last)</tt>
454
- * are equal.
455
- *
456
- * This version of \p unique_by_key uses \c operator== to test for equality and
457
- * \c project1st to reduce values with equal keys.
458
- *
459
- * The algorithm's execution is parallelized as determined by \p exec.
460
- *
461
- * \param exec The execution policy to use for parallelization.
462
- * \param keys_first The beginning of the key range.
463
- * \param keys_last The end of the key range.
464
- * \param values_first The beginning of the value range.
465
- * \return A pair of iterators at end of the ranges <tt>[key_first, keys_new_last)</tt> and <tt>[values_first, values_new_last)</tt>.
466
- *
467
- * \tparam DerivedPolicy The name of the derived execution policy.
468
- * \tparam ForwardIterator1 is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
469
- * and \p ForwardIterator1 is mutable,
470
- * and \p ForwardIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>.
471
- * \tparam ForwardIterator2 is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
472
- * and \p ForwardIterator2 is mutable.
473
- *
474
- * \pre The range <tt>[keys_first, keys_last)</tt> and the range <tt>[values_first, values_first + (keys_last - keys_first))</tt> shall not overlap.
475
- *
476
- * The following code snippet demonstrates how to use \p unique_by_key to
477
- * compact a sequence of key/value pairs to remove consecutive duplicates using the \p thrust::host
478
- * execution policy for parallelization:
479
- *
480
- * \code
481
- * #include <thrust/unique.h>
482
- * #include <thrust/execution_policy.h>
483
- * ...
484
- * const int N = 7;
485
- * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // keys
486
- * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // values
487
- *
488
- * thrust::pair<int*,int*> new_end;
489
- * new_end = thrust::unique_by_key(thrust::host, A, A + N, B);
490
- *
491
- * // The first four keys in A are now {1, 3, 2, 1} and new_end.first - A is 4.
492
- * // The first four values in B are now {9, 8, 5, 3} and new_end.second - B is 4.
493
- * \endcode
494
- *
495
- * \see unique
496
- * \see unique_by_key_copy
497
- * \see reduce_by_key
498
- */
499
- template<typename DerivedPolicy,
500
- typename ForwardIterator1,
501
- typename ForwardIterator2>
502
- __host__ __device__
503
- thrust::pair<ForwardIterator1,ForwardIterator2>
504
- unique_by_key(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
505
- ForwardIterator1 keys_first,
506
- ForwardIterator1 keys_last,
507
- ForwardIterator2 values_first);
508
-
509
-
510
- /*! \p unique_by_key is a generalization of \p unique to key-value pairs.
511
- * For each group of consecutive keys in the range <tt>[keys_first, keys_last)</tt>
512
- * that are equal, \p unique_by_key removes all but the first element of
513
- * the group. Similarly, the corresponding values in the range
514
- * <tt>[values_first, values_first + (keys_last - keys_first))</tt>
515
- * are also removed.
516
- *
517
- * The return value is a \p pair of iterators <tt>(new_keys_last,new_values_last)</tt>
518
- * such that no two consecutive elements in the range <tt>[keys_first, new_keys_last)</tt>
519
- * are equal.
520
- *
521
- * This version of \p unique_by_key uses \c operator== to test for equality and
522
- * \c project1st to reduce values with equal keys.
523
- *
524
- * \param keys_first The beginning of the key range.
525
- * \param keys_last The end of the key range.
526
- * \param values_first The beginning of the value range.
527
- * \return A pair of iterators at end of the ranges <tt>[key_first, keys_new_last)</tt> and <tt>[values_first, values_new_last)</tt>.
528
- *
529
- * \tparam ForwardIterator1 is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
530
- * and \p ForwardIterator1 is mutable,
531
- * and \p ForwardIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>.
532
- * \tparam ForwardIterator2 is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
533
- * and \p ForwardIterator2 is mutable.
534
- *
535
- * \pre The range <tt>[keys_first, keys_last)</tt> and the range <tt>[values_first, values_first + (keys_last - keys_first))</tt> shall not overlap.
536
- *
537
- * The following code snippet demonstrates how to use \p unique_by_key to
538
- * compact a sequence of key/value pairs to remove consecutive duplicates.
539
- *
540
- * \code
541
- * #include <thrust/unique.h>
542
- * ...
543
- * const int N = 7;
544
- * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // keys
545
- * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // values
546
- *
547
- * thrust::pair<int*,int*> new_end;
548
- * new_end = thrust::unique_by_key(A, A + N, B);
549
- *
550
- * // The first four keys in A are now {1, 3, 2, 1} and new_end.first - A is 4.
551
- * // The first four values in B are now {9, 8, 5, 3} and new_end.second - B is 4.
552
- * \endcode
553
- *
554
- * \see unique
555
- * \see unique_by_key_copy
556
- * \see reduce_by_key
557
- */
558
- template<typename ForwardIterator1,
559
- typename ForwardIterator2>
560
- thrust::pair<ForwardIterator1,ForwardIterator2>
561
- unique_by_key(ForwardIterator1 keys_first,
562
- ForwardIterator1 keys_last,
563
- ForwardIterator2 values_first);
564
-
565
-
566
- /*! \p unique_by_key is a generalization of \p unique to key-value pairs.
567
- * For each group of consecutive keys in the range <tt>[keys_first, keys_last)</tt>
568
- * that are equal, \p unique_by_key removes all but the first element of
569
- * the group. Similarly, the corresponding values in the range
570
- * <tt>[values_first, values_first + (keys_last - keys_first))</tt>
571
- * are also removed.
572
- *
573
- * This version of \p unique_by_key uses the function object \c binary_pred
574
- * to test for equality and \c project1st to reduce values with equal keys.
575
- *
576
- * The algorithm's execution is parallelized as determined by \p exec.
577
- *
578
- * \param exec The execution policy to use for parallelization.
579
- * \param keys_first The beginning of the key range.
580
- * \param keys_last The end of the key range.
581
- * \param values_first The beginning of the value range.
582
- * \param binary_pred The binary predicate used to determine equality.
583
- * \return The end of the unique range <tt>[first, new_last)</tt>.
584
- *
585
- * \tparam DerivedPolicy The name of the derived execution policy.
586
- * \tparam ForwardIterator1 is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
587
- * and \p ForwardIterator1 is mutable,
588
- * and \p ForwardIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>.
589
- * \tparam ForwardIterator2 is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
590
- * and \p ForwardIterator2 is mutable.
591
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
592
- *
593
- * \pre The range <tt>[keys_first, keys_last)</tt> and the range <tt>[values_first, values_first + (keys_last - keys_first))</tt> shall not overlap.
594
- *
595
- * The following code snippet demonstrates how to use \p unique_by_key to
596
- * compact a sequence of key/value pairs to remove consecutive duplicates using the \p thrust::host
597
- * execution policy for parallelization:
598
- *
599
- * \code
600
- * #include <thrust/unique.h>
601
- * #include <thrust/execution_policy.h>
602
- * ...
603
- * const int N = 7;
604
- * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // keys
605
- * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // values
606
- *
607
- * thrust::pair<int*,int*> new_end;
608
- * thrust::equal_to<int> binary_pred;
609
- * new_end = thrust::unique_by_key(thrust::host, A, A + N, B, binary_pred);
610
- *
611
- * // The first four keys in A are now {1, 3, 2, 1} and new_end.first - A is 4.
612
- * // The first four values in B are now {9, 8, 5, 3} and new_end.second - B is 4.
613
- * \endcode
614
- *
615
- * \see unique
616
- * \see unique_by_key_copy
617
- * \see reduce_by_key
618
- */
619
- template<typename DerivedPolicy,
620
- typename ForwardIterator1,
621
- typename ForwardIterator2,
622
- typename BinaryPredicate>
623
- __host__ __device__
624
- thrust::pair<ForwardIterator1,ForwardIterator2>
625
- unique_by_key(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
626
- ForwardIterator1 keys_first,
627
- ForwardIterator1 keys_last,
628
- ForwardIterator2 values_first,
629
- BinaryPredicate binary_pred);
630
-
631
-
632
- /*! \p unique_by_key is a generalization of \p unique to key-value pairs.
633
- * For each group of consecutive keys in the range <tt>[keys_first, keys_last)</tt>
634
- * that are equal, \p unique_by_key removes all but the first element of
635
- * the group. Similarly, the corresponding values in the range
636
- * <tt>[values_first, values_first + (keys_last - keys_first))</tt>
637
- * are also removed.
638
- *
639
- * This version of \p unique_by_key uses the function object \c binary_pred
640
- * to test for equality and \c project1st to reduce values with equal keys.
641
- *
642
- * \param keys_first The beginning of the key range.
643
- * \param keys_last The end of the key range.
644
- * \param values_first The beginning of the value range.
645
- * \param binary_pred The binary predicate used to determine equality.
646
- * \return The end of the unique range <tt>[first, new_last)</tt>.
647
- *
648
- * \tparam ForwardIterator1 is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
649
- * and \p ForwardIterator1 is mutable,
650
- * and \p ForwardIterator's \c value_type is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">Equality Comparable</a>.
651
- * \tparam ForwardIterator2 is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
652
- * and \p ForwardIterator2 is mutable.
653
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
654
- *
655
- * \pre The range <tt>[keys_first, keys_last)</tt> and the range <tt>[values_first, values_first + (keys_last - keys_first))</tt> shall not overlap.
656
- *
657
- * The following code snippet demonstrates how to use \p unique_by_key to
658
- * compact a sequence of key/value pairs to remove consecutive duplicates.
659
- *
660
- * \code
661
- * #include <thrust/unique.h>
662
- * ...
663
- * const int N = 7;
664
- * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // keys
665
- * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // values
666
- *
667
- * thrust::pair<int*,int*> new_end;
668
- * thrust::equal_to<int> binary_pred;
669
- * new_end = thrust::unique_by_key(A, A + N, B, binary_pred);
670
- *
671
- * // The first four keys in A are now {1, 3, 2, 1} and new_end.first - A is 4.
672
- * // The first four values in B are now {9, 8, 5, 3} and new_end.second - B is 4.
673
- * \endcode
674
- *
675
- * \see unique
676
- * \see unique_by_key_copy
677
- * \see reduce_by_key
678
- */
679
- template<typename ForwardIterator1,
680
- typename ForwardIterator2,
681
- typename BinaryPredicate>
682
- thrust::pair<ForwardIterator1,ForwardIterator2>
683
- unique_by_key(ForwardIterator1 keys_first,
684
- ForwardIterator1 keys_last,
685
- ForwardIterator2 values_first,
686
- BinaryPredicate binary_pred);
687
-
688
-
689
- /*! \p unique_by_key_copy is a generalization of \p unique_copy to key-value pairs.
690
- * For each group of consecutive keys in the range <tt>[keys_first, keys_last)</tt>
691
- * that are equal, \p unique_by_key_copy copies the first element of the group to
692
- * a range beginning with \c keys_result and the corresponding values from the range
693
- * <tt>[values_first, values_first + (keys_last - keys_first))</tt> are copied to a range
694
- * beginning with \c values_result.
695
- *
696
- * This version of \p unique_by_key_copy uses \c operator== to test for equality and
697
- * \c project1st to reduce values with equal keys.
698
- *
699
- * The algorithm's execution is parallelized as determined by \p exec.
700
- *
701
- * \param exec The execution policy to use for parallelization.
702
- * \param keys_first The beginning of the input key range.
703
- * \param keys_last The end of the input key range.
704
- * \param values_first The beginning of the input value range.
705
- * \param keys_result The beginning of the output key range.
706
- * \param values_result The beginning of the output value range.
707
- * \return A pair of iterators at end of the ranges <tt>[keys_result, keys_result_last)</tt> and <tt>[values_result, values_result_last)</tt>.
708
- *
709
- * \tparam DerivedPolicy The name of the derived execution policy.
710
- * \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
711
- * \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
712
- * \tparam OutputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a> and
713
- * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type.
714
- * \tparam OutputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a> and
715
- * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type.
716
- *
717
- * \pre The input ranges shall not overlap either output range.
718
- *
719
- * The following code snippet demonstrates how to use \p unique_by_key_copy to
720
- * compact a sequence of key/value pairs and with equal keys using the \p thrust::host execution policy
721
- * for parallelization:
722
- *
723
- * \code
724
- * #include <thrust/unique.h>
725
- * #include <thrust/execution_policy.h>
726
- * ...
727
- * const int N = 7;
728
- * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys
729
- * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values
730
- * int C[N]; // output keys
731
- * int D[N]; // output values
732
- *
733
- * thrust::pair<int*,int*> new_end;
734
- * new_end = thrust::unique_by_key_copy(thrust::host, A, A + N, B, C, D);
735
- *
736
- * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4.
737
- * // The first four values in D are now {9, 8, 5, 3} and new_end.second - D is 4.
738
- * \endcode
739
- *
740
- * \see unique_copy
741
- * \see unique_by_key
742
- * \see reduce_by_key
743
- */
744
- template<typename DerivedPolicy,
745
- typename InputIterator1,
746
- typename InputIterator2,
747
- typename OutputIterator1,
748
- typename OutputIterator2>
749
- __host__ __device__
750
- thrust::pair<OutputIterator1,OutputIterator2>
751
- unique_by_key_copy(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
752
- InputIterator1 keys_first,
753
- InputIterator1 keys_last,
754
- InputIterator2 values_first,
755
- OutputIterator1 keys_result,
756
- OutputIterator2 values_result);
757
-
758
-
759
- /*! \p unique_by_key_copy is a generalization of \p unique_copy to key-value pairs.
760
- * For each group of consecutive keys in the range <tt>[keys_first, keys_last)</tt>
761
- * that are equal, \p unique_by_key_copy copies the first element of the group to
762
- * a range beginning with \c keys_result and the corresponding values from the range
763
- * <tt>[values_first, values_first + (keys_last - keys_first))</tt> are copied to a range
764
- * beginning with \c values_result.
765
- *
766
- * This version of \p unique_by_key_copy uses \c operator== to test for equality and
767
- * \c project1st to reduce values with equal keys.
768
- *
769
- * \param keys_first The beginning of the input key range.
770
- * \param keys_last The end of the input key range.
771
- * \param values_first The beginning of the input value range.
772
- * \param keys_result The beginning of the output key range.
773
- * \param values_result The beginning of the output value range.
774
- * \return A pair of iterators at end of the ranges <tt>[keys_result, keys_result_last)</tt> and <tt>[values_result, values_result_last)</tt>.
775
- *
776
- * \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
777
- * \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
778
- * \tparam OutputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a> and
779
- * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type.
780
- * \tparam OutputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a> and
781
- * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type.
782
- *
783
- * \pre The input ranges shall not overlap either output range.
784
- *
785
- * The following code snippet demonstrates how to use \p unique_by_key_copy to
786
- * compact a sequence of key/value pairs and with equal keys.
787
- *
788
- * \code
789
- * #include <thrust/unique.h>
790
- * ...
791
- * const int N = 7;
792
- * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys
793
- * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values
794
- * int C[N]; // output keys
795
- * int D[N]; // output values
796
- *
797
- * thrust::pair<int*,int*> new_end;
798
- * new_end = thrust::unique_by_key_copy(A, A + N, B, C, D);
799
- *
800
- * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4.
801
- * // The first four values in D are now {9, 8, 5, 3} and new_end.second - D is 4.
802
- * \endcode
803
- *
804
- * \see unique_copy
805
- * \see unique_by_key
806
- * \see reduce_by_key
807
- */
808
- template<typename InputIterator1,
809
- typename InputIterator2,
810
- typename OutputIterator1,
811
- typename OutputIterator2>
812
- thrust::pair<OutputIterator1,OutputIterator2>
813
- unique_by_key_copy(InputIterator1 keys_first,
814
- InputIterator1 keys_last,
815
- InputIterator2 values_first,
816
- OutputIterator1 keys_result,
817
- OutputIterator2 values_result);
818
-
819
-
820
- /*! \p unique_by_key_copy is a generalization of \p unique_copy to key-value pairs.
821
- * For each group of consecutive keys in the range <tt>[keys_first, keys_last)</tt>
822
- * that are equal, \p unique_by_key_copy copies the first element of the group to
823
- * a range beginning with \c keys_result and the corresponding values from the range
824
- * <tt>[values_first, values_first + (keys_last - keys_first))</tt> are copied to a range
825
- * beginning with \c values_result.
826
- *
827
- * This version of \p unique_by_key_copy uses the function object \c binary_pred
828
- * to test for equality and \c project1st to reduce values with equal keys.
829
- *
830
- * The algorithm's execution is parallelized as determined by \p exec.
831
- *
832
- * \param exec The execution policy to use for parallelization.
833
- * \param keys_first The beginning of the input key range.
834
- * \param keys_last The end of the input key range.
835
- * \param values_first The beginning of the input value range.
836
- * \param keys_result The beginning of the output key range.
837
- * \param values_result The beginning of the output value range.
838
- * \param binary_pred The binary predicate used to determine equality.
839
- * \return A pair of iterators at end of the ranges <tt>[keys_result, keys_result_last)</tt> and <tt>[values_result, values_result_last)</tt>.
840
- *
841
- * \tparam DerivedPolicy The name of the derived execution policy.
842
- * \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
843
- * \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
844
- * \tparam OutputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a> and
845
- * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type.
846
- * \tparam OutputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a> and
847
- * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type.
848
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
849
- *
850
- * \pre The input ranges shall not overlap either output range.
851
- *
852
- * The following code snippet demonstrates how to use \p unique_by_key_copy to
853
- * compact a sequence of key/value pairs and with equal keys using the \p thrust::host execution policy for
854
- * parallelization:
855
- *
856
- * \code
857
- * #include <thrust/unique.h>
858
- * #include <thrust/execution_policy.h>
859
- * ...
860
- * const int N = 7;
861
- * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys
862
- * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values
863
- * int C[N]; // output keys
864
- * int D[N]; // output values
865
- *
866
- * thrust::pair<int*,int*> new_end;
867
- * thrust::equal_to<int> binary_pred;
868
- * new_end = thrust::unique_by_key_copy(thrust::host, A, A + N, B, C, D, binary_pred);
869
- *
870
- * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4.
871
- * // The first four values in D are now {9, 8, 5, 3} and new_end.second - D is 4.
872
- * \endcode
873
- *
874
- * \see unique_copy
875
- * \see unique_by_key
876
- * \see reduce_by_key
877
- */
878
- template<typename DerivedPolicy,
879
- typename InputIterator1,
880
- typename InputIterator2,
881
- typename OutputIterator1,
882
- typename OutputIterator2,
883
- typename BinaryPredicate>
884
- __host__ __device__
885
- thrust::pair<OutputIterator1,OutputIterator2>
886
- unique_by_key_copy(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
887
- InputIterator1 keys_first,
888
- InputIterator1 keys_last,
889
- InputIterator2 values_first,
890
- OutputIterator1 keys_result,
891
- OutputIterator2 values_result,
892
- BinaryPredicate binary_pred);
893
-
894
-
895
- /*! \p unique_by_key_copy is a generalization of \p unique_copy to key-value pairs.
896
- * For each group of consecutive keys in the range <tt>[keys_first, keys_last)</tt>
897
- * that are equal, \p unique_by_key_copy copies the first element of the group to
898
- * a range beginning with \c keys_result and the corresponding values from the range
899
- * <tt>[values_first, values_first + (keys_last - keys_first))</tt> are copied to a range
900
- * beginning with \c values_result.
901
- *
902
- * This version of \p unique_by_key_copy uses the function object \c binary_pred
903
- * to test for equality and \c project1st to reduce values with equal keys.
904
- *
905
- * \param keys_first The beginning of the input key range.
906
- * \param keys_last The end of the input key range.
907
- * \param values_first The beginning of the input value range.
908
- * \param keys_result The beginning of the output key range.
909
- * \param values_result The beginning of the output value range.
910
- * \param binary_pred The binary predicate used to determine equality.
911
- * \return A pair of iterators at end of the ranges <tt>[keys_result, keys_result_last)</tt> and <tt>[values_result, values_result_last)</tt>.
912
- *
913
- * \tparam InputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
914
- * \tparam InputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>,
915
- * \tparam OutputIterator1 is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a> and
916
- * and \p InputIterator1's \c value_type is convertible to \c OutputIterator1's \c value_type.
917
- * \tparam OutputIterator2 is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a> and
918
- * and \p InputIterator2's \c value_type is convertible to \c OutputIterator2's \c value_type.
919
- * \tparam BinaryPredicate is a model of <a href="http://www.sgi.com/tech/stl/BinaryPredicate.html">Binary Predicate</a>.
920
- *
921
- * \pre The input ranges shall not overlap either output range.
922
- *
923
- * The following code snippet demonstrates how to use \p unique_by_key_copy to
924
- * compact a sequence of key/value pairs and with equal keys.
925
- *
926
- * \code
927
- * #include <thrust/unique.h>
928
- * ...
929
- * const int N = 7;
930
- * int A[N] = {1, 3, 3, 3, 2, 2, 1}; // input keys
931
- * int B[N] = {9, 8, 7, 6, 5, 4, 3}; // input values
932
- * int C[N]; // output keys
933
- * int D[N]; // output values
934
- *
935
- * thrust::pair<int*,int*> new_end;
936
- * thrust::equal_to<int> binary_pred;
937
- * new_end = thrust::unique_by_key_copy(A, A + N, B, C, D, binary_pred);
938
- *
939
- * // The first four keys in C are now {1, 3, 2, 1} and new_end.first - C is 4.
940
- * // The first four values in D are now {9, 8, 5, 3} and new_end.second - D is 4.
941
- * \endcode
942
- *
943
- * \see unique_copy
944
- * \see unique_by_key
945
- * \see reduce_by_key
946
- */
947
- template<typename InputIterator1,
948
- typename InputIterator2,
949
- typename OutputIterator1,
950
- typename OutputIterator2,
951
- typename BinaryPredicate>
952
- thrust::pair<OutputIterator1,OutputIterator2>
953
- unique_by_key_copy(InputIterator1 keys_first,
954
- InputIterator1 keys_last,
955
- InputIterator2 values_first,
956
- OutputIterator1 keys_result,
957
- OutputIterator2 values_result,
958
- BinaryPredicate binary_pred);
959
-
960
-
961
- /*! \} // end stream_compaction
962
- */
963
-
964
-
965
- } // end namespace thrust
966
-
967
- #include <thrust/detail/unique.inl>
968
-
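
For intuition, the consecutive-duplicate semantics documented in the header above (thrust::unique, thrust::unique_by_key) can be sketched in plain Python. This is only an illustration of the documented behaviour, not part of Thrust; the helper names here are ad hoc.

from itertools import groupby

def unique(seq):
    # Keep the first element of each run of equal consecutive elements.
    return [next(group) for _, group in groupby(seq)]

def unique_by_key(keys, values):
    # Keep the first key/value pair of each run of equal consecutive keys.
    out_keys, out_values = [], []
    for k, v in zip(keys, values):
        if not out_keys or k != out_keys[-1]:
            out_keys.append(k)
            out_values.append(v)
    return out_keys, out_values

assert unique([1, 3, 3, 3, 2, 2, 1]) == [1, 3, 2, 1]
assert unique_by_key([1, 3, 3, 3, 2, 2, 1], [9, 8, 7, 6, 5, 4, 3]) == ([1, 3, 2, 1], [9, 8, 5, 3])

The asserted results match the example arrays used throughout the Doxygen comments above.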
 
 
spaces/Cat125/text-generator-v2/utils.py DELETED
@@ -1,36 +0,0 @@
1
- from termcolor import colored
2
-
3
- def log(text):
4
- '''The function logs a given text to a file named 'runtime.log'.
5
-
6
- Parameters
7
- ----------
8
- text
9
- The text that will be written to the log file.
10
-
11
- '''
12
- print(text, file=open('runtime.log', 'a+'))
13
-
14
- # Print iterations progress
15
-
16
-
17
- def progressbar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill=colored('█', 'green'), print_end="\r"):
18
- """
19
- Call in a loop to create terminal progress bar
20
- @params:
21
- iteration - Required : current iteration (Int)
22
- total - Required : total iterations (Int)
23
- prefix - Optional : prefix string (Str)
24
- suffix - Optional : suffix string (Str)
25
- decimals - Optional : positive number of decimals in percent complete (Int)
26
- length - Optional : character length of bar (Int)
27
- fill - Optional : bar fill character (Str)
28
- print_end - Optional : end character (e.g. "\r", "\r\n") (Str)
29
- """
30
- percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
31
- filled_length = int(length * iteration // total)
32
- bar = fill * filled_length + colored('-', 'red') * (length - filled_length)
33
- print(f'\r{prefix} [{bar}] {percent}% ({iteration}/{total}) {suffix}', end = print_end)
34
- # Print New Line on Complete
35
- if iteration == total:
36
- print()
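
A minimal driver loop for the progressbar() helper above, assuming the file is importable as utils and termcolor is installed; time.sleep stands in for real per-item work:

import time
from utils import progressbar  # assumed module name for the file above

items = range(120)
progressbar(0, len(items), prefix='Processing:', suffix='done', length=40)
for i, _ in enumerate(items, start=1):
    time.sleep(0.01)  # placeholder for the actual work on each item
    progressbar(i, len(items), prefix='Processing:', suffix='done', length=40)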
 
 
spaces/Celestinian/Topic-Detection/app.py DELETED
@@ -1,39 +0,0 @@
1
- from transformers import AutoTokenizer, AutoModelForCausalLM
2
- import gradio as gr
3
- import torch
4
-
5
- device = "cuda" if torch.cuda.is_available() else "cpu"
6
-
7
- tokenizer = AutoTokenizer.from_pretrained("Celestinian/TopicGPT")
8
- model = AutoModelForCausalLM.from_pretrained("Celestinian/TopicGPT")
9
-
10
- def generate_text(prompt, temperature, max_size):
11
- input_ids = tokenizer.encode("#CONTEXT# " + prompt + " #TOPIC#", return_tensors='pt')
12
- input_ids = input_ids.to(device)
13
- model.eval()
14
- model.to(device)
15
-
16
- output_tokens = []
17
- eos_token_id = tokenizer.encode('#')[0]
18
-
19
- for _ in range(max_size):
20
- with torch.no_grad():
21
- outputs = model(input_ids)
22
- logits = outputs.logits[:, -1, :] / temperature
23
- next_token = torch.multinomial(torch.softmax(logits, dim=-1), num_samples=1)
24
- if next_token.item() == eos_token_id:
25
- break
26
- input_ids = torch.cat((input_ids, next_token), dim=-1)
27
- output_tokens.append(next_token.item())
28
-
29
- output = tokenizer.decode(output_tokens)
30
- clean_output = output.replace('\n', '\n')
31
- print(prompt + clean_output)
32
- return clean_output
33
-
34
- input_text = gr.inputs.Textbox(lines=5, label="Input Text")
35
- temperature_input = gr.inputs.Slider(minimum=0.01, maximum=2, step=0.01, default=0.01, label="Temperature")
36
- max_size_input = gr.inputs.Slider(minimum=1, maximum=250, step=1, default=30, label="Max Size")
37
- output_text = gr.outputs.Textbox(label="Generated Text")
38
-
39
- gr.Interface(generate_text, inputs=[input_text, temperature_input, max_size_input], outputs=output_text).launch()
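
A minimal sketch of calling generate_text() directly, without the Gradio UI, assuming the TopicGPT model and tokenizer above loaded successfully; the prompt text is an arbitrary example:

topic = generate_text(
    prompt="Our team benchmarked three vector databases and picked one for production.",
    temperature=0.01,
    max_size=30,
)
print(topic)  # short topic string sampled token by token until the '#' stop token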
 
 
spaces/ChandraMohanNayal/AutoGPT/autogpt/speech/base.py DELETED
@@ -1,50 +0,0 @@
1
- """Base class for all voice classes."""
2
- import abc
3
- from threading import Lock
4
-
5
- from autogpt.config import AbstractSingleton
6
-
7
-
8
- class VoiceBase(AbstractSingleton):
9
- """
10
- Base class for all voice classes.
11
- """
12
-
13
- def __init__(self):
14
- """
15
- Initialize the voice class.
16
- """
17
- self._url = None
18
- self._headers = None
19
- self._api_key = None
20
- self._voices = []
21
- self._mutex = Lock()
22
- self._setup()
23
-
24
- def say(self, text: str, voice_index: int = 0) -> bool:
25
- """
26
- Say the given text.
27
-
28
- Args:
29
- text (str): The text to say.
30
- voice_index (int): The index of the voice to use.
31
- """
32
- with self._mutex:
33
- return self._speech(text, voice_index)
34
-
35
- @abc.abstractmethod
36
- def _setup(self) -> None:
37
- """
38
- Set up the voices, API key, etc.
39
- """
40
- pass
41
-
42
- @abc.abstractmethod
43
- def _speech(self, text: str, voice_index: int = 0) -> bool:
44
- """
45
- Play the given text.
46
-
47
- Args:
48
- text (str): The text to play.
49
- """
50
- pass
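
A minimal sketch of a concrete voice built on VoiceBase, ignoring the AbstractSingleton plumbing; the print-based backend is hypothetical and only shows which hooks a subclass must fill in:

class PrintVoice(VoiceBase):
    def _setup(self) -> None:
        # No URL, headers or API key needed for this stand-in backend.
        self._voices = ["default"]

    def _speech(self, text: str, voice_index: int = 0) -> bool:
        print(f"[voice {voice_index}] {text}")
        return True

say() then serializes calls through the mutex and dispatches to _speech().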
 
 
spaces/ChongCJ/fish/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Fish
3
- emoji: 👁
4
- colorFrom: purple
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.15.0
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
spaces/Clebersla/RVC_V2_Huggingface_Version/lib/infer_pack/modules/F0Predictor/PMF0Predictor.py DELETED
@@ -1,97 +0,0 @@
1
- from lib.infer_pack.modules.F0Predictor.F0Predictor import F0Predictor
2
- import parselmouth
3
- import numpy as np
4
-
5
-
6
- class PMF0Predictor(F0Predictor):
7
- def __init__(self, hop_length=512, f0_min=50, f0_max=1100, sampling_rate=44100):
8
- self.hop_length = hop_length
9
- self.f0_min = f0_min
10
- self.f0_max = f0_max
11
- self.sampling_rate = sampling_rate
12
-
13
- def interpolate_f0(self, f0):
14
- """
15
- Interpolate the F0 contour (fill in unvoiced frames).
16
- """
17
-
18
- data = np.reshape(f0, (f0.size, 1))
19
-
20
- vuv_vector = np.zeros((data.size, 1), dtype=np.float32)
21
- vuv_vector[data > 0.0] = 1.0
22
- vuv_vector[data <= 0.0] = 0.0
23
-
24
- ip_data = data
25
-
26
- frame_number = data.size
27
- last_value = 0.0
28
- for i in range(frame_number):
29
- if data[i] <= 0.0:
30
- j = i + 1
31
- for j in range(i + 1, frame_number):
32
- if data[j] > 0.0:
33
- break
34
- if j < frame_number - 1:
35
- if last_value > 0.0:
36
- step = (data[j] - data[i - 1]) / float(j - i)
37
- for k in range(i, j):
38
- ip_data[k] = data[i - 1] + step * (k - i + 1)
39
- else:
40
- for k in range(i, j):
41
- ip_data[k] = data[j]
42
- else:
43
- for k in range(i, frame_number):
44
- ip_data[k] = last_value
45
- else:
46
- ip_data[i] = data[i] # this copy may be unnecessary
47
- last_value = data[i]
48
-
49
- return ip_data[:, 0], vuv_vector[:, 0]
50
-
51
- def compute_f0(self, wav, p_len=None):
52
- x = wav
53
- if p_len is None:
54
- p_len = x.shape[0] // self.hop_length
55
- else:
56
- assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
57
- time_step = self.hop_length / self.sampling_rate * 1000
58
- f0 = (
59
- parselmouth.Sound(x, self.sampling_rate)
60
- .to_pitch_ac(
61
- time_step=time_step / 1000,
62
- voicing_threshold=0.6,
63
- pitch_floor=self.f0_min,
64
- pitch_ceiling=self.f0_max,
65
- )
66
- .selected_array["frequency"]
67
- )
68
-
69
- pad_size = (p_len - len(f0) + 1) // 2
70
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
71
- f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
72
- f0, uv = self.interpolate_f0(f0)
73
- return f0
74
-
75
- def compute_f0_uv(self, wav, p_len=None):
76
- x = wav
77
- if p_len is None:
78
- p_len = x.shape[0] // self.hop_length
79
- else:
80
- assert abs(p_len - x.shape[0] // self.hop_length) < 4, "pad length error"
81
- time_step = self.hop_length / self.sampling_rate * 1000
82
- f0 = (
83
- parselmouth.Sound(x, self.sampling_rate)
84
- .to_pitch_ac(
85
- time_step=time_step / 1000,
86
- voicing_threshold=0.6,
87
- pitch_floor=self.f0_min,
88
- pitch_ceiling=self.f0_max,
89
- )
90
- .selected_array["frequency"]
91
- )
92
-
93
- pad_size = (p_len - len(f0) + 1) // 2
94
- if pad_size > 0 or p_len - len(f0) - pad_size > 0:
95
- f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
96
- f0, uv = self.interpolate_f0(f0)
97
- return f0, uv
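
A minimal usage sketch for the predictor above, assuming a mono waveform and that soundfile is available; the file name and import path are illustrative:

import soundfile as sf
from lib.infer_pack.modules.F0Predictor.PMF0Predictor import PMF0Predictor

wav, sr = sf.read("speech.wav")  # mono float array expected
predictor = PMF0Predictor(hop_length=512, sampling_rate=sr)
f0, uv = predictor.compute_f0_uv(wav)  # per-frame pitch and voiced/unvoiced flags
print(f0.shape, uv.shape)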
 
 
spaces/CuriousDolphin/MobileSAM/app.py DELETED
@@ -1,319 +0,0 @@
- import gradio as gr
- import numpy as np
- import torch
- import os
- from mobile_sam import SamAutomaticMaskGenerator, SamPredictor, sam_model_registry
- from PIL import ImageDraw
- from utils.tools import box_prompt, format_results, point_prompt
- from utils.tools_gradio import fast_process
-
- # Most of our demo code is from [FastSAM Demo](https://huggingface.co/spaces/An-619/FastSAM). Huge thanks to An-619.
-
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
- # Load the pre-trained model
- sam_checkpoint = "./mobile_sam.pt"
- model_type = "vit_t"
-
- mobile_sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
- mobile_sam = mobile_sam.to(device=device)
- mobile_sam.eval()
-
- mask_generator = SamAutomaticMaskGenerator(mobile_sam)
- predictor = SamPredictor(mobile_sam)
-
- # Description
- title = "<center><strong><font size='8'>Faster Segment Anything(MobileSAM)</font></strong></center>"
-
- description_e = """This is a demo of [Faster Segment Anything(MobileSAM) Model](https://github.com/ChaoningZhang/MobileSAM).
-
- We will provide box mode soon.
-
- Enjoy!
-
- """
-
- description_p = """ # Instructions for point mode
-
- 0. Restart by clicking the Restart button
- 1. Select a point with Add Mask for the foreground (required)
- 2. Select a point with Remove Area for the background (optional)
- 3. Click the Start Segmenting button.
-
- """
-
- examples = [
-     ["assets/picture3.jpg"],
-     ["assets/picture4.jpg"],
-     ["assets/picture5.jpg"],
-     ["assets/picture6.jpg"],
-     ["assets/picture1.jpg"],
-     ["assets/picture2.jpg"],
- ]
-
- default_example = examples[0]
-
- css = "h1 { text-align: center } .about { text-align: justify; padding-left: 10%; padding-right: 10%; }"
-
-
- @torch.no_grad()
- def segment_everything(
-     image,
-     input_size=1024,
-     better_quality=False,
-     withContours=True,
-     use_retina=True,
-     mask_random_color=True,
- ):
-     global mask_generator
-
-     input_size = int(input_size)
-     w, h = image.size
-     scale = input_size / max(w, h)
-     new_w = int(w * scale)
-     new_h = int(h * scale)
-     image = image.resize((new_w, new_h))
-
-     nd_image = np.array(image)
-     annotations = mask_generator.generate(nd_image)
-
-     fig = fast_process(
-         annotations=annotations,
-         image=image,
-         device=device,
-         scale=(1024 // input_size),
-         better_quality=better_quality,
-         mask_random_color=mask_random_color,
-         bbox=None,
-         use_retina=use_retina,
-         withContours=withContours,
-     )
-     return fig
-
-
- def segment_with_points(
-     image,
-     input_size=1024,
-     better_quality=False,
-     withContours=True,
-     use_retina=True,
-     mask_random_color=True,
- ):
-     global global_points
-     global global_point_label
-
-     input_size = int(input_size)
-     w, h = image.size
-     scale = input_size / max(w, h)
-     new_w = int(w * scale)
-     new_h = int(h * scale)
-     image = image.resize((new_w, new_h))
-
-     scaled_points = np.array([[int(x * scale) for x in point] for point in global_points])
-     scaled_point_label = np.array(global_point_label)
-
-     nd_image = np.array(image)
-     predictor.set_image(nd_image)
-     masks, scores, logits = predictor.predict(
-         point_coords=scaled_points,
-         point_labels=scaled_point_label,
-         multimask_output=True,
-     )
-
-     results = format_results(masks, scores, logits, 0)
-
-     annotations, _ = point_prompt(
-         results, scaled_points, scaled_point_label, new_h, new_w
-     )
-     annotations = np.array([annotations])
-
-     fig = fast_process(
-         annotations=annotations,
-         image=image,
-         device=device,
-         scale=(1024 // input_size),
-         better_quality=better_quality,
-         mask_random_color=mask_random_color,
-         bbox=None,
-         use_retina=use_retina,
-         withContours=withContours,
-     )
-
-     global_points = []
-     global_point_label = []
-     # return fig, None
-     return fig, image
-
-
- def get_points_with_draw(image, label, evt: gr.SelectData):
-     global global_points
-     global global_point_label
-
-     x, y = evt.index[0], evt.index[1]
-     point_radius, point_color = 15, (255, 255, 0) if label == "Add Mask" else (
-         255,
-         0,
-         255,
-     )
-     global_points.append([x, y])
-     global_point_label.append(1 if label == "Add Mask" else 0)
-
-     print(x, y, label == "Add Mask")
-
-     # Create a drawing object so the clicked point can be marked on the image
-     draw = ImageDraw.Draw(image)
-     draw.ellipse(
-         [(x - point_radius, y - point_radius), (x + point_radius, y + point_radius)],
-         fill=point_color,
-     )
-     return image
-
-
- cond_img_e = gr.Image(label="Input", value=default_example[0], type="pil")
- cond_img_p = gr.Image(label="Input with points", value=default_example[0], type="pil")
-
- segm_img_e = gr.Image(label="Segmented Image", interactive=False, type="pil")
- segm_img_p = gr.Image(
-     label="Segmented Image with points", interactive=False, type="pil"
- )
-
- global_points = []
- global_point_label = []
-
- input_size_slider = gr.components.Slider(
-     minimum=512,
-     maximum=1024,
-     value=1024,
-     step=64,
-     label="Input_size",
-     info="Our model was trained on a size of 1024",
- )
-
- with gr.Blocks(css=css, title="Faster Segment Anything(MobileSAM)") as demo:
-     with gr.Row():
-         with gr.Column(scale=1):
-             # Title
-             gr.Markdown(title)
-
-     # with gr.Tab("Everything mode"):
-     #     # Images
-     #     with gr.Row(variant="panel"):
-     #         with gr.Column(scale=1):
-     #             cond_img_e.render()
-     #
-     #         with gr.Column(scale=1):
-     #             segm_img_e.render()
-     #
-     #     # Submit & Clear
-     #     with gr.Row():
-     #         with gr.Column():
-     #             input_size_slider.render()
-     #
-     #             with gr.Row():
-     #                 contour_check = gr.Checkbox(
-     #                     value=True,
-     #                     label="withContours",
-     #                     info="draw the edges of the masks",
-     #                 )
-     #
-     #                 with gr.Column():
-     #                     segment_btn_e = gr.Button(
-     #                         "Segment Everything", variant="primary"
-     #                     )
-     #                     clear_btn_e = gr.Button("Clear", variant="secondary")
-     #
-     #             gr.Markdown("Try some of the examples below ⬇️")
-     #             gr.Examples(
-     #                 examples=examples,
-     #                 inputs=[cond_img_e],
-     #                 outputs=segm_img_e,
-     #                 fn=segment_everything,
-     #                 cache_examples=True,
-     #                 examples_per_page=4,
-     #             )
-     #
-     #         with gr.Column():
-     #             with gr.Accordion("Advanced options", open=False):
-     #                 # text_box = gr.Textbox(label="text prompt")
-     #                 with gr.Row():
-     #                     mor_check = gr.Checkbox(
-     #                         value=False,
-     #                         label="better_visual_quality",
-     #                         info="better quality using morphologyEx",
-     #                     )
-     #                     with gr.Column():
-     #                         retina_check = gr.Checkbox(
-     #                             value=True,
-     #                             label="use_retina",
-     #                             info="draw high-resolution segmentation masks",
-     #                         )
-     #             # Description
-     #             gr.Markdown(description_e)
-     #
-     with gr.Tab("Point mode"):
-         # Images
-         with gr.Row(variant="panel"):
-             with gr.Column(scale=1):
-                 cond_img_p.render()
-
-             with gr.Column(scale=1):
-                 segm_img_p.render()
-
-         # Submit & Clear
-         with gr.Row():
-             with gr.Column():
-                 with gr.Row():
-                     add_or_remove = gr.Radio(
-                         ["Add Mask", "Remove Area"],
-                         value="Add Mask",
-                     )
-
-                     with gr.Column():
-                         segment_btn_p = gr.Button(
-                             "Start segmenting!", variant="primary"
-                         )
-                         clear_btn_p = gr.Button("Restart", variant="secondary")
-
-                 gr.Markdown("Try some of the examples below ⬇️")
-                 gr.Examples(
-                     examples=examples,
-                     inputs=[cond_img_p],
-                     # outputs=segm_img_p,
-                     # fn=segment_with_points,
-                     # cache_examples=True,
-                     examples_per_page=4,
-                 )
-
-             with gr.Column():
-                 # Description
-                 gr.Markdown(description_p)
-
-     cond_img_p.select(get_points_with_draw, [cond_img_p, add_or_remove], cond_img_p)
-
-     # segment_btn_e.click(
-     #     segment_everything,
-     #     inputs=[
-     #         cond_img_e,
-     #         input_size_slider,
-     #         mor_check,
-     #         contour_check,
-     #         retina_check,
-     #     ],
-     #     outputs=segm_img_e,
-     # )
-
-     segment_btn_p.click(
-         segment_with_points, inputs=[cond_img_p], outputs=[segm_img_p, cond_img_p]
-     )
-
-     def clear():
-         return None, None
-
-     def clear_text():
-         return None, None, None
-
-     # clear_btn_e.click(clear, outputs=[cond_img_e, segm_img_e])
-     clear_btn_p.click(clear, outputs=[cond_img_p, segm_img_p])
-
- demo.queue()
- demo.launch()
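For context, the deleted app's point mode reduces to a standard SAM point-prompt call. The sketch below is not part of the original Space: it shows that flow without the Gradio UI, assuming the mobile_sam package and the ./mobile_sam.pt checkpoint referenced above are available; the image path and click coordinates are placeholders.

# Hypothetical sketch of the point-prompt flow used by the deleted demo (paths and the click are placeholders).
import numpy as np
import torch
from PIL import Image
from mobile_sam import SamPredictor, sam_model_registry

device = "cuda" if torch.cuda.is_available() else "cpu"
sam = sam_model_registry["vit_t"](checkpoint="./mobile_sam.pt").to(device).eval()
predictor = SamPredictor(sam)

image = Image.open("assets/picture3.jpg").convert("RGB")   # example image shipped with the Space
predictor.set_image(np.array(image))

# Label 1 marks a foreground click ("Add Mask"); label 0 would mark background ("Remove Area").
masks, scores, _ = predictor.predict(
    point_coords=np.array([[image.width // 2, image.height // 2]]),
    point_labels=np.array([1]),
    multimask_output=True,
)
best_mask = masks[np.argmax(scores)]                        # boolean HxW mask of the top-scoring proposal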
spaces/Cybsechuman/Consistency_analysis/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Consistency Analysis
- emoji: 🏢
- colorFrom: indigo
- colorTo: yellow
- sdk: gradio
- sdk_version: 3.29.0
- app_file: app.py
- pinned: false
- license: openrail
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference