parquet-converter committed
Commit f420ff2 · Parent: 88d6db9

Update parquet files (step 22 of 397)

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
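
For context: commits like this one are produced by Hugging Face's automated parquet-converter bot, which re-exports a repository's data files to Parquet in many small batches (here, step 22 of 397). The sketch below is a rough illustration of what one such conversion step might look like, not the bot's actual implementation; it assumes pandas and pyarrow are installed, and the file paths and CSV input format are hypothetical.

```python
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq

# Hypothetical input shard from the source repository.
df = pd.read_csv("data/shard-00022.csv")

# Convert the DataFrame to an Arrow table and write it out as Parquet;
# snappy is a common default compression codec for dataset storage.
table = pa.Table.from_pandas(df)
pq.write_table(table, "data/shard-00022.parquet", compression="snappy")
```

Every entry in the list below reads +0 -N, so this step only deletes the original files; any Parquet output presumably lands in other steps of the sequence or among the changes hidden by this 50-file view.
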
Files changed (50)
  1. spaces/1gistliPinn/ChatGPT4/Examples/3 Jumbo Movie English Subtitles Download Torrent.md +0 -6
  2. spaces/1gistliPinn/ChatGPT4/Examples/Articad Pro V16 Cracked Iso 18.md +0 -32
  3. spaces/1gistliPinn/ChatGPT4/Examples/Dark Souls 2 Save Editor Fix.md +0 -18
  4. spaces/1phancelerku/anime-remove-background/Aprenda a baixar Stick War 3 com dinheiro infinito e desbloquear todos os recursos.md +0 -81
  5. spaces/1phancelerku/anime-remove-background/Download FF Advance Server APK Juli 2021 How to Register and Play.md +0 -91
  6. spaces/1phancelerku/anime-remove-background/Download The Baby In Yellow APK for Android - Free Horror Game.md +0 -133
  7. spaces/1phancelerku/anime-remove-background/Epic War 6 How to Conquer Every Spot on the Board.md +0 -134
  8. spaces/1toTree/lora_test/ppdiffusers/pipelines/latent_diffusion_uncond/__init__.py +0 -17
  9. spaces/AIFILMS/generate_human_motion/app.py +0 -319
  10. spaces/AIZerotoHero-Health4All/02-ClinicalTerminology/README.md +0 -12
  11. spaces/ASJMO/freegpt/client/css/buttons.css +0 -4
  12. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/GptGod.py +0 -51
  13. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/Builders.js +0 -79
  14. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateLabel.js +0 -8
  15. spaces/Allie7/Nose/Dockerfile +0 -21
  16. spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/model/__init__.py +0 -6
  17. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_tensorrt_img2img.py +0 -1055
  18. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_models_diffuser_to_diffusers.py +0 -100
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/__init__.py +0 -291
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py +0 -409
  21. spaces/Andy1621/uniformer_image_detection/configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py +0 -5
  22. spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/anchor_free_head.py +0 -340
  23. spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_480x480_40k_pascal_context_59.py +0 -10
  24. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_stack.py +0 -16
  25. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/extension.py +0 -148
  26. spaces/AtomdffAI/wechatgpt4atom/README.md +0 -13
  27. spaces/Awesimo/jojogan/e4e/training/__init__.py +0 -0
  28. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/fcos.py +0 -303
  29. spaces/Benson/text-generation/Examples/Blockman Ir Aventura Hack Apk 2022 Cubos Ilimitados.md +0 -83
  30. spaces/Benson/text-generation/Examples/Cmo Descargar El ltimo Simulador De Conduccin De Coches En PC.md +0 -128
  31. spaces/Benson/text-generation/Examples/Cocina Aire Freidora Recetas Apk.md +0 -81
  32. spaces/Benson/text-generation/Examples/Descargar Amantes Y Mejores Amigos Azana.md +0 -68
  33. spaces/Bishan/Speech_To_Text_Hindi/app.py +0 -83
  34. spaces/Buatong/Computing/app.py +0 -7
  35. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/tests/test_model_e2e.py +0 -43
  36. spaces/CVPR/LIVE/pybind11/include/pybind11/options.h +0 -65
  37. spaces/CVPR/LIVE/pybind11/tests/test_call_policies.cpp +0 -101
  38. spaces/CVPR/LIVE/thrust/thrust/advance.h +0 -141
  39. spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/adjacent_difference.h +0 -58
  40. spaces/CVPR/regionclip-demo/detectron2/modeling/proposal_generator/rrpn.py +0 -203
  41. spaces/CanonOverseer/Canons-Den/Dockerfile +0 -11
  42. spaces/CarlDennis/Lovelive-VITS-JPZH/text/korean.py +0 -205
  43. spaces/ChandraMohanNayal/AutoGPT/autogpt/__main__.py +0 -5
  44. spaces/ChrisCaviar/ControlNet-v1-1/app_segmentation.py +0 -104
  45. spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/red/tool.js +0 -428
  46. spaces/CjangCjengh/Sanskrit-TTS/utils.py +0 -75
  47. spaces/CofAI/LengthConverter/style.css +0 -28
  48. spaces/CofAI/chat.v1/web.html +0 -60
  49. spaces/CofAI/chat/g4f/models.py +0 -233
  50. spaces/CyberHarem/find_my_waifu/civitai.py +0 -26
spaces/1gistliPinn/ChatGPT4/Examples/3 Jumbo Movie English Subtitles Download Torrent.md DELETED
@@ -1,6 +0,0 @@
- <h2>3 Jumbo Movie English Subtitles Download Torrent</h2><br /><p><b><b>Download File</b> &#9889; <a href="https://imgfil.com/2uy0DR">https://imgfil.com/2uy0DR</a></b></p><br /><br />
-
- 1fdad05405<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Articad Pro V16 Cracked Iso 18.md DELETED
@@ -1,32 +0,0 @@
- <h2>Articad Pro V16 Cracked Iso 18</h2><br /><p><b><b>DOWNLOAD</b> &#9734;&#9734;&#9734; <a href="https://imgfil.com/2uy1Qm">https://imgfil.com/2uy1Qm</a></b></p><br /><br />
- <br />
- Use Control + F to find your desired software. If your program wasn’t listed, then it is most likely not a pdf downloader and most probably a shareware program. If your program wasn’t listed, then it is most likely not a pdf downloader and most probably a shareware program.
-
- If you’re looking for a free pdf downloader or software that lets you download from websites for free, then you are in the right place. On this page you will find the best programs for this!
-
- The free pdf downloader Program
-
- There are a lot of pdf downloader software to choose from, but most of them are expensive, so we’ve put together a list of the best free software!
-
- The best free pdf downloader & software
-
- #1 DownloadPipe
-
- DownloadPipe is a free download manager for windows which supports multiple platforms like Windows, Mac, and Linux. It supports multiple protocols including HTTPS, FTPS, FTP, etc. to secure the download process. You can quickly download more than 100 of your favorite programs.
-
- With this program you can download anything for free. PDF files, documents, movies, songs, games, software, and more. It has a very simple design and intuitive user interface. Also, DownloadPipe is extremely easy to use and intuitive.
-
- To download a PDF file you need to go to the “Download” menu on the top right corner and select the “Save as” option. You can then specify where you want to download the file to.
-
- #2 Zipeg
-
- Zipeg is a free PDF downloader that lets you download any file from a website. It’s a standalone downloader. Zipeg doesn’t require a browser.
-
- This is a program for users who want to download a PDF file without a web browser. You can use the program without installing it. It’s available as a standalone downloader.
-
- For example, if you have a PDF file that you need to download, then just open the Zipeg app and start the download. Zipeg will prompt you to select the link, the file name and other details.
-
- You can download the PDF file. Zipeg allows you to select any file from your browser and download it to the computer. Also, it can download HTML files. You can download a file from any web page. The program is very simple to use 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Dark Souls 2 Save Editor Fix.md DELETED
@@ -1,18 +0,0 @@
- <h2>Dark Souls 2 Save Editor</h2><br /><p><b><b>Download File</b> &#9675;&#9675;&#9675; <a href="https://imgfil.com/2uy0Lh">https://imgfil.com/2uy0Lh</a></b></p><br /><br />
-
- Mar 04, 2017 · 2. Come to the hearts of your viewers and make them feel like they're actually at the show. Be able to produce audio and video content that is consistent in quality. YouTube Help Center. Reply to a video, message or comment. How to: Ask a question; Start a discussion; Share your thoughts; Start or join a discussion. What's your question? How to: Ask a question. How to: Answer a question. How to: Answer a question about your video. How to: Answer a question about your video.
-
- Satisfaction Guarantee. If you aren't completely satisfied, return the item. We've got it. This top rated casino has been around for many years and is a site full of interesting games. This online poker room offers a good welcome bonus for newcomers, a great welcome bonus for repeat players, and a wide selection of unique tournaments. We are an affiliate of the best online poker room in the world.
-
- The first bet is 10. As you can see on this animation, the next bet will be 10 more, for a total bet of 20. The player is at this point committed to the second play.
-
- Create New Account. You are one step away from creating your new account. In order to create your account please select your city:. Select a Username. Select a password. Select your city:. Please select your city. Select a city: Select a state: Select a state. Please select a city. Select a city. Select a state. Please select a city. Please select a city. Please select a state. Please select a state.
-
- Hello mate! This is Renato from the Mexican Casino Club website. Let me introduce ourselves; We are the world's largest online gambling and gaming website that works with an excellent selection of online casinos from around the world.
-
- Have you ever considered what life would be like if you could control every moment, and be able to touch, hear, and taste anything that was around you? It's a fascinating concept, and we think you'd be interested in taking the next step in your experience. Perhaps we should use more of our time to come up with better ways to be at peace with ourselves, our family, and our world, and stop obsessing about the little stuff.
-
- Instead of trying to fix the'symptoms', why not try to 'get rid of the disease'? After all, most people would rather cut off a hand than cut off the 4fefd39f24<br />
- <br />
- <br />
- <p></p>
spaces/1phancelerku/anime-remove-background/Aprenda a baixar Stick War 3 com dinheiro infinito e desbloquear todos os recursos.md DELETED
@@ -1,81 +0,0 @@
- <br />
- <h1>Stick War 3: How to Download and Play the Ultimate Strategy Game</h1>
- <p>If you are a fan of strategy games, you have probably heard of Stick War 3, one of the most popular and addictive games in the genre. Stick War 3 is a game where you can create your own army, fight against other players or AI opponents, and conquer the world of Inamorta. Whether you prefer single player or multiplayer modes, Stick War 3 has something for everyone. In this article, we will show you how to download and install Stick War 3 on your device, how to play the different modes and features of the game, and how to improve your skills with some tips and tricks.</p>
- <h2>PVP Matches</h2>
- <p>One of the main attractions of Stick War 3 is its real-time multiplayer strategy mode, where you can team up with your friends or battle against strangers from around the world. You can choose from 1v1 or 2v2 matches, and use any deck that you have created or unlocked. The goal is to destroy your enemy's statue before they destroy yours, using your units, spells, enchantments, and strategies.</p>
- <h2>stick war 3 dinheiro infinito download</h2><br /><p><b><b>Download File</b> &#9733;&#9733;&#9733;&#9733;&#9733; <a href="https://jinyurl.com/2uNN1k">https://jinyurl.com/2uNN1k</a></b></p><br /><br />
- <p>One of the coolest features of Stick War 3 is that you can take control of any unit at any time, giving you more flexibility and control over your army. You can also use spells such as a giant bubble that blocks incoming projectiles, or snow squall that freezes entire legions. You can also use enchantments such as the rune of reanimation that will cause any poisoned enemy units to respawn as zombies.</p>
- <p>Another way to make your battles more fun and personalized is to customize your battlefield with skins, statues, voice-lines, and emotes. You can change the appearance of your units, your statue, your tower, and even your voice commands. You can also use emotes to communicate with your allies or taunt your enemies.</p>
- <h2>Single Player Modes</h2>
- <p>If you prefer playing solo or offline, Stick War 3 has plenty of options for you as well. You can play the huge ever expanding campaign mode, where you will follow an epic story with multiple chapters, fully animated comic book style cut scenes, and huge storylines. You will explore the world of Inamorta, where weapons are religion and nations are constantly at war. You will encounter different factions, allies, enemies, secrets, and challenges along the way.</p>
- <p>You can also practice your strategies against AI opponents in different scenarios in the proving grounds mode. You can choose from various selectable decks and situations to test your skills and learn new tactics. You can also challenge yourself with daily battles, where you will face a special scenario with fixed decks and other special conditions that do not appear in normal gameplay. You can earn gem rewards for completing each difficulty level.</p>
- <h2>Custom Armies</h2>
- <p>One of the most important aspects of Stick War 3 is building your own battle decks with a variety of army types and upgrades. You can collect and unlock new cards from a growing selection of over 40 different nations, each with their own unique units, abilities, and bonuses. You can also research new upgrades and technologies to make your army stronger and more versatile. You can create up to 10 different decks, each with a maximum of 12 cards, and switch between them before each battle.</p>
- <p>stick war 3 mod apk dinheiro infinito<br />
- stick war 3 hack dinheiro infinito<br />
- stick war 3 legacy dinheiro infinito<br />
- stick war 3 download para android com dinheiro infinito<br />
- stick war 3 atualizado com dinheiro infinito<br />
- stick war 3 jogo online com dinheiro infinito<br />
- stick war 3 como baixar e instalar dinheiro infinito<br />
- stick war 3 dicas e truques para ganhar dinheiro infinito<br />
- stick war 3 versão completa com dinheiro infinito<br />
- stick war 3 multiplayer com dinheiro infinito<br />
- stick war 3 cheats dinheiro infinito<br />
- stick war 3 apk mod menu dinheiro infinito<br />
- stick war 3 tudo desbloqueado com dinheiro infinito<br />
- stick war 3 sem root com dinheiro infinito<br />
- stick war 3 offline com dinheiro infinito<br />
- stick war 3 estratégia de guerra com dinheiro infinito<br />
- stick war 3 skins personalizadas com dinheiro infinito<br />
- stick war 3 novas atualizações com dinheiro infinito<br />
- stick war 3 jogabilidade incrível com dinheiro infinito<br />
- stick war 3 gráficos impressionantes com dinheiro infinito<br />
- stick war 3 download rápido e fácil com dinheiro infinito<br />
- stick war 3 tutorial passo a passo com dinheiro infinito<br />
- stick war 3 melhores armas e unidades com dinheiro infinito<br />
- stick war 3 modo história com dinheiro infinito<br />
- stick war 3 modo sobrevivência com dinheiro infinito<br />
- stick war 3 modo zumbi com dinheiro infinito<br />
- stick war 3 modo clássico com dinheiro infinito<br />
- stick war 3 modo torneio com dinheiro infinito<br />
- stick war 3 modo desafio com dinheiro infinito<br />
- stick war 3 modo sandbox com dinheiro infinito<br />
- stick war 3 modo criativo com dinheiro infinito<br />
- stick war 3 modo cooperativo com dinheiro infinito<br />
- stick war 3 modo versus com dinheiro infinito<br />
- stick war 3 modo ranking com dinheiro infinito<br />
- stick war 3 modo conquista com dinheiro infinito<br />
- stick war 3 modo missão com dinheiro infinito<br />
- stick war 3 modo aventura com dinheiro infinito<br />
- stick war 3 modo campanha com dinheiro infinito<br />
- stick war 3 modo batalha épica com dinheiro infinito<br />
- stick war 3 modo guerra mundial com dinheiro infinito</p>
- <p>Another way to customize your army is to use generals of each nation, who have their own unique abilities and effects. You can choose one general for each deck, and use their power once per battle. For example, you can use the general of the Order Empire, who can summon a giant sword that deals massive damage to enemies in front of him. Or you can use the general of the Chaos Empire, who can transform into a powerful demon that can fly and shoot fireballs.</p>
- <h2>Tips and Tricks</h2>
- <p>Stick War 3 is a game that requires skill, strategy, and creativity to master. Here are some tips and tricks that can help you improve your gameplay and win more battles.</p>
- <ul>
- <li>Learn the strengths and weaknesses of each unit type and nation. For example, archers are good at dealing damage from a distance, but are vulnerable to melee attacks. Speartons are good at defending and blocking enemy units, but are slow and expensive. The Order Empire is good at balanced and versatile strategies, but lacks specialization. The Chaos Empire is good at aggressive and chaotic strategies, but lacks defense and stability.</li>
- <li>Use the right units for the right situations. For example, use miners to gather gold and mana, which are essential for building your army and using spells. Use swordwraths to rush your enemy in the early game or flank them in the late game. Use magikills to cast powerful spells that can turn the tide of the battle.</li>
- <li>Use your spells and enchantments wisely. For example, use heal to restore the health of your units or your statue. Use poison to deal damage over time to enemy units or their statue. Use shield to protect your units or your statue from enemy attacks.</li>
- <li>Take control of your units when necessary. For example, take control of an archer to aim more accurately or avoid enemy fire. Take control of a spearton to block enemy units or charge at them. Take control of a magikill to cast spells more precisely or escape from danger.</li>
- <li>Avoid common mistakes and pitfalls in the game. For example, do not overextend your army or leave your statue undefended. Do not waste your gold or mana on unnecessary units or spells. Do not underestimate your enemy or overestimate yourself.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Stick War 3 is a game that will keep you entertained for hours with its amazing graphics, gameplay, and features. Whether you want to play online with other players or offline by yourself, you will find something that suits your taste and style. You can download and install Stick War 3 on your device for free from the official website or from the app store of your choice. You can also follow the game on social media for more news and updates. If you are looking for a fun and challenging strategy game, you should definitely give Stick War 3 a try.</p>
- <h3>FAQs</h3>
- <ul>
- <li><b>Q: How do I download Stick War 3?</b></li>
- <li>A: You can download Stick War 3 from the official website or from the app store of your choice. You will need an internet connection to play online modes, but you can play offline modes without it.</li>
- <li><b>Q: How do I unlock new cards and generals?</b></li>
- <li>A: You can unlock new cards and generals by playing the campaign mode, completing daily battles, opening chests, or buying them with gems.</li>
- <li><b>Q: How do I earn gems?</b></li>
- <li>A: You can earn gems by playing the campaign mode, completing daily battles, watching ads, or buying them with real money.</li>
- <li><b>Q: How do I play with my friends?</b></li>
- <li>A: You can play with your friends by inviting them to join your team in PVP matches, or by creating a private room with a code that they can enter.</li>
- <li><b>Q: How do I contact the developers?</b></li>
- <li>A: You can contact the developers by sending them an email at [email protected] or by filling out a form on their website.</li>
- </ul>
- : https://stickwar.com/ : https://play.google.com/store/apps/details?id=com.maxgames.stickwar3&hl=en_US&gl=US : https://www.facebook</p> 401be4b1e0<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download FF Advance Server APK Juli 2021 How to Register and Play.md DELETED
@@ -1,91 +0,0 @@
-
- <h1>How to Join and Play Free Fire Advance Server in July 2023</h1>
- <p>Free Fire is one of the most popular and exciting battle royale games on mobile, with millions of players around the world. But did you know that there is a special server where you can try out new features and updates before they are released to the public? This server is called Free Fire Advance Server, and it is a great opportunity for you to experience the latest developments in the game, as well as to help the developers improve the game by reporting bugs and providing feedback.</p>
- <h2>ff-advance.ff.garena.com apk juli</h2><br /><p><b><b>Download Zip</b> &middot; <a href="https://jinyurl.com/2uNMPp">https://jinyurl.com/2uNMPp</a></b></p><br /><br />
- <p>In this article, we will tell you everything you need to know about Free Fire Advance Server, including what it is, how to register, how to download and install, how to play, and how to enjoy it. So, if you are a fan of Free Fire and want to join the exclusive club of advanced players, read on!</p>
- <h2>What is Free Fire Advance Server?</h2>
- <p>Free Fire Advance Server is a test server that is created by Garena, the developer of Free Fire, for experienced players who want to test new features and items that are not yet available on the regular server. The goal of this server is to allow players to explore and experiment with the upcoming updates, as well as to help the developers identify and fix any bugs or issues that may arise.</p>
- <p>By joining Free Fire Advance Server, you will be able to access new weapons, characters, skins, modes, maps, events, and more before anyone else. You will also be able to provide your feedback and suggestions directly to the developers, which may influence the final version of the updates. Moreover, you will be rewarded with diamonds for finding and reporting bugs on the server.</p>
- <p>However, there are some differences between Free Fire Advance Server and the regular server that you should be aware of. First of all, not everyone can join Free Fire Advance Server. You need to register and get an activation code from Garena, which is limited in number. Secondly, Free Fire Advance Server is not always open. It only opens for a certain period of time before each major update. Thirdly, your progress and data on Free Fire Advance Server are not linked to your regular account. You will start from scratch on the test server, and you will not be able to transfer anything back to your regular account.</p>
- <h2>How to Register for Free Fire Advance Server?</h2>
- <p>If you are interested in joining Free Fire Advance Server, you need to register first. The registration process is simple and easy, but you need to act fast because there are only a limited number of activation codes available. Here are the steps you need to follow:</p>
- <ol>
- <li>Visit the official website of Free Fire Advance Server at <a href="(^2^)">ff-advance.ff.garena.com</a>.</li>
- <li>Click or tap on the "Login Facebook" button to sign up for Free Fire Advance Server using your Facebook account. Make sure that your Facebook account is linked to your Free Fire or FF MAX game account.</li>
- <li>Enter your personal information, such as name, email address, and phone number. Make sure that your email address and phone number are active.</li>
- <li>Click or tap on the "Submit" button to complete your registration.</li>
- <li>Wait for an email from Garena with your activation code and the download link for the Free Fire Advance Server APK file. Note that not everyone who registers will receive an activation code, as they are limited in number and given on a first-come, first-served basis.</li>
- </ol>
- <p>If you are lucky enough to get an activation code, you can proceed to download and install the Free Fire Advance Server APK file on your Android device.</p>
- <h2>How to Download and Install Free Fire Advance Server APK?</h2>
- <p>Once you have received your activation code and the download link for the Free Fire Advance Server APK file, you can follow these steps to download and install it on your Android device:</p>
- <ol>
- <li>Click or tap on the download link in the email to download the Free Fire Advance Server APK file. The file size is about 700 MB, so make sure you have enough storage space and a stable internet connection.</li>
- <li>After the download is complete, locate the APK file on your device and tap on it to install it. You may need to enable the "Install from unknown sources" option in your device settings if you haven't done so before.</li>
- <li>Once the installation is done, open the Free Fire Advance Server app and log in using your Facebook account that you used to register for the Advance Server.</li>
- <li>Enter your activation code when prompted and tap on "Confirm". You will then be able to access the Free Fire Advance Server and enjoy the new features and updates.</li>
- </ol>
- <p>Note that the Free Fire Advance Server is only open for a limited period of time, usually a few days before each major update. You can check the official website of Free Fire Advance Server at <a href="(^2^)">ff-advance.ff.garena.com</a> to see when the server is open and when it will close. You will not be able to play on the Advance Server once it is closed, so make sure you make the most of it while it is open.</p>
- <h2>How to Play and Enjoy Free Fire Advance Server?</h2>
- <p>Playing on Free Fire Advance Server is similar to playing on the regular server, except that you will have access to new features and updates that are not yet available to the public. You will also start from scratch on the Advance Server, meaning that you will not have any of your previous progress, items, or data from your regular account. You will also not be able to transfer anything from the Advance Server back to your regular account.</p>
- <p>How to register and download APK for Free Fire advance server July 2021<br />
- Free Fire advance server 2021: latest updates and features<br />
- Free Fire advance server bug hunting and feedback: how to get diamonds<br />
- Free Fire advance server login using Facebook account: step by step guide<br />
- Free Fire advance server timeline: server opening and closing time<br />
- Free Fire advance server rules: what you need to know before playing<br />
- Free Fire advance server main contributor: how to become one and get rewards<br />
- Free Fire advance server APK download link: where to find it and how to install it<br />
- Free Fire advance server activation code: how to get it and use it<br />
- Free Fire advance server FAQ: answers to common questions<br />
- Free Fire advance server review: pros and cons of playing in the test server<br />
- Free Fire advance server tips and tricks: how to survive and win in the new mode<br />
- Free Fire advance server new characters and weapons: what are they and how to use them<br />
- Free Fire advance server bugs and glitches: how to report them and avoid them<br />
- Free Fire advance server system requirements: what you need to play on your device<br />
- Free Fire advance server news and updates: where to find the latest information<br />
- Free Fire advance server feedback form: how to fill it and submit it<br />
- Free Fire advance server download size: how much space you need on your device<br />
- Free Fire advance server gameplay videos: where to watch them and learn from them<br />
- Free Fire advance server community: how to join and interact with other players<br />
- Free Fire advance server support: how to contact Garena if you have any issues<br />
- Free Fire advance server registration status: how to check if you are accepted or not<br />
- Free Fire advance server best settings: how to optimize your game performance<br />
- Free Fire advance server comparison: how is it different from the regular server<br />
- Free Fire advance server rewards redemption: how to claim your diamonds and other prizes<br />
- Free Fire advance server invitation code: how to get it and share it with your friends<br />
- Free Fire advance server maintenance schedule: when will the server be offline and for how long<br />
- Free Fire advance server patch notes: what are the changes and improvements in the new version<br />
- Free Fire advance server error messages: what they mean and how to fix them<br />
- Free Fire advance server feedback survey: how to participate and share your opinions<br />
- Free Fire advance server registration deadline: when is the last day to sign up for the test server<br />
- Free Fire advance server download problem: what to do if you can't download or install the APK file<br />
- Free Fire advance server login problem: what to do if you can't access or play the game<br />
- Free Fire advance server VPN: do you need it and which one to use<br />
- Free Fire advance server emulator: can you play it on PC and which one to use</p>
- <p>However, this also means that you will have more freedom and fun to explore and experiment with the new features and updates without worrying about losing anything. You will also be able to provide your feedback and suggestions directly to the developers, as well as report any bugs or issues that you encounter on the server. By doing so, you will help improve the game and also earn rewards such as diamonds for your contribution.</p>
- <p>To play and enjoy Free Fire Advance Server, here are some tips and tricks that you can follow:</p>
- <ul>
- <li>Check out the new weapons, characters, skins, modes, maps, events, and more that are available on the Advance Server. Try them out and see how they work and how they affect your gameplay.</li>
- <li>Be prepared for some glitches, errors, or crashes that may occur on the Advance Server. Remember that this is a test server and not everything is perfect or stable. If you encounter any problems, report them using the "Report" button on the game screen.</li>
- <li>Give your honest feedback and suggestions on the new features and updates using the "Feedback" button on the game screen. Tell the developers what you like, what you don't like, what you think can be improved, or what you think is missing.</li>
- <li>Have fun and enjoy playing with other advanced players who share your passion and enthusiasm for Free Fire. You can also invite your friends who have registered for the Advance Server to join you in testing out the new features and updates.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Free Fire Advance Server is a great opportunity for advanced players who want to experience new features and updates before they are released to the public. By joining Free Fire Advance Server, you will be able to access new weapons, characters, skins, modes, maps, events, and more before anyone else. You will also be able to provide your feedback and suggestions directly to the developers, which may influence the final version of the updates. Moreover, you will be rewarded with diamonds for finding and reporting bugs on the server.</p>
- <p>If you are a fan of Free Fire and want to join the exclusive club of advanced players, don't miss this chance to register and download Free Fire Advance Server as soon as possible. The registration process is simple and easy, but you need to act fast because there are only a limited number of activation codes available. The download and installation process is also simple and easy, but you need to have an Android device and a stable internet connection. The playing and enjoying process is similar to the regular server, but with more freedom and fun to explore and experiment with the new features and updates. We hope that this article has helped you understand how to join and play Free Fire Advance Server in July 2023. If you have any questions or comments, feel free to leave them below. And don't forget to share this article with your friends who are also fans of Free Fire. Happy gaming! <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions and answers about Free Fire Advance Server:</p>
- <ol>
- <li><b>What is the difference between Free Fire Advance Server and Free Fire MAX?</b></li>
- <p>Free Fire Advance Server is a test server that is only open for a limited period of time before each major update. It allows players to try out new features and updates that are not yet available on the regular server. Free Fire MAX is a enhanced version of Free Fire that offers higher graphics quality, smoother performance, and exclusive content. It is compatible with the regular server and can be played anytime.</p>
- <li><b>How can I get more diamonds on Free Fire Advance Server?</b></li>
- <p>You can get more diamonds on Free Fire Advance Server by finding and reporting bugs on the server using the "Report" button on the game screen. You will be rewarded with diamonds for each bug that you report, depending on the severity and validity of the bug. You can also get diamonds by providing your feedback and suggestions on the new features and updates using the "Feedback" button on the game screen.</p>
- <li><b>Can I play with my friends on Free Fire Advance Server?</b></li>
- <p>You can play with your friends on Free Fire Advance Server if they have also registered for the Advance Server and have received an activation code from Garena. You can invite them to join you in testing out the new features and updates on the server. However, you will not be able to play with your friends who are on the regular server, as the two servers are not connected.</p>
- <li><b>Will my progress and data on Free Fire Advance Server be saved or transferred to my regular account?</b></li>
- <p>No, your progress and data on Free Fire Advance Server will not be saved or transferred to your regular account. You will start from scratch on the Advance Server, and you will not have any of your previous items or data from your regular account. You will also not be able to transfer anything from the Advance Server back to your regular account.</p>
- <li><b>When will the new features and updates on Free Fire Advance Server be released to the public?</b></li>
- <p>The new features and updates on Free Fire Advance Server will be released to the public after they have been tested and improved by the developers based on the feedback and suggestions from the players on the Advance Server. The exact date of release may vary depending on the update, but it is usually within a few weeks after the closure of the Advance Server.</p>
- </ol></p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download The Baby In Yellow APK for Android - Free Horror Game.md DELETED
@@ -1,133 +0,0 @@
-
- <h1>, <h2>, <h3>, etc.) to divide your content into sections and make it easier for the reader to follow. Step 5: Write a rough draft and pare down your outline. The fifth step is to write a rough draft of your article based on your outline. You don't have to worry about grammar, spelling, or formatting at this stage; just focus on getting your ideas down on paper. You can use tools like Google Docs, Microsoft Word, or Grammarly to write and edit your draft. As you write, try to: - Use a conversational style as written by a human (use an informal tone, utilize personal pronouns, keep it simple, engage the reader, use the active voice, keep it brief, use rhetorical questions, and incorporate analogies and metaphors). - Use SEO-optimized keywords and phrases that match the intent of your audience and the topic of your article. - Use fully detailed paragraphs that explain each point clearly and provide examples or evidence. - Use transitions words or phrases (such as however, therefore, moreover, etc.) to connect your paragraphs and sentences. - Use at least one table (using HTML tags such as <table>, <tr>, <th>, <td>, etc.) to display data or information in a structured way. After you finish writing your draft, pare down your outline by removing any unnecessary or redundant information. You can also rearrange the order of your paragraphs or sections if needed. Step 6: Revise and proofread your article. The final step is to revise and proofread your article before publishing it. You can use tools like Hemingway Editor, ProWritingAid, or Grammarly to check for errors in grammar, spelling, punctuation, readability, style, tone, etc. You can also ask someone else to read your article and give you feedback. As you revise and proofread your article, try to: - Make sure that your article is coherent, consistent, and logical. - Make sure that your article is unique and original (you can use tools like Copyscape or Turnitin to check for plagiarism). - Make sure that your article is engaging and informative (you can use tools like CoSchedule Headline Analyzer or Yoast SEO Plugin to check for headline quality and SEO performance). Outline of the article: - <h1>The Baby In Yellow: A Horror Game That Will Make You Think Twice About Babysitting</h1>
- - <h2>Introduction</h2>
- - What is The Baby In Yellow and what is it about? - Why is it a horror game and what makes it scary? - How can you download and play it on your Android device? - <h2>The Baby In Yellow: A Game That Will Test Your Nerves</h2>
- - <h3>The premise and the gameplay of The Baby In Yellow</h3>
- - You are a babysitter who has to take care of a baby in a yellow onesie - The baby is not a normal baby, but a demonic entity that can do strange things - You have to follow the instructions on the screen and try to survive the night - <h3>The graphics and the sound effects of The Baby In Yellow</h3>
- - The game has a low-poly style that creates a contrast between the cute and the creepy - The game has a dark and eerie atmosphere that builds up tension and suspense - The game has realistic and disturbing sound effects that add to the horror - <h2>How to Download and Play The Baby In Yellow on Your Android Device</h2>
- - <h3>The requirements and the compatibility of The Baby In Yellow</h3>
- - The game requires Android 4.4 or higher and 136 MB of free space - The game is compatible with most Android devices, but some may experience performance issues - The game is free to download and play, but it may contain ads or in-app purchases - <h3>The steps to download and install The Baby In Yellow</h3>
- - Go to one of the trusted sources that offer the APK file of The Baby In Yellow, such as [Softonic](^1^), [Tamindir](^2^), or [APKCombo](^3^) - Tap on the download button and wait for the file to be downloaded - Go to your device settings and enable the installation of apps from unknown sources - Locate the downloaded file in your file manager and tap on it to install it - Launch the game and enjoy the horror - <h2>Conclusion</h2>
- - Summarize the main points of the article - Restate the thesis and provide a call to action or an interesting insight - <h2>FAQs</h2>
- - List five unique FAQs related to the topic of the article Article with HTML formatting: <h1>The Baby In Yellow: A Horror Game That Will Make You Think Twice About Babysitting</h1>
- <h2>Introduction</h2>
- <p>If you are looking for a horror game that will challenge your nerves and make you jump out of your seat, you might want to try The Baby In Yellow. This is a first-person horror game developed by Team Terrible, where you will simulate the life of a babysitter. However, what you will babysit is more sinister than he first appears. The Baby In Yellow follows the same premise as the PC game, but it is now available for Android devices. In this article, we will tell you what The Baby In Yellow is about, why it is a horror game, and how you can download and play it on your Android device.</p>
- <h2>the baby in yellow indir apk</h2><br /><p><b><b>Download</b> &#10145; <a href="https://jinyurl.com/2uNU6e">https://jinyurl.com/2uNU6e</a></b></p><br /><br />
- <h2>The Baby In Yellow: A Game That Will Test Your Nerves</h2>
- <h3>The premise and the gameplay of The Baby In Yellow</h3>
- <p>In The Baby In Yellow, you are a babysitter who has to take care of a baby in a yellow onesie. Sounds easy, right? Well, not quite. The baby is not a normal baby, but a demonic entity that can do strange things. He can teleport, levitate, laugh maniacally, stare at you with glowing eyes, and even summon fire. He can also escape from his crib, his room, or even his house. Your job is to follow the instructions on the screen and try to survive the night. You will have to feed him, change his diaper, put him to bed, and deal with his mischief. But be careful, because he might not like what you do.</p>
- <h3>The graphics and the sound effects of The Baby In Yellow</h3>
- from. The game has realistic and disturbing sound effects that add to the horror. You will hear the baby's cries, laughs, whispers, and screams, as well as the creaking of doors, the flickering of lights, and the thumping of footsteps.</p>
- <h2>How to Download and Play The Baby In Yellow on Your Android Device</h2>
- <h3>The requirements and the compatibility of The Baby In Yellow</h3>
- <p>The game requires Android 4.4 or higher and 136 MB of free space. The game is compatible with most Android devices, but some may experience performance issues. The game is free to download and play, but it may contain ads or in-app purchases.</p>
- <h3>The steps to download and install The Baby In Yellow</h3>
- <p>To download and play The Baby In Yellow on your Android device, you need to follow these steps:</p>
- <table>
- <tr>
- <th>Step</th>
- <th>Instruction</th>
- </tr>
- <tr>
- <td>1</td>
- <td>Go to one of the trusted sources that offer the APK file of The Baby In Yellow, such as <a href="^1^">Softonic</a>, <a href="^2^">Tamindir</a>, or <a href="^3^">APKCombo</a>.</td>
- </tr>
- <tr>
- <td>2</td>
- <td>Tap on the download button and wait for the file to be downloaded.</td>
- </tr>
- <tr>
- <td>3</td>
- <td>Go to your device settings and enable the installation of apps from unknown sources.</td>
- </tr>
- <tr>
- <td>4</td>
- <td>Locate the downloaded file in your file manager and tap on it to install it.</td>
- </tr>
- <tr>
- <td>5</td>
- <td>Launch the game and enjoy the horror.</td>
- </tr>
- </table>
- <h2>Conclusion</h2>
- <p>The Baby In Yellow is a horror game that will make you think twice about babysitting. It is a game that will test your nerves and make you jump out of your seat. It is a game that has a low-poly style, a dark and eerie atmosphere, and realistic and disturbing sound effects. It is a game that is available for Android devices and can be downloaded and played for free. If you are looking for a horror game that will challenge you and scare you, you might want to try The Baby In Yellow. But be warned, this is not a game for the faint-hearted.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions related to The Baby In Yellow:</p>
- <ol>
- <li><b>Is The Baby In Yellow based on a true story?</b></li>
- <p>No, The Baby In Yellow is not based on a true story. It is a fictional horror game inspired by a short film called <a href="^4^">The Thing in the Apartment Chapter 2</a>, which was directed by John William Ross.</p>
- <li><b>Is The Baby In Yellow safe to play?</b></li>
- <p>The Baby In Yellow is safe to play as long as you are aware that it is a horror game that contains scary and violent scenes. It is not recommended for children or people who are sensitive to horror or gore. It is also advisable to play it in a well-lit room and with someone else nearby.</p>
- <li><b>How long does it take to finish The Baby In Yellow?</b></li>
- <p>The Baby In Yellow is a short game that can be finished in about 15 minutes. However, it has multiple endings depending on your choices and actions. You can replay the game to see different outcomes and discover more secrets.</p>
- <p>the baby in yellow download android<br />
- the baby in yellow game apk<br />
- the baby in yellow free apk<br />
- the baby in yellow horror game apk<br />
- the baby in yellow apk mod<br />
- the baby in yellow apk pure<br />
- the baby in yellow apk latest version<br />
- the baby in yellow apk offline<br />
- the baby in yellow apk uptodown<br />
- the baby in yellow apk for pc<br />
- the baby in yellow apk android oyun club<br />
- the baby in yellow apk hile<br />
- the baby in yellow apk indir gezginler<br />
- the baby in yellow apk indir tamindir<br />
- the baby in yellow apk indir softonic<br />
- the baby in yellow apk indir cepde<br />
- the baby in yellow apk indir apkpure<br />
- the baby in yellow apk indir android oyun club<br />
- the baby in yellow apk indir son sürüm<br />
- the baby in yellow apk indir ücretsiz<br />
- the baby in yellow oyunu indir apk<br />
- the baby in yellow oyunu indir android<br />
- the baby in yellow oyunu indir pc<br />
- the baby in yellow oyunu indir ücretsiz<br />
- the baby in yellow oyunu indir tamindir<br />
- the baby in yellow oyunu indir gezginler<br />
- the baby in yellow oyunu indir softonic<br />
- the baby in yellow oyunu indir cepde<br />
- the baby in yellow oyunu indir apkpure<br />
- the baby in yellow oyunu indir android oyun club<br />
- download game the baby in yellow apk<br />
- download game the baby in yellow android<br />
- download game the baby in yellow mod apk<br />
- download game the baby in yellow free apk<br />
- download game the baby in yellow horror apk<br />
- download game the baby in yellow latest version apk<br />
- download game the baby in yellow offline apk<br />
- download game the baby in yellow uptodown apk<br />
- download game the baby in yellow for pc apk<br />
- download game the baby in yellow android oyun club apk<br />
- download game the baby in yellow hileli apk<br />
- download game the baby in yellow gezginler apk<br />
- download game the baby in yellow tamindir apk<br />
- download game the baby in yellow softonic apk<br />
- download game the baby in yellow cepde apk <br />
- download game the baby in yellow apkpure apk <br />
- download game the baby in yellow android oyun club apk <br />
- download game the baby in yellow son sürüm apk <br />
- download game the baby in yellow ücretsiz apk</p>
- <li><b>What are some tips and tricks to play The Baby In Yellow?</b></li>
- <p>Some tips and tricks to play The Baby In Yellow are:</p>
- <ul>
- <li>Pay attention to the instructions on the screen and follow them carefully.</li>
- <li>Use the flashlight to see better in the dark.</li>
- <li>Avoid looking at the baby's eyes or touching him when he is angry.</li>
- <li>Hide in the closet or under the bed if you hear something suspicious.</li>
- <li>Don't let the baby escape from his room or his house.</li>
- <li>Don't trust everything you see or hear.</li>
- </ul>
- <li><b>Where can I find more games like The Baby In Yellow?</b></li>
- <p>If you enjoyed playing The Baby In Yellow, you might also like these games:</p>
- <ul>
- <li><a href="^6^">Five Nights at Freddy's</a>: A horror game where you have to survive five nights in a pizzeria haunted by animatronic animals.</li>
- <li><a href="^7^">Slendrina: The Cellar</a>: A horror game where you have to explore a cellar and avoid a ghostly woman.</li>
- <li><a href="^8^">Eyes: The Horror Game</a>: A horror game where you have to collect valuables in a haunted house and avoid a monster.</li>
- <li><a href="^9^">Hello Neighbor</a>: A stealth horror game where you have to sneak into your neighbor's house and discover his secrets.</li>
- </ul>
- </ol>
- <p>I hope you enjoyed reading this article and learned something new. If you have any questions or comments, feel free to leave them below. And if you want to play The Baby In Yellow, don't forget to download it from one of the sources mentioned above. But be careful, because this game is not for the faint-hearted.</p> 401be4b1e0<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Epic War 6 How to Conquer Every Spot on the Board.md DELETED
@@ -1,134 +0,0 @@
-
- <h1>Epic War 6 APK: A Thrilling Battle Game for Android</h1>
- <p>If you are looking for a game that combines strategy, action, and fantasy, then you should check out <strong>Epic War 6 APK</strong>. This is a game that lets you command legendary heroes and a strong army in epic battles against powerful enemies. You can choose from six unique heroes, each with their own strengths, weaknesses, and skills. You can also train and upgrade over 40 battle units, from archers and knights to dragons and giants. You can also challenge and defeat huge titans that will test your skills and strategy. And if you want to compete with other players from around the world, you can enter the PVP Arena and show how epic you are.</p>
- <h2>epic war 6 apk</h2><br /><p><b><b>DOWNLOAD</b> &#9999; <a href="https://jinyurl.com/2uNKrf">https://jinyurl.com/2uNKrf</a></b></p><br /><br />
- <p>In this article, we will tell you everything you need to know about Epic War 6 APK, including its features, how to download and install it, how to play it, how it compares with other games, what are its pros and cons, and what is its review. By the end of this article, you will have a clear idea of whether this game is worth playing or not.</p>
- <h2>Features of Epic War 6 APK</h2>
- <p>Epic War 6 APK has a lot of features that make it a fun and exciting game to play. Here are some of them:</p>
- <ul>
- <li><strong>6 unique heroes</strong>: You can choose from six different heroes, each with their own personality, backstory, and abilities. Some of them are based on famous characters from mythology or history, such as Thor, Hercules, or Joan of Arc. Each hero has a special skill that can change the outcome of the battle, such as summoning thunderstorms, healing allies, or boosting morale.</li <p><li><strong>Over 40 battle units</strong>: You can train and upgrade a variety of units to fight for you in the battlefield. You can choose from different classes, such as infantry, cavalry, ranged, magic, or special. Each class has its own advantages and disadvantages, and you need to balance your army composition according to the situation. You can also unlock new units as you progress in the game, such as ninjas, samurais, or angels.</li>
- <li><strong>10 powerful titans</strong>: You can face and defeat 10 massive titans that will pose a great challenge to your skills and strategy. These titans are based on mythical creatures, such as dragons, hydras, or krakens. They have different abilities and weaknesses, and you need to find the best way to exploit them. You can also use your hero's skill to deal extra damage or gain an edge in the fight.</li>
- <li><strong>PVP Arena</strong>: You can compete with other players from around the world in the PVP Arena mode. You can choose your hero and units and enter a random match against another player. You can also join a clan and participate in clan wars, where you can cooperate with your clan members and fight against other clans. You can earn rewards and rank up in the leaderboards by winning matches and wars.</li>
- </ul>
- <h2>How to Download and Install Epic War 6 APK</h2>
- <p>If you want to play Epic War 6 APK on your Android device, you need to download and install it first. Here are the steps that you need to follow:</p>
- <ol>
- <li><strong>Go to the official website of mob.org</strong>: This is one of the best sources for downloading free Android games. You can access it by typing <a href="">mob.org</a> in your browser or clicking on this link. </li>
- <li><strong>Search for Epic War 6 APK</strong>: Once you are on the website, you can use the search bar to look for Epic War 6 APK. You can also browse through the categories or genres to find it. Alternatively, you can use this direct link to go to the download page of Epic War 6 APK.</li>
- <li><strong>Click on the download button</strong>: When you find the game that you want, you can click on the green download button that says "Download Epic War 6". This will start the download process and you will see a progress bar on your screen.</li>
- <li><strong>Enable unknown sources on your device settings</strong>: Before you can install the APK file that you downloaded, you need to allow your device to install apps from unknown sources. To do this, go to your device settings and look for security or privacy options. Then, find the option that says "Unknown sources" or "Allow installation of apps from unknown sources" and enable it.</li>
- <li><strong>Install the APK file</strong>: After enabling unknown sources, you can go to your file manager or downloads folder and find the APK file that you downloaded. Tap on it and follow the instructions on your screen to install it.</li>
- <li><strong>Launch the game and enjoy the epic battles</strong>: Once the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer. You can then start playing the game and enjoy the epic battles.</li>
- </ol> <h2>Gameplay Tips and Tricks for Epic War 6 APK</h2>
- <p>Epic War 6 APK is a game that requires strategy, skill, and patience. You need to plan your moves carefully and use your resources wisely. Here are some tips and tricks that can help you improve your gameplay and win more battles:</p>
- <ul>
- <li><strong>Choose your hero wisely</strong>: Each hero has a different skill that can affect the battle in various ways. For example, Thor can summon thunderstorms that deal damage to all enemies, Hercules can heal all allies and boost their morale, and Joan of Arc can increase the attack and defense of all units. You need to choose the hero that suits your playstyle and strategy, and use their skill at the right time and place.</li>
- <li><strong>Use spells and skills at the right time and place</strong>: Apart from your hero's skill, you can also use spells that you can buy from the shop or earn from quests. These spells can have different effects, such as healing, damaging, freezing, or stunning. You need to use them wisely and strategically, as they have a cooldown time and a limited number of uses. You also need to aim them well, as some of them have a specific target or area of effect.</li>
- <li><strong>Upgrade your units and heroes regularly</strong>: As you progress in the game, you will face stronger enemies and tougher challenges. You need to upgrade your units and heroes regularly to increase their power and performance. You can upgrade them by using gold and gems that you can earn from battles, quests, or achievements. You can also equip them with items that you can buy from the shop or find in chests. These items can enhance their stats or give them special abilities.</li>
- <li><strong>Experiment with different combinations of units and heroes</strong>: There are many possible combinations of units and heroes that you can use in the game. You can mix and match different classes, such as infantry, cavalry, ranged, magic, or special. You can also try different heroes with different skills and abilities. You need to experiment with different combinations to find the best synergy and balance for your army.</li>
- </ul>
- <h2>Comparison of Epic War 6 APK with Other Games</h2>
- <p>Epic War 6 APK is not the only game that offers strategy and action in a fantasy setting. There are many other games that have similar or different features and gameplay. Here are some of them and how they compare with Epic War 6 APK:</p>
- <table>
- <tr>
- <th>Game</th>
- <th>Similarities</th>
- <th>Differences</th>
- </tr>
- <tr>
- <td>Epic War Saga</td>
- <td>- Same developer as Epic War 6 APK<br>- Similar gameplay but with more RPG elements<br>- Same genre of strategy and action</td>
- <td>- Fewer heroes, units, and titans than Epic War 6 APK<br>- More quests, missions, and achievements than Epic War 6 APK<br>- Different graphics style and theme than Epic War 6 APK</td>
- </tr>
- <tr>
- <td>Kingdom Rush</td>
- <td>- Same genre of strategy and action<br>- Similar gameplay but with tower defense elements<br>- Same theme of fantasy and mythology</td>
- <td>- Different developer than Epic War 6 APK<br>- Fewer heroes and units than Epic War 6 APK<br>- No titans or PVP mode in Kingdom Rush</td>
- </tr>
- <tr>
- <td>Clash of Clans</td>
- <td>- Same genre of strategy and action<br>- Similar gameplay but with base building and army management elements<br>- Same theme of fantasy and mythology</td>
- <td>- Different developer than Epic War 6 APK<br>- More online multiplayer features than Epic War 6 APK<br>- Different graphics style and tone than Epic War 6 APK</td>
- </tr>
- </table>
- <h2>Pros and Cons of Epic War 6 APK</h2>
- <p>Epic War 6 APK is a game that has many positive and negative aspects. Here are some of them:</p>
- <h3>Pros</h3>
- <ul>
- <li><strong>High-quality graphics</strong>: The game has impressive graphics that create a realistic and immersive experience. The heroes, units, and titans are well-designed and animated. The backgrounds and environments are detailed and colorful. The effects and sounds are also realistic and captivating.</li>
- <li><strong>Addictive gameplay</strong>: The game has a simple but engaging gameplay that keeps you hooked for hours. The battles are fast-paced and thrilling, with a lot of strategy and action involved. The game also has a lot of content and features to explore, such as quests, achievements, items, and PVP mode.</li>
- <li><strong>Diverse heroes and units</strong>: The game has a lot of variety and diversity in terms of heroes and units. You can choose from six different heroes, each with their own skills and abilities. You can also train and upgrade over 40 battle units, from archers and knights to dragons and giants. You can also unlock new units as you progress in the game, such as ninjas, samurais, or angels.</li>
- <li><strong>Online PVP mode</strong>: The game has an online PVP mode that lets you compete with other players from around the world. You can choose your hero and units and enter a random match against another player. You can also join a clan and participate in clan wars, where you can cooperate with your clan members and fight against other clans. You can earn rewards and rank up in the leaderboards by winning matches and wars.</li>
- <li><strong>Free to play</strong>: The game is free to download and play on your Android device. You do not need to pay anything to enjoy the game. You can also play the game offline without an internet connection.</li>
- </ul>
- <h3>Cons</h3>
- <ul>
- <li><strong>High learning curve</strong>: The game is not very easy to learn or master. You need to understand the mechanics and strategies of the game, such as how to use your hero's skill, how to upgrade your units, how to use spells, how to defeat titans, etc. You also need to practice a lot to improve your skills and performance.</li>
66
- <li><strong>Requires internet connection</strong>: The game requires an internet connection to access some of its features, such as PVP mode, clan wars, quests, achievements, etc. If you do not have a stable or fast internet connection, you may experience lagging or crashing issues.</li>
67
- <li><strong>May have bugs and glitches</strong>: The game may have some bugs and glitches that can affect your gameplay or experience. For example, some users have reported that the game freezes or crashes randomly, that the game does not save their progress or data, that the game does not load properly, etc.</li>
68
- <li><strong>May consume battery and storage space</strong>: The game may consume a lot of battery power and storage space on your device. This is because the game has high-quality graphics, sounds, and effects that require a lot of resources. You may need to charge your device frequently or clear some space on your device to play the game smoothly.</li>
69
- </ul>
70
- <h2>Review of Epic War 6 APK</h2>
71
- <p>Epic War 6 APK is a game that deserves a positive review from us. We think that it is a great game for fans of strategy and action games, with a lot of content and features to enjoy. We like the graphics, the gameplay, the diversity, and the online mode of the game. We think that it is a fun and exciting game to play.</p>
72
- <p>However, we also acknowledge that the game has some flaws that need to be fixed or improved. We think that the game is not very easy to learn or master, that it requires an internet connection for some features, that it may have some bugs and glitches, and that it may consume a lot of battery power and storage space on your device.</p>
73
- <p>Therefore, we give Epic War 6 APK a rating of 4.5 out of 5 stars based on our experience and feedback from other users. We think that it is a game worth playing if you like strategy and action games.</p>
74
- <h2>Conclusion</h2>
115
- <p>In conclusion, Epic War 6 APK is a thrilling battle game for Android devices that lets you command legendary heroes and a strong army in epic battles against powerful enemies. You can choose from six unique heroes, each with their own skills and abilities. You can also train and upgrade over 40 battle units, from archers and knights to dragons and giants. You can also challenge and defeat huge titans that will test your skills and strategy. And if you want to compete with other players from around the world, you can enter the PVP Arena and show how epic you are.</p>
116
- <p>We have also told you how to download and install Epic War 6 APK on your device, how to play it, how it compares with other games, what are its pros and cons, and what is its review. We hope that this article has been helpful and informative for you.</p>
117
- <p>If you are interested in playing Epic War 6 APK, you can download it from the official website of mob.org or use this direct link. You can also visit the official Facebook page of the game for more updates and news. You can also watch this video for a preview of the game.</p>
118
- <p>Thank you for reading this article and we hope that you enjoy playing Epic War 6 APK. Have fun and good luck!</p>
119
- <h2>FAQs</h2>
120
- <p>Here are some frequently asked questions about Epic War 6 APK:</p>
121
- <ol>
122
- <li><strong>What are the requirements to play Epic War 6 APK?</strong><br>
123
- You need an Android device with Android 4.1 or higher and at least 100 MB of free storage space to play Epic War 6 APK. You also need an internet connection to access some features of the game, such as PVP mode, clan wars, quests, achievements, etc.</li>
124
- <li><strong>Is Epic War 6 APK safe to download and install?</strong><br>
125
- Yes, Epic War 6 APK is safe to download and install on your device. It does not contain any viruses, malware, or spyware that can harm your device or data. However, you need to make sure that you download it from a trusted source, such as mob.org or the direct link that we provided in this article.</li>
126
- <li><strong>How can I get more gold and gems in Epic War 6 APK?</strong><br>
127
- You can get more gold and gems in Epic War 6 APK by winning battles, completing quests, achieving goals, opening chests, watching ads, or buying them with real money. You can use gold and gems to upgrade your units and heroes, buy items and spells, or unlock new features and content.</li>
128
- <li><strong>How can I join or create a clan in Epic War 6 APK?</strong><br>
129
- You can join or create a clan in Epic War 6 APK by going to the clan menu in the game. You can either search for an existing clan that suits your preferences and apply to join it, or create your own clan by choosing a name, a logo, and a description. You can also invite your friends or other players to join your clan. You can participate in clan wars, chat with your clan members, and share resources and tips with them.</li>
130
- <li><strong>How can I contact the developer of Epic War 6 APK?</strong><br>
131
- You can contact the developer of Epic War 6 APK by sending an email to [email protected] or by visiting their website at www.artlogicgames.com. You can also follow them on Facebook at www.facebook.com/epicwargames. You can send them your feedback, suggestions, questions, or complaints about the game.</li>
132
- </ol>
 
spaces/1toTree/lora_test/ppdiffusers/pipelines/latent_diffusion_uncond/__init__.py DELETED
@@ -1,17 +0,0 @@
1
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2
- # Copyright 2022 The HuggingFace Team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- # flake8: noqa
17
- from .pipeline_latent_diffusion_uncond import LDMPipeline
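For context, this `__init__.py` merely re-exports `LDMPipeline` so it can be imported from the package root. A minimal usage sketch, assuming the ppdiffusers port mirrors the diffusers API; the checkpoint name is illustrative, not taken from this repository:

```python
# Minimal sketch, assuming ppdiffusers mirrors the diffusers API.
# The checkpoint name is illustrative, not taken from this repository.
from ppdiffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
image = pipe(num_inference_steps=50).images[0]  # unconditional sample
image.save("ldm_sample.png")
```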
 
spaces/AIFILMS/generate_human_motion/app.py DELETED
@@ -1,319 +0,0 @@
1
- import sys
2
- import os
3
- import OpenGL.GL as gl
4
- os.environ["PYOPENGL_PLATFORM"] = "egl"
5
- os.environ["MESA_GL_VERSION_OVERRIDE"] = "4.1"
6
- os.system('pip install /home/user/app/pyrender')
7
-
8
- sys.argv = ['VQ-Trans/GPT_eval_multi.py']
9
- os.chdir('VQ-Trans')
10
-
11
- sys.path.append('/home/user/app/VQ-Trans')
12
- sys.path.append('/home/user/app/pyrender')
13
-
14
- import options.option_transformer as option_trans
15
- from huggingface_hub import snapshot_download
16
- model_path = snapshot_download(repo_id="vumichien/T2M-GPT")
17
-
18
- args = option_trans.get_args_parser()
19
-
20
- args.dataname = 't2m'
21
- args.resume_pth = f'{model_path}/VQVAE/net_last.pth'
22
- args.resume_trans = f'{model_path}/VQTransformer_corruption05/net_best_fid.pth'
23
- args.down_t = 2
24
- args.depth = 3
25
- args.block_size = 51
26
-
27
- import clip
28
- import torch
29
- import numpy as np
30
- import models.vqvae as vqvae
31
- import models.t2m_trans as trans
32
- from utils.motion_process import recover_from_ric
33
- import visualization.plot_3d_global as plot_3d
34
- from models.rotation2xyz import Rotation2xyz
35
- import numpy as np
36
- from trimesh import Trimesh
37
- import gc
38
-
39
- import torch
40
- from visualize.simplify_loc2rot import joints2smpl
41
- import pyrender
42
- # import matplotlib.pyplot as plt
43
-
44
- import io
45
- import imageio
46
- from shapely import geometry
47
- import trimesh
48
- from pyrender.constants import RenderFlags
49
- import math
50
- # import ffmpeg
51
- # from PIL import Image
52
- import hashlib
53
- import gradio as gr
54
- import moviepy.editor as mp
55
-
56
- ## load clip model and datasets
57
- is_cuda = torch.cuda.is_available()
58
- device = torch.device("cuda" if is_cuda else "cpu")
59
- print(device)
60
- clip_model, clip_preprocess = clip.load("ViT-B/32", device=device, jit=False, download_root='./') # Must set jit=False for training
61
-
62
- if is_cuda:
63
- clip.model.convert_weights(clip_model)
64
-
65
- clip_model.eval()
66
- for p in clip_model.parameters():
67
- p.requires_grad = False
68
-
69
- net = vqvae.HumanVQVAE(args, ## use args to define different parameters in different quantizers
70
- args.nb_code,
71
- args.code_dim,
72
- args.output_emb_width,
73
- args.down_t,
74
- args.stride_t,
75
- args.width,
76
- args.depth,
77
- args.dilation_growth_rate)
78
-
79
-
80
- trans_encoder = trans.Text2Motion_Transformer(num_vq=args.nb_code,
81
- embed_dim=1024,
82
- clip_dim=args.clip_dim,
83
- block_size=args.block_size,
84
- num_layers=9,
85
- n_head=16,
86
- drop_out_rate=args.drop_out_rate,
87
- fc_rate=args.ff_rate)
88
-
89
-
90
- print('loading checkpoint from {}'.format(args.resume_pth))
91
- ckpt = torch.load(args.resume_pth, map_location='cpu')
92
- net.load_state_dict(ckpt['net'], strict=True)
93
- net.eval()
94
-
95
- print('loading transformer checkpoint from {}'.format(args.resume_trans))
96
- ckpt = torch.load(args.resume_trans, map_location='cpu')
97
- trans_encoder.load_state_dict(ckpt['trans'], strict=True)
98
- trans_encoder.eval()
99
-
100
- mean = torch.from_numpy(np.load(f'{model_path}/meta/mean.npy'))
101
- std = torch.from_numpy(np.load(f'{model_path}/meta/std.npy'))
102
-
103
- if is_cuda:
104
- net.cuda()
105
- trans_encoder.cuda()
106
- mean = mean.cuda()
107
- std = std.cuda()
108
-
109
- def render(motions, device_id=0, name='test_vis'):
110
- frames, njoints, nfeats = motions.shape
111
- MINS = motions.min(axis=0).min(axis=0)
112
- MAXS = motions.max(axis=0).max(axis=0)
113
-
114
- height_offset = MINS[1]
115
- motions[:, :, 1] -= height_offset
116
- trajec = motions[:, 0, [0, 2]]
117
- is_cuda = torch.cuda.is_available()
118
- # device = torch.device("cuda" if is_cuda else "cpu")
119
- j2s = joints2smpl(num_frames=frames, device_id=0, cuda=is_cuda)
120
- rot2xyz = Rotation2xyz(device=device)
121
- faces = rot2xyz.smpl_model.faces
122
-
123
- if not os.path.exists(f'output/{name}_pred.pt'):
124
- print(f'Running SMPLify, it may take a few minutes.')
125
- motion_tensor, opt_dict = j2s.joint2smpl(motions) # [nframes, njoints, 3]
126
-
127
- vertices = rot2xyz(torch.tensor(motion_tensor).clone(), mask=None,
128
- pose_rep='rot6d', translation=True, glob=True,
129
- jointstype='vertices',
130
- vertstrans=True)
131
- vertices = vertices.detach().cpu()
132
- torch.save(vertices, f'output/{name}_pred.pt')
133
- else:
134
- vertices = torch.load(f'output/{name}_pred.pt')
135
- frames = vertices.shape[3] # shape: 1, nb_vertices, 3, nb_frames
136
- print(vertices.shape)
137
- MINS = torch.min(torch.min(vertices[0], axis=0)[0], axis=1)[0]
138
- MAXS = torch.max(torch.max(vertices[0], axis=0)[0], axis=1)[0]
139
-
140
- out_list = []
141
-
142
- minx = MINS[0] - 0.5
143
- maxx = MAXS[0] + 0.5
144
- minz = MINS[2] - 0.5
145
- maxz = MAXS[2] + 0.5
146
- polygon = geometry.Polygon([[minx, minz], [minx, maxz], [maxx, maxz], [maxx, minz]])
147
- polygon_mesh = trimesh.creation.extrude_polygon(polygon, 1e-5)
148
-
149
- vid = []
150
- for i in range(frames):
151
- if i % 10 == 0:
152
- print(i)
153
-
154
- mesh = Trimesh(vertices=vertices[0, :, :, i].squeeze().tolist(), faces=faces)
155
-
156
- base_color = (0.11, 0.53, 0.8, 0.5)
157
- ## OPAQUE rendering without alpha
158
- ## BLEND rendering consider alpha
159
- material = pyrender.MetallicRoughnessMaterial(
160
- metallicFactor=0.7,
161
- alphaMode='OPAQUE',
162
- baseColorFactor=base_color
163
- )
164
-
165
-
166
- mesh = pyrender.Mesh.from_trimesh(mesh, material=material)
167
-
168
- polygon_mesh.visual.face_colors = [0, 0, 0, 0.21]
169
- polygon_render = pyrender.Mesh.from_trimesh(polygon_mesh, smooth=False)
170
-
171
- bg_color = [1, 1, 1, 0.8]
172
- scene = pyrender.Scene(bg_color=bg_color, ambient_light=(0.4, 0.4, 0.4))
173
-
174
- sx, sy, tx, ty = [0.75, 0.75, 0, 0.10]
175
-
176
- camera = pyrender.PerspectiveCamera(yfov=(np.pi / 3.0))
177
-
178
- light = pyrender.DirectionalLight(color=[1,1,1], intensity=300)
179
-
180
- scene.add(mesh)
181
-
182
- c = np.pi / 2
183
-
184
- scene.add(polygon_render, pose=np.array([[ 1, 0, 0, 0],
185
-
186
- [ 0, np.cos(c), -np.sin(c), MINS[1].cpu().numpy()],
187
-
188
- [ 0, np.sin(c), np.cos(c), 0],
189
-
190
- [ 0, 0, 0, 1]]))
191
-
192
- light_pose = np.eye(4)
193
- light_pose[:3, 3] = [0, -1, 1]
194
- scene.add(light, pose=light_pose.copy())
195
-
196
- light_pose[:3, 3] = [0, 1, 1]
197
- scene.add(light, pose=light_pose.copy())
198
-
199
- light_pose[:3, 3] = [1, 1, 2]
200
- scene.add(light, pose=light_pose.copy())
201
-
202
-
203
- c = -np.pi / 6
204
-
205
- scene.add(camera, pose=[[ 1, 0, 0, (minx+maxx).cpu().numpy()/2],
206
-
207
- [ 0, np.cos(c), -np.sin(c), 1.5],
208
-
209
- [ 0, np.sin(c), np.cos(c), max(4, minz.cpu().numpy()+(1.5-MINS[1].cpu().numpy())*2, (maxx-minx).cpu().numpy())],
210
-
211
- [ 0, 0, 0, 1]
212
- ])
213
-
214
- # render scene
215
- r = pyrender.OffscreenRenderer(960, 960)
216
-
217
- color, _ = r.render(scene, flags=RenderFlags.RGBA)
218
- # Image.fromarray(color).save(outdir+'/'+name+'_'+str(i)+'.png')
219
-
220
- vid.append(color)
221
-
222
- r.delete()
223
-
224
- out = np.stack(vid, axis=0)
225
- imageio.mimwrite(f'output/results.gif', out, fps=20)
226
- out_video = mp.VideoFileClip(f'output/results.gif')
227
- out_video.write_videofile("output/results.mp4")
228
- del out, vertices
229
- return f'output/results.mp4'
230
-
231
- def predict(clip_text, method='fast'):
232
- gc.collect()
233
- if torch.cuda.is_available():
234
- text = clip.tokenize([clip_text], truncate=True).cuda()
235
- else:
236
- text = clip.tokenize([clip_text], truncate=True)
237
- feat_clip_text = clip_model.encode_text(text).float()
238
- index_motion = trans_encoder.sample(feat_clip_text[0:1], False)
239
- pred_pose = net.forward_decoder(index_motion)
240
- pred_xyz = recover_from_ric((pred_pose*std+mean).float(), 22)
241
- output_name = hashlib.md5(clip_text.encode()).hexdigest()
242
- if method == 'fast':
243
- xyz = pred_xyz.reshape(1, -1, 22, 3)
244
- pose_vis = plot_3d.draw_to_batch(xyz.detach().cpu().numpy(), title_batch=None, outname=[f'output/results.gif'])
245
- out_video = mp.VideoFileClip("output/results.gif")
246
- out_video.write_videofile("output/results.mp4")
247
- return f'output/results.mp4'
248
- elif method == 'slow':
249
- output_path = render(pred_xyz.detach().cpu().numpy().squeeze(axis=0), device_id=0, name=output_name)
250
- return output_path
251
-
252
-
253
- # ---- Gradio Layout -----
254
- text_prompt = gr.Textbox(label="Text prompt", lines=1, interactive=True)
255
- video_out = gr.Video(label="Motion", mirror_webcam=False, interactive=False)
256
- demo = gr.Blocks()
257
- demo.encrypt = False
258
-
259
- with demo:
260
- gr.Markdown('''
261
- <div>
262
- <h1 style='text-align: center'>Generating Human Motion from Textual Descriptions (T2M-GPT)</h1>
263
- This space uses <a href='https://mael-zys.github.io/T2M-GPT/' target='_blank'><b>T2M-GPT models</b></a> based on Vector Quantised-Variational AutoEncoder (VQ-VAE) and Generative Pre-trained Transformer (GPT) for human motion generation from textual descriptions🤗
264
- </div>
265
- ''')
266
- with gr.Row():
267
- with gr.Column():
268
- gr.Markdown('''
269
- <figure>
270
- <img src="https://huggingface.co/vumichien/T2M-GPT/resolve/main/demo_slow1.gif" alt="Demo Slow", width="425", height=480/>
271
- <figcaption> a man starts off in an upright position with both arms extended out by his sides, he then brings his arms down to his body and claps his hands together. after this he walks down and to the left where he proceeds to sit on a seat
272
- </figcaption>
273
- </figure>
274
- ''')
275
- with gr.Column():
276
- gr.Markdown('''
277
- <figure>
278
- <img src="https://huggingface.co/vumichien/T2M-GPT/resolve/main/demo_slow2.gif" alt="Demo Slow 2", width="425", height=480/>
279
- <figcaption> a person puts their hands together, leans forwards slightly then swings the arms from right to left
280
- </figcaption>
281
- </figure>
282
- ''')
283
- with gr.Column():
284
- gr.Markdown('''
285
- <figure>
286
- <img src="https://huggingface.co/vumichien/T2M-GPT/resolve/main/demo_slow3.gif" alt="Demo Slow 3", width="425", height=480/>
287
- <figcaption> a man is practicing the waltz with a partner
288
- </figcaption>
289
- </figure>
290
- ''')
291
- with gr.Row():
292
- with gr.Column():
293
- gr.Markdown('''
294
- ### Generate human motion by **T2M-GPT**
295
- ##### Step 1. Give a text prompt describing the human motion
296
- ##### Step 2. Choose a method to render the output (Fast: sketch skeleton; Slow: SMPL mesh, which only works on GPU and takes around 2 minutes)
297
- ##### Step 3. Generate the output and enjoy
298
- ''')
299
- with gr.Column():
300
- with gr.Row():
301
- text_prompt.render()
302
- method = gr.Dropdown(["slow", "fast"], label="Method", value="slow")
303
- with gr.Row():
304
- generate_btn = gr.Button("Generate")
305
- generate_btn.click(predict, [text_prompt, method], [video_out], api_name="generate")
306
- print(video_out)
307
- with gr.Row():
308
- video_out.render()
309
- with gr.Row():
310
- gr.Markdown('''
311
- ### You can test by following examples:
312
- ''')
313
- examples = gr.Examples(examples=
314
- [ "a person jogs in place, slowly at first, then increases speed. they then back up and squat down.",
315
- "a man steps forward and does a handstand",
316
- "a man rises from the ground, walks in a circle and sits back down on the ground"],
317
- label="Examples", inputs=[text_prompt])
318
-
319
- demo.launch(debug=True)
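As a reference for how the pieces above fit together, here is a minimal sketch of driving the `predict` function directly, outside the Gradio UI. It assumes the module-level CLIP, VQ-VAE, and transformer checkpoints loaded successfully:

```python
# Sketch only: calls the predict() defined above, assuming the module-level
# models (clip_model, net, trans_encoder) loaded without error.
video_path = predict("a man steps forward and does a handstand", method="fast")
print(video_path)  # expected: "output/results.mp4"
```

The "fast" path draws a stick-figure skeleton with plot_3d; the "slow" path runs SMPLify fitting and renders an SMPL mesh with pyrender, which is why it needs a GPU and a couple of minutes.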
 
spaces/AIZerotoHero-Health4All/02-ClinicalTerminology/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: 02 ClinicalTerminology
3
- emoji: 🐠
4
- colorFrom: red
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.12.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/ASJMO/freegpt/client/css/buttons.css DELETED
@@ -1,4 +0,0 @@
1
- .buttons {
2
- display: flex;
3
- justify-content: left;
4
- }
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/GptGod.py DELETED
@@ -1,51 +0,0 @@
1
- from __future__ import annotations
2
- import secrets, json
3
- from aiohttp import ClientSession
4
- from typing import AsyncGenerator
5
- from .base_provider import AsyncGeneratorProvider
6
- from .helper import format_prompt
7
-
8
- class GptGod(AsyncGeneratorProvider):
9
- url = "https://gptgod.site"
10
- supports_gpt_35_turbo = True
11
- working = True
12
-
13
- @classmethod
14
- async def create_async_generator(
15
- cls,
16
- model: str,
17
- messages: list[dict[str, str]],
18
- **kwargs
19
- ) -> AsyncGenerator:
20
- headers = {
21
- "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/118.0",
22
- "Accept": "text/event-stream",
23
- "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
24
- "Accept-Encoding": "gzip, deflate, br",
25
- "Alt-Used": "gptgod.site",
26
- "Connection": "keep-alive",
27
- "Referer": "https://gptgod.site/",
28
- "Sec-Fetch-Dest": "empty",
29
- "Sec-Fetch-Mode": "cors",
30
- "Sec-Fetch-Site": "same-origin",
31
- "Pragma": "no-cache",
32
- "Cache-Control": "no-cache",
33
- }
34
- async with ClientSession(headers=headers) as session:
35
- prompt = format_prompt(messages)
36
- data = {
37
- "content": prompt,
38
- "id": secrets.token_hex(16).zfill(32)
39
- }
40
- async with session.get(f"{cls.url}/api/session/free/gpt3p5", params=data) as response:
41
- response.raise_for_status()
42
- event = None
43
- async for line in response.content:
44
- if line.startswith(b'event: '):
45
- event = line[7:-1]
46
- elif event == b"data" and line.startswith(b"data: "):
47
- data = json.loads(line[6:-1])
48
- if data:
49
- yield data
50
- elif event == b"done":
51
- break
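A hedged sketch of consuming this provider's generator, following the `AsyncGeneratorProvider` convention visible above; the message payload is illustrative:

```python
# Illustrative driver; the message payload is an assumption, but the
# create_async_generator signature matches the classmethod defined above.
import asyncio

async def main():
    messages = [{"role": "user", "content": "Hello"}]
    async for chunk in GptGod.create_async_generator("gpt-3.5-turbo", messages):
        print(chunk, end="", flush=True)

asyncio.run(main())
```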
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/Builders.js DELETED
@@ -1,79 +0,0 @@
1
- import CreateImage from './CreateImage.js';
2
- import CreateSprite from './CreateSprite.js';
3
- import CreateVideo from './CreateVideo.js';
4
- import CreateText from './CreateText.js';
5
- import CreateBBCodeText from './CreateBBCodeText.js';
6
- import CreateRoundRectangle from './CreateRoundRectangle.js';
7
- import CreateNinePatch from './CreateNinePatch.js';
8
- import CreateNinePatch2 from './CreateNinePatch2.js';
9
- import CreateCanvas from './CreateCanvas.js';
10
- import CreateCircleMaskImage from './CreateCircleMaskImage.js';
11
- import CreateSpace from './CreateSpace.js';
12
-
13
- import CreateSizer from './CreateSizer.js';
14
- import CreateFixWidthSizer from './CreateFixWidthSizer.js';
15
- import CreateGridSizer from './CreateGridSizer.js';
16
- import CreateOverlapSizer from './CreateOverlapSizer.js';
17
-
18
- import CreateButtons from './CreateButtons.js';
19
- import CreateFixWidthButtons from './CreateFixWidthButtons.js';
20
- import CreateGridButtons from './CreateGridButtons.js';
21
-
22
- import CreateLabel from './CreateLabel.js';
23
- import CreateBadgeLabel from './CreateBadgeLabel.js';
24
- import CreateDialog from './CreateDialog.js';
25
- import CreateTextBox from './CreateTextBox.js';
26
- import CreateSlider from './CreateSlider.js';
27
- import CreateNumberBar from './CreateNumberBar.js';
28
- import CreateScrollBar from './CreateScrollBar.js';
29
- import CreateTextArea from './CreateTextArea.js';
30
- import CreatePages from './CreatePages.js';
31
- import CreateToast from './CreateToast.js';
32
- import CreateKnob from './CreateKnob.js';
33
- import CreateHolyGrail from './CreateHolyGrail.js';
34
- import CreateMenu from './CreateMenu.js';
35
-
36
- var Builders = {
37
- Image: CreateImage,
38
- Sprite: CreateSprite,
39
- Video: CreateVideo,
40
- Text: CreateText,
41
- BBCodeText: CreateBBCodeText,
42
- RoundRectangle: CreateRoundRectangle,
43
- Ninepatch: CreateNinePatch,
44
- Ninepatch2: CreateNinePatch2,
45
- Canvas: CreateCanvas,
46
- CircleMaskImage: CreateCircleMaskImage,
47
- Space: CreateSpace,
48
-
49
- Sizer: CreateSizer,
50
- FixWidthSizer: CreateFixWidthSizer,
51
- GridSizer: CreateGridSizer,
52
- OverlapSizer: CreateOverlapSizer,
53
-
54
- Buttons: CreateButtons,
55
- FixWidthButtons: CreateFixWidthButtons,
56
- GridButtons: CreateGridButtons,
57
-
58
- Label: CreateLabel,
59
- BadgeLabel: CreateBadgeLabel,
60
- Dialog: CreateDialog,
61
- TextBox: CreateTextBox,
62
- Slider: CreateSlider,
63
- NumberBar: CreateNumberBar,
64
- ScrollBar: CreateScrollBar,
65
- TextArea: CreateTextArea,
66
- Pages: CreatePages,
67
- Toast: CreateToast,
68
- Knob: CreateKnob,
69
- HolyGrail: CreateHolyGrail,
70
- Menu: CreateMenu,
71
- };
72
-
73
- /*
74
- function(scene, data, view, styles, customBuilders) {
75
- return gameObject;
76
- }
77
- */
78
-
79
- export default Builders;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateLabel.js DELETED
@@ -1,8 +0,0 @@
1
- import CreateAnyLabel from './utils/CreateAnyLabel.js';
2
- import Label from '../../label/Label.js';
3
-
4
- var CreateLabel = function (scene, data, view, styles, customBuilders) {
5
- return CreateAnyLabel(scene, data, view, styles, customBuilders, Label);
6
- }
7
-
8
- export default CreateLabel;
 
spaces/Allie7/Nose/Dockerfile DELETED
@@ -1,21 +0,0 @@
1
- FROM node:18-bullseye-slim
2
-
3
- RUN apt-get update && \
4
- apt-get install -y git
6
-
7
- RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
8
-
9
- WORKDIR /app
10
-
11
- RUN npm install
12
-
13
- COPY Dockerfile greeting.md env* ./
14
-
15
- RUN npm run build
16
-
17
- EXPOSE 7860
18
-
19
- ENV NODE_ENV=production
20
-
21
- CMD ["npm", "start" ]
 
spaces/AnTo2209/3D_Zeroshot_Neural_Style_Transfer/src/model/__init__.py DELETED
@@ -1,6 +0,0 @@
1
- from src.model.styleRF import StyleRF
2
- from src.utils.registry import Registry
3
-
4
- MODEL_REGISTRY = Registry("MODEL")
5
-
6
- MODEL_REGISTRY.register(StyleRF)
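The file above wires `StyleRF` into a model registry. A hypothetical lookup sketch, assuming the `Registry` helper exposes a `get` accessor (its definition lives in `src.utils.registry` and is not shown here):

```python
# Hypothetical: assumes Registry exposes a get() lookup, the usual pattern
# for registry helpers; StyleRF's constructor arguments are config-dependent.
model_cls = MODEL_REGISTRY.get("StyleRF")
# model = model_cls(config)  # arguments depend on StyleRF's expected config
```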
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_tensorrt_img2img.py DELETED
@@ -1,1055 +0,0 @@
1
- #
2
- # Copyright 2023 The HuggingFace Inc. team.
3
- # SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
4
- # SPDX-License-Identifier: Apache-2.0
5
- #
6
- # Licensed under the Apache License, Version 2.0 (the "License");
7
- # you may not use this file except in compliance with the License.
8
- # You may obtain a copy of the License at
9
- #
10
- # http://www.apache.org/licenses/LICENSE-2.0
11
- #
12
- # Unless required by applicable law or agreed to in writing, software
13
- # distributed under the License is distributed on an "AS IS" BASIS,
14
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15
- # See the License for the specific language governing permissions and
16
- # limitations under the License.
17
-
18
- import gc
19
- import os
20
- from collections import OrderedDict
21
- from copy import copy
22
- from typing import List, Optional, Union
23
-
24
- import numpy as np
25
- import onnx
26
- import onnx_graphsurgeon as gs
27
- import PIL
28
- import tensorrt as trt
29
- import torch
30
- from huggingface_hub import snapshot_download
31
- from onnx import shape_inference
32
- from polygraphy import cuda
33
- from polygraphy.backend.common import bytes_from_path
34
- from polygraphy.backend.onnx.loader import fold_constants
35
- from polygraphy.backend.trt import (
36
- CreateConfig,
37
- Profile,
38
- engine_from_bytes,
39
- engine_from_network,
40
- network_from_onnx_path,
41
- save_engine,
42
- )
43
- from polygraphy.backend.trt import util as trt_util
44
- from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
45
-
46
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
47
- from diffusers.pipelines.stable_diffusion import (
48
- StableDiffusionImg2ImgPipeline,
49
- StableDiffusionPipelineOutput,
50
- StableDiffusionSafetyChecker,
51
- )
52
- from diffusers.schedulers import DDIMScheduler
53
- from diffusers.utils import DIFFUSERS_CACHE, logging
54
-
55
-
56
- """
57
- Installation instructions
58
- python3 -m pip install --upgrade transformers diffusers>=0.16.0
59
- python3 -m pip install --upgrade tensorrt>=8.6.1
60
- python3 -m pip install --upgrade polygraphy>=0.47.0 onnx-graphsurgeon --extra-index-url https://pypi.ngc.nvidia.com
61
- python3 -m pip install onnxruntime
62
- """
63
-
64
- TRT_LOGGER = trt.Logger(trt.Logger.ERROR)
65
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
66
-
67
- # Map of numpy dtype -> torch dtype
68
- numpy_to_torch_dtype_dict = {
69
- np.uint8: torch.uint8,
70
- np.int8: torch.int8,
71
- np.int16: torch.int16,
72
- np.int32: torch.int32,
73
- np.int64: torch.int64,
74
- np.float16: torch.float16,
75
- np.float32: torch.float32,
76
- np.float64: torch.float64,
77
- np.complex64: torch.complex64,
78
- np.complex128: torch.complex128,
79
- }
80
- if np.version.full_version >= "1.24.0":
81
- numpy_to_torch_dtype_dict[np.bool_] = torch.bool
82
- else:
83
- numpy_to_torch_dtype_dict[np.bool] = torch.bool
84
-
85
- # Map of torch dtype -> numpy dtype
86
- torch_to_numpy_dtype_dict = {value: key for (key, value) in numpy_to_torch_dtype_dict.items()}
87
-
88
-
89
- def device_view(t):
90
- return cuda.DeviceView(ptr=t.data_ptr(), shape=t.shape, dtype=torch_to_numpy_dtype_dict[t.dtype])
91
-
92
-
93
- def preprocess_image(image):
94
- """
95
- image: PIL.Image.Image
96
- """
97
- w, h = image.size
98
- w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
99
- image = image.resize((w, h))
100
- image = np.array(image).astype(np.float32) / 255.0
101
- image = image[None].transpose(0, 3, 1, 2)
102
- image = torch.from_numpy(image).contiguous()
103
- return 2.0 * image - 1.0
104
-
105
-
106
- class Engine:
107
- def __init__(self, engine_path):
108
- self.engine_path = engine_path
109
- self.engine = None
110
- self.context = None
111
- self.buffers = OrderedDict()
112
- self.tensors = OrderedDict()
113
-
114
- def __del__(self):
115
- [buf.free() for buf in self.buffers.values() if isinstance(buf, cuda.DeviceArray)]
116
- del self.engine
117
- del self.context
118
- del self.buffers
119
- del self.tensors
120
-
121
- def build(
122
- self,
123
- onnx_path,
124
- fp16,
125
- input_profile=None,
126
- enable_preview=False,
127
- enable_all_tactics=False,
128
- timing_cache=None,
129
- workspace_size=0,
130
- ):
131
- logger.warning(f"Building TensorRT engine for {onnx_path}: {self.engine_path}")
132
- p = Profile()
133
- if input_profile:
134
- for name, dims in input_profile.items():
135
- assert len(dims) == 3
136
- p.add(name, min=dims[0], opt=dims[1], max=dims[2])
137
-
138
- config_kwargs = {}
139
-
140
- config_kwargs["preview_features"] = [trt.PreviewFeature.DISABLE_EXTERNAL_TACTIC_SOURCES_FOR_CORE_0805]
141
- if enable_preview:
142
- # Faster dynamic shapes made optional since it increases engine build time.
143
- config_kwargs["preview_features"].append(trt.PreviewFeature.FASTER_DYNAMIC_SHAPES_0805)
144
- if workspace_size > 0:
145
- config_kwargs["memory_pool_limits"] = {trt.MemoryPoolType.WORKSPACE: workspace_size}
146
- if not enable_all_tactics:
147
- config_kwargs["tactic_sources"] = []
148
-
149
- engine = engine_from_network(
150
- network_from_onnx_path(onnx_path, flags=[trt.OnnxParserFlag.NATIVE_INSTANCENORM]),
151
- config=CreateConfig(fp16=fp16, profiles=[p], load_timing_cache=timing_cache, **config_kwargs),
152
- save_timing_cache=timing_cache,
153
- )
154
- save_engine(engine, path=self.engine_path)
155
-
156
- def load(self):
157
- logger.warning(f"Loading TensorRT engine: {self.engine_path}")
158
- self.engine = engine_from_bytes(bytes_from_path(self.engine_path))
159
-
160
- def activate(self):
161
- self.context = self.engine.create_execution_context()
162
-
163
- def allocate_buffers(self, shape_dict=None, device="cuda"):
164
- for idx in range(trt_util.get_bindings_per_profile(self.engine)):
165
- binding = self.engine[idx]
166
- if shape_dict and binding in shape_dict:
167
- shape = shape_dict[binding]
168
- else:
169
- shape = self.engine.get_binding_shape(binding)
170
- dtype = trt.nptype(self.engine.get_binding_dtype(binding))
171
- if self.engine.binding_is_input(binding):
172
- self.context.set_binding_shape(idx, shape)
173
- tensor = torch.empty(tuple(shape), dtype=numpy_to_torch_dtype_dict[dtype]).to(device=device)
174
- self.tensors[binding] = tensor
175
- self.buffers[binding] = cuda.DeviceView(ptr=tensor.data_ptr(), shape=shape, dtype=dtype)
176
-
177
- def infer(self, feed_dict, stream):
178
- start_binding, end_binding = trt_util.get_active_profile_bindings(self.context)
179
- # shallow copy of ordered dict
180
- device_buffers = copy(self.buffers)
181
- for name, buf in feed_dict.items():
182
- assert isinstance(buf, cuda.DeviceView)
183
- device_buffers[name] = buf
184
- bindings = [0] * start_binding + [buf.ptr for buf in device_buffers.values()]
185
- noerror = self.context.execute_async_v2(bindings=bindings, stream_handle=stream.ptr)
186
- if not noerror:
187
- raise ValueError("ERROR: inference failed.")
188
-
189
- return self.tensors
190
-
191
-
192
- class Optimizer:
193
- def __init__(self, onnx_graph):
194
- self.graph = gs.import_onnx(onnx_graph)
195
-
196
- def cleanup(self, return_onnx=False):
197
- self.graph.cleanup().toposort()
198
- if return_onnx:
199
- return gs.export_onnx(self.graph)
200
-
201
- def select_outputs(self, keep, names=None):
202
- self.graph.outputs = [self.graph.outputs[o] for o in keep]
203
- if names:
204
- for i, name in enumerate(names):
205
- self.graph.outputs[i].name = name
206
-
207
- def fold_constants(self, return_onnx=False):
208
- onnx_graph = fold_constants(gs.export_onnx(self.graph), allow_onnxruntime_shape_inference=True)
209
- self.graph = gs.import_onnx(onnx_graph)
210
- if return_onnx:
211
- return onnx_graph
212
-
213
- def infer_shapes(self, return_onnx=False):
214
- onnx_graph = gs.export_onnx(self.graph)
215
- if onnx_graph.ByteSize() > 2147483648:
216
- raise TypeError("ERROR: model size exceeds supported 2GB limit")
217
- else:
218
- onnx_graph = shape_inference.infer_shapes(onnx_graph)
219
-
220
- self.graph = gs.import_onnx(onnx_graph)
221
- if return_onnx:
222
- return onnx_graph
223
-
224
-
225
- class BaseModel:
226
- def __init__(self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77):
227
- self.model = model
228
- self.name = "SD Model"
229
- self.fp16 = fp16
230
- self.device = device
231
-
232
- self.min_batch = 1
233
- self.max_batch = max_batch_size
234
- self.min_image_shape = 256 # min image resolution: 256x256
235
- self.max_image_shape = 1024 # max image resolution: 1024x1024
236
- self.min_latent_shape = self.min_image_shape // 8
237
- self.max_latent_shape = self.max_image_shape // 8
238
-
239
- self.embedding_dim = embedding_dim
240
- self.text_maxlen = text_maxlen
241
-
242
- def get_model(self):
243
- return self.model
244
-
245
- def get_input_names(self):
246
- pass
247
-
248
- def get_output_names(self):
249
- pass
250
-
251
- def get_dynamic_axes(self):
252
- return None
253
-
254
- def get_sample_input(self, batch_size, image_height, image_width):
255
- pass
256
-
257
- def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
258
- return None
259
-
260
- def get_shape_dict(self, batch_size, image_height, image_width):
261
- return None
262
-
263
- def optimize(self, onnx_graph):
264
- opt = Optimizer(onnx_graph)
265
- opt.cleanup()
266
- opt.fold_constants()
267
- opt.infer_shapes()
268
- onnx_opt_graph = opt.cleanup(return_onnx=True)
269
- return onnx_opt_graph
270
-
271
- def check_dims(self, batch_size, image_height, image_width):
272
- assert batch_size >= self.min_batch and batch_size <= self.max_batch
273
- assert image_height % 8 == 0 and image_width % 8 == 0
274
- latent_height = image_height // 8
275
- latent_width = image_width // 8
276
- assert latent_height >= self.min_latent_shape and latent_height <= self.max_latent_shape
277
- assert latent_width >= self.min_latent_shape and latent_width <= self.max_latent_shape
278
- return (latent_height, latent_width)
279
-
280
- def get_minmax_dims(self, batch_size, image_height, image_width, static_batch, static_shape):
281
- min_batch = batch_size if static_batch else self.min_batch
282
- max_batch = batch_size if static_batch else self.max_batch
283
- latent_height = image_height // 8
284
- latent_width = image_width // 8
285
- min_image_height = image_height if static_shape else self.min_image_shape
286
- max_image_height = image_height if static_shape else self.max_image_shape
287
- min_image_width = image_width if static_shape else self.min_image_shape
288
- max_image_width = image_width if static_shape else self.max_image_shape
289
- min_latent_height = latent_height if static_shape else self.min_latent_shape
290
- max_latent_height = latent_height if static_shape else self.max_latent_shape
291
- min_latent_width = latent_width if static_shape else self.min_latent_shape
292
- max_latent_width = latent_width if static_shape else self.max_latent_shape
293
- return (
294
- min_batch,
295
- max_batch,
296
- min_image_height,
297
- max_image_height,
298
- min_image_width,
299
- max_image_width,
300
- min_latent_height,
301
- max_latent_height,
302
- min_latent_width,
303
- max_latent_width,
304
- )
305
-
306
-
307
- def getOnnxPath(model_name, onnx_dir, opt=True):
308
- return os.path.join(onnx_dir, model_name + (".opt" if opt else "") + ".onnx")
309
-
310
-
311
- def getEnginePath(model_name, engine_dir):
312
- return os.path.join(engine_dir, model_name + ".plan")
313
-
314
-
315
- def build_engines(
316
- models: dict,
317
- engine_dir,
318
- onnx_dir,
319
- onnx_opset,
320
- opt_image_height,
321
- opt_image_width,
322
- opt_batch_size=1,
323
- force_engine_rebuild=False,
324
- static_batch=False,
325
- static_shape=True,
326
- enable_preview=False,
327
- enable_all_tactics=False,
328
- timing_cache=None,
329
- max_workspace_size=0,
330
- ):
331
- built_engines = {}
332
- if not os.path.isdir(onnx_dir):
333
- os.makedirs(onnx_dir)
334
- if not os.path.isdir(engine_dir):
335
- os.makedirs(engine_dir)
336
-
337
- # Export models to ONNX
338
- for model_name, model_obj in models.items():
339
- engine_path = getEnginePath(model_name, engine_dir)
340
- if force_engine_rebuild or not os.path.exists(engine_path):
341
- logger.warning("Building Engines...")
342
- logger.warning("Engine build can take a while to complete")
343
- onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)
344
- onnx_opt_path = getOnnxPath(model_name, onnx_dir)
345
- if force_engine_rebuild or not os.path.exists(onnx_opt_path):
346
- if force_engine_rebuild or not os.path.exists(onnx_path):
347
- logger.warning(f"Exporting model: {onnx_path}")
348
- model = model_obj.get_model()
349
- with torch.inference_mode(), torch.autocast("cuda"):
350
- inputs = model_obj.get_sample_input(opt_batch_size, opt_image_height, opt_image_width)
351
- torch.onnx.export(
352
- model,
353
- inputs,
354
- onnx_path,
355
- export_params=True,
356
- opset_version=onnx_opset,
357
- do_constant_folding=True,
358
- input_names=model_obj.get_input_names(),
359
- output_names=model_obj.get_output_names(),
360
- dynamic_axes=model_obj.get_dynamic_axes(),
361
- )
362
- del model
363
- torch.cuda.empty_cache()
364
- gc.collect()
365
- else:
366
- logger.warning(f"Found cached model: {onnx_path}")
367
-
368
- # Optimize onnx
369
- if force_engine_rebuild or not os.path.exists(onnx_opt_path):
370
- logger.warning(f"Generating optimizing model: {onnx_opt_path}")
371
- onnx_opt_graph = model_obj.optimize(onnx.load(onnx_path))
372
- onnx.save(onnx_opt_graph, onnx_opt_path)
373
- else:
374
- logger.warning(f"Found cached optimized model: {onnx_opt_path} ")
375
-
376
- # Build TensorRT engines
377
- for model_name, model_obj in models.items():
378
- engine_path = getEnginePath(model_name, engine_dir)
379
- engine = Engine(engine_path)
380
- onnx_path = getOnnxPath(model_name, onnx_dir, opt=False)
381
- onnx_opt_path = getOnnxPath(model_name, onnx_dir)
382
-
383
- if force_engine_rebuild or not os.path.exists(engine.engine_path):
384
- engine.build(
385
- onnx_opt_path,
386
- fp16=True,
387
- input_profile=model_obj.get_input_profile(
388
- opt_batch_size,
389
- opt_image_height,
390
- opt_image_width,
391
- static_batch=static_batch,
392
- static_shape=static_shape,
393
- ),
394
- enable_preview=enable_preview,
395
- timing_cache=timing_cache,
396
- workspace_size=max_workspace_size,
397
- )
398
- built_engines[model_name] = engine
399
-
400
- # Load and activate TensorRT engines
401
- for model_name, model_obj in models.items():
402
- engine = built_engines[model_name]
403
- engine.load()
404
- engine.activate()
405
-
406
- return built_engines
407
-
408
-
409
- def runEngine(engine, feed_dict, stream):
410
- return engine.infer(feed_dict, stream)
411
-
412
-
413
- class CLIP(BaseModel):
414
- def __init__(self, model, device, max_batch_size, embedding_dim):
415
- super(CLIP, self).__init__(
416
- model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
417
- )
418
- self.name = "CLIP"
419
-
420
- def get_input_names(self):
421
- return ["input_ids"]
422
-
423
- def get_output_names(self):
424
- return ["text_embeddings", "pooler_output"]
425
-
426
- def get_dynamic_axes(self):
427
- return {"input_ids": {0: "B"}, "text_embeddings": {0: "B"}}
428
-
429
- def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
430
- self.check_dims(batch_size, image_height, image_width)
431
- min_batch, max_batch, _, _, _, _, _, _, _, _ = self.get_minmax_dims(
432
- batch_size, image_height, image_width, static_batch, static_shape
433
- )
434
- return {
435
- "input_ids": [(min_batch, self.text_maxlen), (batch_size, self.text_maxlen), (max_batch, self.text_maxlen)]
436
- }
437
-
438
- def get_shape_dict(self, batch_size, image_height, image_width):
439
- self.check_dims(batch_size, image_height, image_width)
440
- return {
441
- "input_ids": (batch_size, self.text_maxlen),
442
- "text_embeddings": (batch_size, self.text_maxlen, self.embedding_dim),
443
- }
444
-
445
- def get_sample_input(self, batch_size, image_height, image_width):
446
- self.check_dims(batch_size, image_height, image_width)
447
- return torch.zeros(batch_size, self.text_maxlen, dtype=torch.int32, device=self.device)
448
-
449
- def optimize(self, onnx_graph):
450
- opt = Optimizer(onnx_graph)
451
- opt.select_outputs([0]) # delete graph output#1
452
- opt.cleanup()
453
- opt.fold_constants()
454
- opt.infer_shapes()
455
- opt.select_outputs([0], names=["text_embeddings"]) # rename network output
456
- opt_onnx_graph = opt.cleanup(return_onnx=True)
457
- return opt_onnx_graph
458
-
459
-
460
- def make_CLIP(model, device, max_batch_size, embedding_dim, inpaint=False):
461
- return CLIP(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
462
-
463
-
464
- class UNet(BaseModel):
465
- def __init__(
466
- self, model, fp16=False, device="cuda", max_batch_size=16, embedding_dim=768, text_maxlen=77, unet_dim=4
467
- ):
468
- super(UNet, self).__init__(
469
- model=model,
470
- fp16=fp16,
471
- device=device,
472
- max_batch_size=max_batch_size,
473
- embedding_dim=embedding_dim,
474
- text_maxlen=text_maxlen,
475
- )
476
- self.unet_dim = unet_dim
477
- self.name = "UNet"
478
-
479
- def get_input_names(self):
480
- return ["sample", "timestep", "encoder_hidden_states"]
481
-
482
- def get_output_names(self):
483
- return ["latent"]
484
-
485
- def get_dynamic_axes(self):
486
- return {
487
- "sample": {0: "2B", 2: "H", 3: "W"},
488
- "encoder_hidden_states": {0: "2B"},
489
- "latent": {0: "2B", 2: "H", 3: "W"},
490
- }
491
-
492
- def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
493
- latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
494
- (
495
- min_batch,
496
- max_batch,
497
- _,
498
- _,
499
- _,
500
- _,
501
- min_latent_height,
502
- max_latent_height,
503
- min_latent_width,
504
- max_latent_width,
505
- ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
506
- return {
507
- "sample": [
508
- (2 * min_batch, self.unet_dim, min_latent_height, min_latent_width),
509
- (2 * batch_size, self.unet_dim, latent_height, latent_width),
510
- (2 * max_batch, self.unet_dim, max_latent_height, max_latent_width),
511
- ],
512
- "encoder_hidden_states": [
513
- (2 * min_batch, self.text_maxlen, self.embedding_dim),
514
- (2 * batch_size, self.text_maxlen, self.embedding_dim),
515
- (2 * max_batch, self.text_maxlen, self.embedding_dim),
516
- ],
517
- }
518
-
519
- def get_shape_dict(self, batch_size, image_height, image_width):
520
- latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
521
- return {
522
- "sample": (2 * batch_size, self.unet_dim, latent_height, latent_width),
523
- "encoder_hidden_states": (2 * batch_size, self.text_maxlen, self.embedding_dim),
524
- "latent": (2 * batch_size, 4, latent_height, latent_width),
525
- }
526
-
527
- def get_sample_input(self, batch_size, image_height, image_width):
528
- latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
529
- dtype = torch.float16 if self.fp16 else torch.float32
530
- return (
531
- torch.randn(
532
- 2 * batch_size, self.unet_dim, latent_height, latent_width, dtype=torch.float32, device=self.device
533
- ),
534
- torch.tensor([1.0], dtype=torch.float32, device=self.device),
535
- torch.randn(2 * batch_size, self.text_maxlen, self.embedding_dim, dtype=dtype, device=self.device),
536
- )
537
-
538
-
539
- def make_UNet(model, device, max_batch_size, embedding_dim, inpaint=False):
540
- return UNet(
541
- model,
542
- fp16=True,
543
- device=device,
544
- max_batch_size=max_batch_size,
545
- embedding_dim=embedding_dim,
546
- unet_dim=(9 if inpaint else 4),
547
- )
548
-
549
-
550
- class VAE(BaseModel):
551
- def __init__(self, model, device, max_batch_size, embedding_dim):
552
- super(VAE, self).__init__(
553
- model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
554
- )
555
- self.name = "VAE decoder"
556
-
557
- def get_input_names(self):
558
- return ["latent"]
559
-
560
- def get_output_names(self):
561
- return ["images"]
562
-
563
- def get_dynamic_axes(self):
564
- return {"latent": {0: "B", 2: "H", 3: "W"}, "images": {0: "B", 2: "8H", 3: "8W"}}
565
-
566
- def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
567
- latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
568
- (
569
- min_batch,
570
- max_batch,
571
- _,
572
- _,
573
- _,
574
- _,
575
- min_latent_height,
576
- max_latent_height,
577
- min_latent_width,
578
- max_latent_width,
579
- ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)
580
- return {
581
- "latent": [
582
- (min_batch, 4, min_latent_height, min_latent_width),
583
- (batch_size, 4, latent_height, latent_width),
584
- (max_batch, 4, max_latent_height, max_latent_width),
585
- ]
586
- }
587
-
588
- def get_shape_dict(self, batch_size, image_height, image_width):
589
- latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
590
- return {
591
- "latent": (batch_size, 4, latent_height, latent_width),
592
- "images": (batch_size, 3, image_height, image_width),
593
- }
594
-
595
- def get_sample_input(self, batch_size, image_height, image_width):
596
- latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
597
- return torch.randn(batch_size, 4, latent_height, latent_width, dtype=torch.float32, device=self.device)
598
-
599
-
600
- def make_VAE(model, device, max_batch_size, embedding_dim, inpaint=False):
601
- return VAE(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)
602
-
603
-
604
- class TorchVAEEncoder(torch.nn.Module):
605
    def __init__(self, model):
        super().__init__()
        self.vae_encoder = model

    def forward(self, x):
        return self.vae_encoder.encode(x).latent_dist.sample()


class VAEEncoder(BaseModel):
    def __init__(self, model, device, max_batch_size, embedding_dim):
        super(VAEEncoder, self).__init__(
            model=model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim
        )
        self.name = "VAE encoder"

    def get_model(self):
        vae_encoder = TorchVAEEncoder(self.model)
        return vae_encoder

    def get_input_names(self):
        return ["images"]

    def get_output_names(self):
        return ["latent"]

    def get_dynamic_axes(self):
        return {"images": {0: "B", 2: "8H", 3: "8W"}, "latent": {0: "B", 2: "H", 3: "W"}}

    def get_input_profile(self, batch_size, image_height, image_width, static_batch, static_shape):
        assert batch_size >= self.min_batch and batch_size <= self.max_batch
        min_batch = batch_size if static_batch else self.min_batch
        max_batch = batch_size if static_batch else self.max_batch
        self.check_dims(batch_size, image_height, image_width)
        (
            min_batch,
            max_batch,
            min_image_height,
            max_image_height,
            min_image_width,
            max_image_width,
            _,
            _,
            _,
            _,
        ) = self.get_minmax_dims(batch_size, image_height, image_width, static_batch, static_shape)

        return {
            "images": [
                (min_batch, 3, min_image_height, min_image_width),
                (batch_size, 3, image_height, image_width),
                (max_batch, 3, max_image_height, max_image_width),
            ]
        }

    def get_shape_dict(self, batch_size, image_height, image_width):
        latent_height, latent_width = self.check_dims(batch_size, image_height, image_width)
        return {
            "images": (batch_size, 3, image_height, image_width),
            "latent": (batch_size, 4, latent_height, latent_width),
        }

    def get_sample_input(self, batch_size, image_height, image_width):
        self.check_dims(batch_size, image_height, image_width)
        return torch.randn(batch_size, 3, image_height, image_width, dtype=torch.float32, device=self.device)


def make_VAEEncoder(model, device, max_batch_size, embedding_dim, inpaint=False):
    return VAEEncoder(model, device=device, max_batch_size=max_batch_size, embedding_dim=embedding_dim)


class TensorRTStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
    r"""
    Pipeline for image-to-image generation using TensorRT accelerated Stable Diffusion.

    This model inherits from [`StableDiffusionImg2ImgPipeline`]. Check the superclass documentation for the generic
    methods the library implements for all the pipelines (such as downloading or saving, running on a particular
    device, etc.)

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: DDIMScheduler,
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
        requires_safety_checker: bool = True,
        stages=["clip", "unet", "vae", "vae_encoder"],
        image_height: int = 512,
        image_width: int = 512,
        max_batch_size: int = 16,
        # ONNX export parameters
        onnx_opset: int = 17,
        onnx_dir: str = "onnx",
        # TensorRT engine build parameters
        engine_dir: str = "engine",
        build_preview_features: bool = True,
        force_engine_rebuild: bool = False,
        timing_cache: str = "timing_cache",
    ):
        super().__init__(
            vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker
        )

        self.vae.forward = self.vae.decode

        self.stages = stages
        self.image_height, self.image_width = image_height, image_width
        self.inpaint = False
        self.onnx_opset = onnx_opset
        self.onnx_dir = onnx_dir
        self.engine_dir = engine_dir
        self.force_engine_rebuild = force_engine_rebuild
        self.timing_cache = timing_cache
        self.build_static_batch = False
        self.build_dynamic_shape = False
        self.build_preview_features = build_preview_features

        self.max_batch_size = max_batch_size
        # TODO: Restrict batch size to 4 for larger image dimensions as a WAR for TensorRT limitation.
        if self.build_dynamic_shape or self.image_height > 512 or self.image_width > 512:
            self.max_batch_size = 4

        self.stream = None  # loaded in loadResources()
        self.models = {}  # loaded in __loadModels()
        self.engine = {}  # loaded in build_engines()

    def __loadModels(self):
        # Load pipeline models
        self.embedding_dim = self.text_encoder.config.hidden_size
        models_args = {
            "device": self.torch_device,
            "max_batch_size": self.max_batch_size,
            "embedding_dim": self.embedding_dim,
            "inpaint": self.inpaint,
        }
        if "clip" in self.stages:
            self.models["clip"] = make_CLIP(self.text_encoder, **models_args)
        if "unet" in self.stages:
            self.models["unet"] = make_UNet(self.unet, **models_args)
        if "vae" in self.stages:
            self.models["vae"] = make_VAE(self.vae, **models_args)
        if "vae_encoder" in self.stages:
            self.models["vae_encoder"] = make_VAEEncoder(self.vae, **models_args)

    @classmethod
    def set_cached_folder(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)

        cls.cached_folder = (
            pretrained_model_name_or_path
            if os.path.isdir(pretrained_model_name_or_path)
            else snapshot_download(
                pretrained_model_name_or_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
            )
        )

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, silence_dtype_warnings: bool = False):
        super().to(torch_device, silence_dtype_warnings=silence_dtype_warnings)

        self.onnx_dir = os.path.join(self.cached_folder, self.onnx_dir)
        self.engine_dir = os.path.join(self.cached_folder, self.engine_dir)
        self.timing_cache = os.path.join(self.cached_folder, self.timing_cache)

        # set device
        self.torch_device = self._execution_device
        logger.warning(f"Running inference on device: {self.torch_device}")

        # load models
        self.__loadModels()

        # build engines
        self.engine = build_engines(
            self.models,
            self.engine_dir,
            self.onnx_dir,
            self.onnx_opset,
            opt_image_height=self.image_height,
            opt_image_width=self.image_width,
            force_engine_rebuild=self.force_engine_rebuild,
            static_batch=self.build_static_batch,
            static_shape=not self.build_dynamic_shape,
            enable_preview=self.build_preview_features,
            timing_cache=self.timing_cache,
        )

        return self

    def __initialize_timesteps(self, timesteps, strength):
        self.scheduler.set_timesteps(timesteps)
        offset = self.scheduler.steps_offset if hasattr(self.scheduler, "steps_offset") else 0
        init_timestep = int(timesteps * strength) + offset
        init_timestep = min(init_timestep, timesteps)
        t_start = max(timesteps - init_timestep + offset, 0)
        timesteps = self.scheduler.timesteps[t_start:].to(self.torch_device)
        return timesteps, t_start

    def __preprocess_images(self, batch_size, images=()):
        init_images = []
        for image in images:
            image = image.to(self.torch_device).float()
            image = image.repeat(batch_size, 1, 1, 1)
            init_images.append(image)
        return tuple(init_images)

    def __encode_image(self, init_image):
        init_latents = runEngine(self.engine["vae_encoder"], {"images": device_view(init_image)}, self.stream)[
            "latent"
        ]
        init_latents = 0.18215 * init_latents
        return init_latents

    def __encode_prompt(self, prompt, negative_prompt):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
        """
        # Tokenize prompt
        text_input_ids = (
            self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            .input_ids.type(torch.int32)
            .to(self.torch_device)
        )

        text_input_ids_inp = device_view(text_input_ids)
        # NOTE: output tensor for CLIP must be cloned because it will be overwritten when called again for negative prompt
        text_embeddings = runEngine(self.engine["clip"], {"input_ids": text_input_ids_inp}, self.stream)[
            "text_embeddings"
        ].clone()

        # Tokenize negative prompt
        uncond_input_ids = (
            self.tokenizer(
                negative_prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            .input_ids.type(torch.int32)
            .to(self.torch_device)
        )
        uncond_input_ids_inp = device_view(uncond_input_ids)
        uncond_embeddings = runEngine(self.engine["clip"], {"input_ids": uncond_input_ids_inp}, self.stream)[
            "text_embeddings"
        ]

        # Concatenate the unconditional and text embeddings into a single batch to avoid doing two forward passes
        # for classifier-free guidance
        text_embeddings = torch.cat([uncond_embeddings, text_embeddings]).to(dtype=torch.float16)

        return text_embeddings

    def __denoise_latent(
        self, latents, text_embeddings, timesteps=None, step_offset=0, mask=None, masked_image_latents=None
    ):
        if not isinstance(timesteps, torch.Tensor):
            timesteps = self.scheduler.timesteps
        for step_index, timestep in enumerate(timesteps):
            # Expand the latents if we are doing classifier-free guidance
            latent_model_input = torch.cat([latents] * 2)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, timestep)
            if isinstance(mask, torch.Tensor):
                latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)

            # Predict the noise residual
            timestep_float = timestep.float() if timestep.dtype != torch.float32 else timestep

            sample_inp = device_view(latent_model_input)
            timestep_inp = device_view(timestep_float)
            embeddings_inp = device_view(text_embeddings)
            noise_pred = runEngine(
                self.engine["unet"],
                {"sample": sample_inp, "timestep": timestep_inp, "encoder_hidden_states": embeddings_inp},
                self.stream,
            )["latent"]

            # Perform guidance
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)

            latents = self.scheduler.step(noise_pred, timestep, latents).prev_sample

        latents = 1.0 / 0.18215 * latents
        return latents

    def __decode_latent(self, latents):
        images = runEngine(self.engine["vae"], {"latent": device_view(latents)}, self.stream)["images"]
        images = (images / 2 + 0.5).clamp(0, 1)
        return images.cpu().permute(0, 2, 3, 1).float().numpy()

    def __loadResources(self, image_height, image_width, batch_size):
        self.stream = cuda.Stream()

        # Allocate buffers for TensorRT engine bindings
        for model_name, obj in self.models.items():
            self.engine[model_name].allocate_buffers(
                shape_dict=obj.get_shape_dict(batch_size, image_height, image_width), device=self.torch_device
            )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            image (`PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to be used as the starting point for the
                image-to-image generation process.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
                will be used as a starting point, adding more noise to it the larger the `strength`. The number of
                denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
                be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages the model to generate images that are closely linked to the text
                `prompt`, usually at the expense of lower image quality.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.

        """
        self.generator = generator
        self.denoising_steps = num_inference_steps
        self.guidance_scale = guidance_scale

        # Pre-compute latent input scales and linear multistep coefficients
        self.scheduler.set_timesteps(self.denoising_steps, device=self.torch_device)

        # Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
            prompt = [prompt]
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"Expected prompt to be of type list or str but got {type(prompt)}")

        if negative_prompt is None:
            negative_prompt = [""] * batch_size

        if negative_prompt is not None and isinstance(negative_prompt, str):
            negative_prompt = [negative_prompt]

        assert len(prompt) == len(negative_prompt)

        if batch_size > self.max_batch_size:
            raise ValueError(
                f"Batch size {len(prompt)} is larger than allowed {self.max_batch_size}. If dynamic shape is used, then maximum batch size is 4"
            )

        # load resources
        self.__loadResources(self.image_height, self.image_width, batch_size)

        with torch.inference_mode(), torch.autocast("cuda"), trt.Runtime(TRT_LOGGER):
            # Initialize timesteps
            timesteps, t_start = self.__initialize_timesteps(self.denoising_steps, strength)
            latent_timestep = timesteps[:1].repeat(batch_size)

            # Pre-process input image
            if isinstance(image, PIL.Image.Image):
                image = preprocess_image(image)
            init_image = self.__preprocess_images(batch_size, (image,))[0]

            # VAE encode init image
            init_latents = self.__encode_image(init_image)

            # Add noise to latents using timesteps
            noise = torch.randn(
                init_latents.shape, generator=self.generator, device=self.torch_device, dtype=torch.float32
            )
            latents = self.scheduler.add_noise(init_latents, noise, latent_timestep)

            # CLIP text encoder
            text_embeddings = self.__encode_prompt(prompt, negative_prompt)

            # UNet denoiser
            latents = self.__denoise_latent(latents, text_embeddings, timesteps=timesteps, step_offset=t_start)

            # VAE decode latent
            images = self.__decode_latent(latents)

        images = self.numpy_to_pil(images)
        return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=None)
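A minimal usage sketch for the pipeline above, assuming a CUDA machine with TensorRT installed; the checkpoint id, image URL, and prompt are illustrative, not prescribed by the file:

import torch
from diffusers import DDIMScheduler
from diffusers.utils import load_image

# Illustrative SD 1.x checkpoint; any compatible checkpoint should work the same way.
model_id = "runwayml/stable-diffusion-v1-5"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")

pipe = TensorRTStableDiffusionImg2ImgPipeline.from_pretrained(
    model_id, scheduler=scheduler, torch_dtype=torch.float16
)
# set_cached_folder() must run before to(), since the ONNX and engine
# directories are resolved relative to the cached snapshot.
pipe.set_cached_folder(model_id)
pipe = pipe.to("cuda")  # exports ONNX and builds the TensorRT engines here

# The default engines are static 512x512, so resize the init image accordingly.
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/sketch-mountains-input.jpg"
).resize((512, 512))
image = pipe("a fantasy landscape, trending on artstation", image=init_image, strength=0.75).images[0]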
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/convert_models_diffuser_to_diffusers.py DELETED
@@ -1,100 +0,0 @@
import json
import os

import torch

from diffusers import UNet1DModel


os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)

os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)


def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")

    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)


def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)

    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)


if __name__ == "__main__":
    unet(32)
    # unet(128)
    value_function()
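The `dict(zip(...))` remapping in both converters above works only because Python dicts preserve insertion order and the two models register equivalent parameters in the same order; a minimal sketch of that pattern, with hypothetical key names:

# Order-based key remapping: source and target must enumerate parameters in
# the same order, otherwise weights land on the wrong layers.
src_state = {"blocks.0.weight": 1.0, "blocks.0.bias": 0.5}  # hypothetical source keys
dst_keys = ["down.0.weight", "down.0.bias"]  # hypothetical target keys

mapping = dict(zip(src_state.keys(), dst_keys))
remapped = {mapping[k]: v for k, v in src_state.items()}
assert list(remapped) == dst_keys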
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/__init__.py DELETED
@@ -1,291 +0,0 @@
__version__ = "0.19.3"

from .configuration_utils import ConfigMixin
from .utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_inflect_available,
    is_invisible_watermark_available,
    is_k_diffusion_available,
    is_k_diffusion_version,
    is_librosa_available,
    is_note_seq_available,
    is_onnx_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
    is_transformers_available,
    is_transformers_version,
    is_unidecode_available,
    logging,
)


try:
    if not is_onnx_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_onnx_objects import *  # noqa F403
else:
    from .pipelines import OnnxRuntimeModel

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_pt_objects import *  # noqa F403
else:
    from .models import (
        AsymmetricAutoencoderKL,
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        MultiAdapter,
        PriorTransformer,
        T2IAdapter,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
    from .optimization import (
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
        get_scheduler,
    )
    from .pipelines import (
        AudioPipelineOutput,
        AutoPipelineForImage2Image,
        AutoPipelineForInpainting,
        AutoPipelineForText2Image,
        ConsistencyModelPipeline,
        DanceDiffusionPipeline,
        DDIMPipeline,
        DDPMPipeline,
        DiffusionPipeline,
        DiTPipeline,
        ImagePipelineOutput,
        KarrasVePipeline,
        LDMPipeline,
        LDMSuperResolutionPipeline,
        PNDMPipeline,
        RePaintPipeline,
        ScoreSdeVePipeline,
    )
    from .schedulers import (
        CMStochasticIterativeScheduler,
        DDIMInverseScheduler,
        DDIMParallelScheduler,
        DDIMScheduler,
        DDPMParallelScheduler,
        DDPMScheduler,
        DEISMultistepScheduler,
        DPMSolverMultistepInverseScheduler,
        DPMSolverMultistepScheduler,
        DPMSolverSinglestepScheduler,
        EulerAncestralDiscreteScheduler,
        EulerDiscreteScheduler,
        HeunDiscreteScheduler,
        IPNDMScheduler,
        KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
        PNDMScheduler,
        RePaintScheduler,
        SchedulerMixin,
        ScoreSdeVeScheduler,
        UnCLIPScheduler,
        UniPCMultistepScheduler,
        VQDiffusionScheduler,
    )
    from .training_utils import EMAModel

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .schedulers import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .schedulers import DPMSolverSDEScheduler

try:
    if not (is_torch_available() and is_transformers_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyCombinedPipeline,
        KandinskyImg2ImgCombinedPipeline,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintCombinedPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22CombinedPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgCombinedPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintCombinedPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAdapterPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableDiffusionXLControlNetPipeline,
        StableDiffusionXLImg2ImgPipeline,
        StableDiffusionXLInpaintPipeline,
        StableDiffusionXLInstructPix2PixPipeline,
        StableDiffusionXLPipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
    )

try:
    if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import *  # noqa F403
else:
    from .pipelines import StableDiffusionKDiffusionPipeline

try:
    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_and_onnx_objects import *  # noqa F403
else:
    from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
        OnnxStableDiffusionInpaintPipeline,
        OnnxStableDiffusionInpaintPipelineLegacy,
        OnnxStableDiffusionPipeline,
        OnnxStableDiffusionUpscalePipeline,
        StableDiffusionOnnxPipeline,
    )

try:
    if not (is_torch_available() and is_librosa_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_librosa_objects import *  # noqa F403
else:
    from .pipelines import AudioDiffusionPipeline, Mel

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .pipelines import SpectrogramDiffusionPipeline

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_flax_objects import *  # noqa F403
else:
    from .models.controlnet_flax import FlaxControlNetModel
    from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .models.vae_flax import FlaxAutoencoderKL
    from .pipelines import FlaxDiffusionPipeline
    from .schedulers import (
        FlaxDDIMScheduler,
        FlaxDDPMScheduler,
        FlaxDPMSolverMultistepScheduler,
        FlaxKarrasVeScheduler,
        FlaxLMSDiscreteScheduler,
        FlaxPNDMScheduler,
        FlaxSchedulerMixin,
        FlaxScoreSdeVeScheduler,
    )


try:
    if not (is_flax_available() and is_transformers_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_flax_and_transformers_objects import *  # noqa F403
else:
    from .pipelines import (
        FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
        FlaxStableDiffusionInpaintPipeline,
        FlaxStableDiffusionPipeline,
    )

try:
    if not (is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_note_seq_objects import *  # noqa F403
else:
    from .pipelines import MidiProcessor
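Every guarded block in the `__init__.py` above follows the same optional-dependency pattern; a condensed sketch of one such guard, using names the file itself imports:

# Probe the optional backend first; on failure, fall back to dummy placeholder
# objects that raise a helpful error only when used, so `import diffusers`
# itself never fails on a machine missing that backend.
from .utils import OptionalDependencyNotAvailable, is_torch_available

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_pt_objects import *  # noqa F403  (placeholder objects)
else:
    from .models import UNet2DModel  # real implementation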
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_controlnet_img2img.py DELETED
@@ -1,409 +0,0 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Callable, List, Optional, Union

import numpy as np
import PIL
import torch
from PIL import Image

from ...models import UNet2DConditionModel, VQModel
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorEmb2EmbPipeline, KandinskyV22ControlnetImg2ImgPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)["depth"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline("depth-estimation")

        >>> pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to("cuda")

        >>> pipe = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")

        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... ).resize((768, 768))


        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

        >>> prompt = "A robot, 4k photo"
        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

        >>> generator = torch.Generator(device="cuda").manual_seed(43)

        >>> img_emb = pipe_prior(prompt=prompt, image=img, strength=0.85, generator=generator)
        >>> negative_emb = pipe_prior(prompt=negative_prior_prompt, image=img, strength=1, generator=generator)

        >>> images = pipe(
        ...     image=img,
        ...     strength=0.5,
        ...     image_embeds=img_emb.image_embeds,
        ...     negative_image_embeds=negative_emb.image_embeds,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save("robot_cat.png")
        ```
"""


# Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


# Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_img2img.prepare_image
def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image


class KandinskyV22ControlnetImg2ImgPipeline(DiffusionPipeline):
    """
    Pipeline for ControlNet-conditioned image-to-image generation using Kandinsky 2.2.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        scheduler ([`DDPMScheduler`]):
            A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]):
            Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]):
            MoVQ Decoder to generate the image from the latents.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_img2img.KandinskyImg2ImgPipeline.get_timesteps
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    # Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2_img2img.KandinskyV22Img2ImgPipeline.prepare_latents
    def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image

        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )

            elif isinstance(generator, list):
                init_latents = [
                    self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = self.movq.encode(image).latent_dist.sample(generator)

            init_latents = self.movq.config.scaling_factor * init_latents

        init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents

        return latents

    # Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.KandinskyV22Pipeline.enable_model_cpu_offload
    def enable_model_cpu_offload(self, gpu_id=0):
        r"""
        Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
        to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
        method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
        `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        strength: float = 0.3,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        return_dict: bool = True,
    ):
        """
        Function invoked when calling the pipeline for generation.

        Args:
            image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
                The CLIP image embeddings for the text prompt, which will be used to condition the image generation.
            image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process. Can also accept image latents as `image`; if latents are passed directly, they will not be
                encoded again.
            strength (`float`, *optional*, defaults to 0.3):
                Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
                will be used as a starting point, adding more noise to it the larger the `strength`. The number of
                denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
                be maximum and the denoising process will run for the full number of iterations specified in
                `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
            hint (`torch.FloatTensor`):
                The ControlNet condition.
            negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
                The CLIP image embeddings for the negative text prompt, which will be used to condition the image
                generation.
            height (`int`, *optional*, defaults to 512):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to 512):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 100):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            guidance_scale (`float`, *optional*, defaults to 4.0):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages the model to generate images that are closely linked to the text
                `prompt`, usually at the expense of lower image quality.
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
                (`np.array`) or `"pt"` (`torch.Tensor`).
            callback (`Callable`, *optional*):
                A function that will be called every `callback_steps` steps during inference. The function is called
                with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
            callback_steps (`int`, *optional*, defaults to 1):
                The frequency at which the `callback` function is called. If not specified, the callback is called at
                every step.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

        Examples:

        Returns:
            [`~pipelines.ImagePipelineOutput`] or `tuple`
        """
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0]

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        if not isinstance(image, list):
            image = [image]
        if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
            raise ValueError(
                f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
            )

        image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
        image = image.to(dtype=image_embeds.dtype, device=device)

        latents = self.movq.encode(image)["latents"]
        latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
        latents = self.prepare_latents(
            latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
        )
        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1

            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
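A quick worked example for the `downscale_height_and_width` helper defined near the top of the file above: with the default `scale_factor=8`, each dimension is floor-divided by 64 (rounding up on any remainder) and scaled back by 8, which yields the latent-grid-compatible sizes.

# 768 // 8**2 = 12 with no remainder, so 768 maps to 12 * 8 = 96.
assert downscale_height_and_width(768, 768) == (96, 96)
# 770 // 64 = 12 remainder 2, so the dimension rounds up: (12 + 1) * 8 = 104.
assert downscale_height_and_width(770, 770) == (104, 104)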
spaces/Andy1621/uniformer_image_detection/configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py DELETED
@@ -1,5 +0,0 @@
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
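A small sketch of how such an MMDetection config is typically consumed, assuming an mmcv 1.x install; `Config.fromfile` resolves the `_base_` inheritance and merges the `dcn` overrides into the inherited Mask R-CNN model dict:

from mmcv import Config  # mmcv 1.x API, assumed available

cfg = Config.fromfile("configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py")
# stage_with_dcn=(False, True, True, True) enables DCNv2 in ResNet stages c3-c5.
print(cfg.model.backbone.dcn)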
spaces/Andy1621/uniformer_image_detection/mmdet/models/dense_heads/anchor_free_head.py DELETED
@@ -1,340 +0,0 @@
from abc import abstractmethod

import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, bias_init_with_prob, normal_init
from mmcv.runner import force_fp32

from mmdet.core import multi_apply
from ..builder import HEADS, build_loss
from .base_dense_head import BaseDenseHead
from .dense_test_mixins import BBoxTestMixin


@HEADS.register_module()
class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
    """Anchor-free head (FCOS, Fovea, RepPoints, etc.).

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        feat_channels (int): Number of hidden channels. Used in child classes.
        stacked_convs (int): Number of stacking convs of the head.
        strides (tuple): Downsample factor of each feature map.
        dcn_on_last_conv (bool): If true, use dcn in the last layer of
            towers. Default: False.
        conv_bias (bool | str): If specified as `auto`, it will be decided by
            the norm_cfg. Bias of conv will be set as True if `norm_cfg` is
            None, otherwise False. Default: "auto".
        loss_cls (dict): Config of classification loss.
        loss_bbox (dict): Config of localization loss.
        conv_cfg (dict): Config dict for convolution layer. Default: None.
        norm_cfg (dict): Config dict for normalization layer. Default: None.
        train_cfg (dict): Training config of anchor head.
        test_cfg (dict): Testing config of anchor head.
    """  # noqa: W605

    _version = 1

    def __init__(self,
                 num_classes,
                 in_channels,
                 feat_channels=256,
                 stacked_convs=4,
                 strides=(4, 8, 16, 32, 64),
                 dcn_on_last_conv=False,
                 conv_bias='auto',
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
                     gamma=2.0,
                     alpha=0.25,
                     loss_weight=1.0),
                 loss_bbox=dict(type='IoULoss', loss_weight=1.0),
                 conv_cfg=None,
                 norm_cfg=None,
                 train_cfg=None,
                 test_cfg=None):
        super(AnchorFreeHead, self).__init__()
        self.num_classes = num_classes
        self.cls_out_channels = num_classes
        self.in_channels = in_channels
        self.feat_channels = feat_channels
        self.stacked_convs = stacked_convs
        self.strides = strides
        self.dcn_on_last_conv = dcn_on_last_conv
        assert conv_bias == 'auto' or isinstance(conv_bias, bool)
        self.conv_bias = conv_bias
        self.loss_cls = build_loss(loss_cls)
        self.loss_bbox = build_loss(loss_bbox)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.fp16_enabled = False

        self._init_layers()

    def _init_layers(self):
        """Initialize layers of the head."""
        self._init_cls_convs()
        self._init_reg_convs()
        self._init_predictor()

    def _init_cls_convs(self):
        """Initialize classification conv layers of the head."""
        self.cls_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            if self.dcn_on_last_conv and i == self.stacked_convs - 1:
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.conv_bias))

    def _init_reg_convs(self):
        """Initialize bbox regression conv layers of the head."""
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            if self.dcn_on_last_conv and i == self.stacked_convs - 1:
                conv_cfg = dict(type='DCNv2')
            else:
                conv_cfg = self.conv_cfg
            self.reg_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=self.conv_bias))

    def _init_predictor(self):
        """Initialize predictor layers of the head."""
        self.conv_cls = nn.Conv2d(
            self.feat_channels, self.cls_out_channels, 3, padding=1)
        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)

    def init_weights(self):
        """Initialize weights of the head."""
        for m in self.cls_convs:
            if isinstance(m.conv, nn.Conv2d):
                normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            if isinstance(m.conv, nn.Conv2d):
                normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.conv_cls, std=0.01, bias=bias_cls)
        normal_init(self.conv_reg, std=0.01)

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        """Hack some keys of the model state dict so that it can load
        checkpoints of previous versions."""
        version = local_metadata.get('version', None)
        if version is None:
            # the key is different in early versions
            # for example, 'fcos_cls' become 'conv_cls' now
            bbox_head_keys = [
                k for k in state_dict.keys() if k.startswith(prefix)
            ]
            ori_predictor_keys = []
            new_predictor_keys = []
            # e.g. 'fcos_cls' or 'fcos_reg'
            for key in bbox_head_keys:
                ori_predictor_keys.append(key)
                key = key.split('.')
                conv_name = None
                if key[1].endswith('cls'):
                    conv_name = 'conv_cls'
                elif key[1].endswith('reg'):
                    conv_name = 'conv_reg'
                elif key[1].endswith('centerness'):
                    conv_name = 'conv_centerness'
                else:
                    assert NotImplementedError
                if conv_name is not None:
                    key[1] = conv_name
                    new_predictor_keys.append('.'.join(key))
                else:
                    ori_predictor_keys.pop(-1)
            for i in range(len(new_predictor_keys)):
                state_dict[new_predictor_keys[i]] = state_dict.pop(
                    ori_predictor_keys[i])
        super()._load_from_state_dict(state_dict, prefix, local_metadata,
                                      strict, missing_keys, unexpected_keys,
                                      error_msgs)

    def forward(self, feats):
        """Forward features from the upstream network.

        Args:
            feats (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.

        Returns:
            tuple: Usually contains classification scores and bbox predictions.
                cls_scores (list[Tensor]): Box scores for each scale level,
                    each is a 4D-tensor, the channel number is
                    num_points * num_classes.
                bbox_preds (list[Tensor]): Box energies / deltas for each scale
                    level, each is a 4D-tensor, the channel number is
                    num_points * 4.
        """
        return multi_apply(self.forward_single, feats)[:2]

    def forward_single(self, x):
        """Forward features of a single scale level.

        Args:
            x (Tensor): FPN feature maps of the specified stride.

        Returns:
            tuple: Scores for each class, bbox predictions, and features
                after the classification and regression conv layers; some
                models, like FCOS, need these features.
        """
        cls_feat = x
        reg_feat = x

        for cls_layer in self.cls_convs:
            cls_feat = cls_layer(cls_feat)
        cls_score = self.conv_cls(cls_feat)

        for reg_layer in self.reg_convs:
            reg_feat = reg_layer(reg_feat)
        bbox_pred = self.conv_reg(reg_feat)
        return cls_score, bbox_pred, cls_feat, reg_feat

    @abstractmethod
    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def loss(self,
             cls_scores,
             bbox_preds,
             gt_bboxes,
             gt_labels,
             img_metas,
             gt_bboxes_ignore=None):
        """Compute loss of the head.

        Args:
            cls_scores (list[Tensor]): Box scores for each scale level,
                each is a 4D-tensor, the channel number is
                num_points * num_classes.
            bbox_preds (list[Tensor]): Box energies / deltas for each scale
                level, each is a 4D-tensor, the channel number is
                num_points * 4.
            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
            gt_labels (list[Tensor]): class indices corresponding to each box
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
                boxes can be ignored when computing the loss.
        """

        raise NotImplementedError

    @abstractmethod
    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))
    def get_bboxes(self,
254
- cls_scores,
255
- bbox_preds,
256
- img_metas,
257
- cfg=None,
258
- rescale=None):
259
- """Transform network output for a batch into bbox predictions.
260
-
261
- Args:
262
- cls_scores (list[Tensor]): Box scores for each scale level
263
- Has shape (N, num_points * num_classes, H, W)
264
- bbox_preds (list[Tensor]): Box energies / deltas for each scale
265
- level with shape (N, num_points * 4, H, W)
266
- img_metas (list[dict]): Meta information of each image, e.g.,
267
- image size, scaling factor, etc.
268
- cfg (mmcv.Config): Test / postprocessing configuration,
269
- if None, test_cfg would be used
270
- rescale (bool): If True, return boxes in original image space
271
- """
272
-
273
- raise NotImplementedError
274
-
275
- @abstractmethod
276
- def get_targets(self, points, gt_bboxes_list, gt_labels_list):
277
- """Compute regression, classification and centerness targets for points
278
- in multiple images.
279
-
280
- Args:
281
- points (list[Tensor]): Points of each fpn level, each has shape
282
- (num_points, 2).
283
- gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,
284
- each has shape (num_gt, 4).
285
- gt_labels_list (list[Tensor]): Ground truth labels of each box,
286
- each has shape (num_gt,).
287
- """
288
- raise NotImplementedError
289
-
290
- def _get_points_single(self,
291
- featmap_size,
292
- stride,
293
- dtype,
294
- device,
295
- flatten=False):
296
- """Get points of a single scale level."""
297
- h, w = featmap_size
298
- x_range = torch.arange(w, dtype=dtype, device=device)
299
- y_range = torch.arange(h, dtype=dtype, device=device)
300
- y, x = torch.meshgrid(y_range, x_range)
301
- if flatten:
302
- y = y.flatten()
303
- x = x.flatten()
304
- return y, x
305
-
306
- def get_points(self, featmap_sizes, dtype, device, flatten=False):
307
- """Get points according to feature map sizes.
308
-
309
- Args:
310
- featmap_sizes (list[tuple]): Multi-level feature map sizes.
311
- dtype (torch.dtype): Type of points.
312
- device (torch.device): Device of points.
313
-
314
- Returns:
315
- tuple: points of each image.
316
- """
317
- mlvl_points = []
318
- for i in range(len(featmap_sizes)):
319
- mlvl_points.append(
320
- self._get_points_single(featmap_sizes[i], self.strides[i],
321
- dtype, device, flatten))
322
- return mlvl_points
323
-
324
- def aug_test(self, feats, img_metas, rescale=False):
325
- """Test function with test time augmentation.
326
-
327
- Args:
328
- feats (list[Tensor]): the outer list indicates test-time
329
- augmentations and inner Tensor should have a shape NxCxHxW,
330
- which contains features for all images in the batch.
331
- img_metas (list[list[dict]]): the outer list indicates test-time
332
- augs (multiscale, flip, etc.) and the inner list indicates
333
- images in a batch. each dict has image information.
334
- rescale (bool, optional): Whether to rescale the results.
335
- Defaults to False.
336
-
337
- Returns:
338
- list[ndarray]: bbox results of each class
339
- """
340
- return self.aug_test_bboxes(feats, img_metas, rescale=rescale)
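
The `_get_points_single`/`get_points` pair above is the piece that FCOS-style subclasses build on: every FPN level yields one grid of candidate locations in feature-map coordinates, later scaled by the stride. A minimal standalone sketch of that idea, independent of mmdet (the feature-map size, stride, and the stride//2 center shift below are illustrative assumptions, not code from this file):

import torch

def grid_points(featmap_size, stride, device='cpu'):
    # One (num_points, 2) tensor of xy locations for a single FPN level,
    # expressed in input-image coordinates.
    h, w = featmap_size
    xs = torch.arange(w, dtype=torch.float32, device=device)
    ys = torch.arange(h, dtype=torch.float32, device=device)
    yy, xx = torch.meshgrid(ys, xs)  # 'ij' indexing, as in the head above
    points = torch.stack((xx.reshape(-1), yy.reshape(-1)), dim=-1)
    return points * stride + stride // 2  # shift to cell centers

# A 100x152 stride-8 feature map gives 15200 candidate locations.
print(grid_points((100, 152), 8).shape)  # torch.Size([15200, 2])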
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r50-d8_480x480_40k_pascal_context_59.py DELETED
@@ -1,10 +0,0 @@
-_base_ = [
-    '../_base_/models/fcn_r50-d8.py',
-    '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py',
-    '../_base_/schedules/schedule_40k.py'
-]
-model = dict(
-    decode_head=dict(num_classes=59),
-    auxiliary_head=dict(num_classes=59),
-    test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
-optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
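
Configs like this one compose through `_base_` inheritance; the merged result can be inspected with mmcv. A sketch assuming an mmcv (<2.0) environment and a repository checkout where this relative path exists:

from mmcv import Config

# Resolves the _base_ chain, then applies this file's overrides.
cfg = Config.fromfile(
    'configs/fcn/fcn_r50-d8_480x480_40k_pascal_context_59.py')
print(cfg.model.decode_head.num_classes)  # 59
print(cfg.optimizer.lr)                   # 0.004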
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_stack.py DELETED
@@ -1,16 +0,0 @@
-from typing import List, TypeVar
-
-T = TypeVar("T")
-
-
-class Stack(List[T]):
-    """A small shim over builtin list."""
-
-    @property
-    def top(self) -> T:
-        """Get top of stack."""
-        return self[-1]
-
-    def push(self, item: T) -> None:
-        """Push an item on to the stack (append in stack nomenclature)."""
-        self.append(item)
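
`Stack` is nothing more than `list` plus stack vocabulary: `push` appends, `top` peeks, and `pop` already comes from the base class. A quick hypothetical usage, importing pip's vendored copy purely for illustration:

from pip._vendor.rich._stack import Stack

s: Stack[int] = Stack()
s.push(1)
s.push(2)
assert s.top == 2    # peek at the last item without removing it
assert s.pop() == 2  # inherited list.pop() removes and returns it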
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/extension.py DELETED
@@ -1,148 +0,0 @@
-import re
-import functools
-import distutils.core
-import distutils.errors
-import distutils.extension
-
-from .monkey import get_unpatched
-
-
-def _have_cython():
-    """
-    Return True if Cython can be imported.
-    """
-    cython_impl = 'Cython.Distutils.build_ext'
-    try:
-        # from (cython_impl) import build_ext
-        __import__(cython_impl, fromlist=['build_ext']).build_ext
-        return True
-    except Exception:
-        pass
-    return False
-
-
-# for compatibility
-have_pyrex = _have_cython
-
-_Extension = get_unpatched(distutils.core.Extension)
-
-
-class Extension(_Extension):
-    """
-    Describes a single extension module.
-
-    This means that all source files will be compiled into a single binary file
-    ``<module path>.<suffix>`` (with ``<module path>`` derived from ``name`` and
-    ``<suffix>`` defined by one of the values in
-    ``importlib.machinery.EXTENSION_SUFFIXES``).
-
-    In the case ``.pyx`` files are passed as ``sources`` and ``Cython`` is **not**
-    installed in the build environment, ``setuptools`` may also try to look for the
-    equivalent ``.cpp`` or ``.c`` files.
-
-    :arg str name:
-      the full name of the extension, including any packages -- ie.
-      *not* a filename or pathname, but Python dotted name
-
-    :arg list[str] sources:
-      list of source filenames, relative to the distribution root
-      (where the setup script lives), in Unix form (slash-separated)
-      for portability. Source files may be C, C++, SWIG (.i),
-      platform-specific resource files, or whatever else is recognized
-      by the "build_ext" command as source for a Python extension.
-
-    :keyword list[str] include_dirs:
-      list of directories to search for C/C++ header files (in Unix
-      form for portability)
-
-    :keyword list[tuple[str, str|None]] define_macros:
-      list of macros to define; each macro is defined using a 2-tuple:
-      the first item corresponding to the name of the macro and the second
-      item either a string with its value or None to
-      define it without a particular value (equivalent of "#define
-      FOO" in source or -DFOO on Unix C compiler command line)
-
-    :keyword list[str] undef_macros:
-      list of macros to undefine explicitly
-
-    :keyword list[str] library_dirs:
-      list of directories to search for C/C++ libraries at link time
-
-    :keyword list[str] libraries:
-      list of library names (not filenames or paths) to link against
-
-    :keyword list[str] runtime_library_dirs:
-      list of directories to search for C/C++ libraries at run time
-      (for shared extensions, this is when the extension is loaded).
-      Setting this will cause an exception during build on Windows
-      platforms.
-
-    :keyword list[str] extra_objects:
-      list of extra files to link with (eg. object files not implied
-      by 'sources', static library that must be explicitly specified,
-      binary resource files, etc.)
-
-    :keyword list[str] extra_compile_args:
-      any extra platform- and compiler-specific information to use
-      when compiling the source files in 'sources'. For platforms and
-      compilers where "command line" makes sense, this is typically a
-      list of command-line arguments, but for other platforms it could
-      be anything.
-
-    :keyword list[str] extra_link_args:
-      any extra platform- and compiler-specific information to use
-      when linking object files together to create the extension (or
-      to create a new static Python interpreter). Similar
-      interpretation as for 'extra_compile_args'.
-
-    :keyword list[str] export_symbols:
-      list of symbols to be exported from a shared extension. Not
-      used on all platforms, and not generally necessary for Python
-      extensions, which typically export exactly one symbol: "init" +
-      extension_name.
-
-    :keyword list[str] swig_opts:
-      any extra options to pass to SWIG if a source file has the .i
-      extension.
-
-    :keyword list[str] depends:
-      list of files that the extension depends on
-
-    :keyword str language:
-      extension language (i.e. "c", "c++", "objc"). Will be detected
-      from the source extensions if not provided.
-
-    :keyword bool optional:
-      specifies that a build failure in the extension should not abort the
-      build process, but simply not install the failing extension.
-
-    :keyword bool py_limited_api:
-      opt-in flag for the usage of :doc:`Python's limited API <python:c-api/stable>`.
-
-    :raises setuptools.errors.PlatformError: if 'runtime_library_dirs' is
-      specified on Windows. (since v63)
-    """
-
-    def __init__(self, name, sources, *args, **kw):
-        # The *args is needed for compatibility as calls may use positional
-        # arguments. py_limited_api may be set only via keyword.
-        self.py_limited_api = kw.pop("py_limited_api", False)
-        super().__init__(name, sources, *args, **kw)
-
-    def _convert_pyx_sources_to_lang(self):
-        """
-        Replace sources with .pyx extensions to sources with the target
-        language extension. This mechanism allows language authors to supply
-        pre-converted sources but to prefer the .pyx sources.
-        """
-        if _have_cython():
-            # the build has Cython, so allow it to compile the .pyx files
-            return
-        lang = self.language or ''
-        target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
-        # escape the dot so only a literal ".pyx" suffix is rewritten
-        sub = functools.partial(re.sub, r'\.pyx$', target_ext)
-        self.sources = list(map(sub, self.sources))
-
-
-class Library(Extension):
-    """Just like a regular Extension, but built as a library instead"""
spaces/AtomdffAI/wechatgpt4atom/README.md DELETED
@@ -1,13 +0,0 @@
----
-title: wechat-bot
-emoji: 👀
-colorFrom: red
-colorTo: yellow
-sdk: gradio
-sdk_version: 3.19.1
-app_file: app.py
-pinned: false
-duplicated_from: lewisliuX123/wechatgpt3
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Awesimo/jojogan/e4e/training/__init__.py DELETED
File without changes
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/meta_arch/fcos.py DELETED
@@ -1,303 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-import logging
-from typing import List, Optional, Tuple
-import torch
-from fvcore.nn import sigmoid_focal_loss_jit
-from torch import Tensor, nn
-from torch.nn import functional as F
-
-from detectron2.layers import ShapeSpec, batched_nms
-from detectron2.structures import Boxes, ImageList, Instances, pairwise_point_box_distance
-from detectron2.utils.events import get_event_storage
-
-from ..anchor_generator import DefaultAnchorGenerator
-from ..backbone import Backbone
-from ..box_regression import Box2BoxTransformLinear, _dense_box_regression_loss
-from .dense_detector import DenseDetector
-from .retinanet import RetinaNetHead
-
-__all__ = ["FCOS"]
-
-
-logger = logging.getLogger(__name__)
-
-
-class FCOS(DenseDetector):
-    """
-    Implement FCOS in :paper:`fcos`.
-    """
-
-    def __init__(
-        self,
-        *,
-        backbone: Backbone,
-        head: nn.Module,
-        head_in_features: Optional[List[str]] = None,
-        box2box_transform=None,
-        num_classes,
-        center_sampling_radius: float = 1.5,
-        focal_loss_alpha=0.25,
-        focal_loss_gamma=2.0,
-        test_score_thresh=0.2,
-        test_topk_candidates=1000,
-        test_nms_thresh=0.6,
-        max_detections_per_image=100,
-        pixel_mean,
-        pixel_std,
-    ):
-        """
-        Args:
-            center_sampling_radius: radius of the "center" of a groundtruth box,
-                within which all anchor points are labeled positive.
-            Other arguments mean the same as in :class:`RetinaNet`.
-        """
-        super().__init__(
-            backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std
-        )
-
-        self.num_classes = num_classes
-
-        # FCOS uses one anchor point per location.
-        # We represent the anchor point by a box whose size equals the anchor stride.
-        feature_shapes = backbone.output_shape()
-        fpn_strides = [feature_shapes[k].stride for k in self.head_in_features]
-        self.anchor_generator = DefaultAnchorGenerator(
-            sizes=[[k] for k in fpn_strides], aspect_ratios=[1.0], strides=fpn_strides
-        )
-
-        # FCOS parameterizes box regression by a linear transform,
-        # where predictions are normalized by anchor stride (equal to anchor size).
-        if box2box_transform is None:
-            box2box_transform = Box2BoxTransformLinear(normalize_by_size=True)
-        self.box2box_transform = box2box_transform
-
-        self.center_sampling_radius = float(center_sampling_radius)
-
-        # Loss parameters:
-        self.focal_loss_alpha = focal_loss_alpha
-        self.focal_loss_gamma = focal_loss_gamma
-
-        # Inference parameters:
-        self.test_score_thresh = test_score_thresh
-        self.test_topk_candidates = test_topk_candidates
-        self.test_nms_thresh = test_nms_thresh
-        self.max_detections_per_image = max_detections_per_image
-
-    def forward_training(self, images, features, predictions, gt_instances):
-        # Transpose the Hi*Wi*A dimension to the middle:
-        pred_logits, pred_anchor_deltas, pred_centerness = self._transpose_dense_predictions(
-            predictions, [self.num_classes, 4, 1]
-        )
-        anchors = self.anchor_generator(features)
-        gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances)
-        return self.losses(
-            anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes, pred_centerness
-        )
-
-    @torch.no_grad()
-    def match_anchors(self, anchors: List[Boxes], gt_instances: List[Instances]):
-        """
-        Match anchors with ground truth boxes.
-
-        Args:
-            anchors: #level boxes, from the highest resolution to lower resolution
-            gt_instances: ground truth instances per image
-
-        Returns:
-            List[Tensor]:
-                #image tensors, each is a vector of matched gt
-                indices (or -1 for unmatched anchors) for all anchors.
-        """
-        num_anchors_per_level = [len(x) for x in anchors]
-        anchors = Boxes.cat(anchors)  # Rx4
-        anchor_centers = anchors.get_centers()  # Rx2
-        anchor_sizes = anchors.tensor[:, 2] - anchors.tensor[:, 0]  # R
-
-        lower_bound = anchor_sizes * 4
-        lower_bound[: num_anchors_per_level[0]] = 0
-        upper_bound = anchor_sizes * 8
-        upper_bound[-num_anchors_per_level[-1] :] = float("inf")
-
-        matched_indices = []
-        for gt_per_image in gt_instances:
-            gt_centers = gt_per_image.gt_boxes.get_centers()  # Nx2
-            # FCOS with center sampling: anchor point must be close enough to gt center.
-            pairwise_match = (anchor_centers[:, None, :] - gt_centers[None, :, :]).abs_().max(
-                dim=2
-            ).values < self.center_sampling_radius * anchor_sizes[:, None]
-            pairwise_dist = pairwise_point_box_distance(anchor_centers, gt_per_image.gt_boxes)
-
-            # The original FCOS anchor matching rule: anchor point must be inside gt
-            pairwise_match &= pairwise_dist.min(dim=2).values > 0
-
-            # Multilevel anchor matching in FCOS: each anchor is only responsible
-            # for certain scale range.
-            pairwise_dist = pairwise_dist.max(dim=2).values
-            pairwise_match &= (pairwise_dist > lower_bound[:, None]) & (
-                pairwise_dist < upper_bound[:, None]
-            )
-
-            # Match the GT box with minimum area, if there are multiple GT matches
-            gt_areas = gt_per_image.gt_boxes.area()  # N
-            pairwise_match = pairwise_match.to(torch.float32) * (1e8 - gt_areas[None, :])
-            min_values, matched_idx = pairwise_match.max(dim=1)  # R, per-anchor match
-            matched_idx[min_values < 1e-5] = -1  # Unmatched anchors are assigned -1
-
-            matched_indices.append(matched_idx)
-        return matched_indices
-
-    @torch.no_grad()
-    def label_anchors(self, anchors, gt_instances):
-        """
-        Same interface as :meth:`RetinaNet.label_anchors`, but implemented with FCOS
-        anchor matching rule.
-
-        Unlike RetinaNet, there are no ignored anchors.
-        """
-        matched_indices = self.match_anchors(anchors, gt_instances)
-
-        matched_labels, matched_boxes = [], []
-        for gt_index, gt_per_image in zip(matched_indices, gt_instances):
-            label = gt_per_image.gt_classes[gt_index.clip(min=0)]
-            label[gt_index < 0] = self.num_classes  # background
-
-            matched_gt_boxes = gt_per_image.gt_boxes[gt_index.clip(min=0)]
-
-            matched_labels.append(label)
-            matched_boxes.append(matched_gt_boxes)
-        return matched_labels, matched_boxes
-
-    def losses(
-        self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes, pred_centerness
-    ):
-        """
-        This method is almost identical to :meth:`RetinaNet.losses`, with an extra
-        "loss_centerness" in the returned dict.
-        """
-        num_images = len(gt_labels)
-        gt_labels = torch.stack(gt_labels)  # (N, R)
-
-        pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes)
-        num_pos_anchors = pos_mask.sum().item()
-        get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images)
-        normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 300)
-
-        # classification and regression loss
-        gt_labels_target = F.one_hot(gt_labels, num_classes=self.num_classes + 1)[
-            :, :, :-1
-        ]  # no loss for the last (background) class
-        loss_cls = sigmoid_focal_loss_jit(
-            torch.cat(pred_logits, dim=1),
-            gt_labels_target.to(pred_logits[0].dtype),
-            alpha=self.focal_loss_alpha,
-            gamma=self.focal_loss_gamma,
-            reduction="sum",
-        )
-
-        loss_box_reg = _dense_box_regression_loss(
-            anchors,
-            self.box2box_transform,
-            pred_anchor_deltas,
-            [x.tensor for x in gt_boxes],
-            pos_mask,
-            box_reg_loss_type="giou",
-        )
-
-        ctrness_targets = self.compute_ctrness_targets(anchors, gt_boxes)  # NxR
-        pred_centerness = torch.cat(pred_centerness, dim=1).squeeze(dim=2)  # NxR
-        ctrness_loss = F.binary_cross_entropy_with_logits(
-            pred_centerness[pos_mask], ctrness_targets[pos_mask], reduction="sum"
-        )
-        return {
-            "loss_fcos_cls": loss_cls / normalizer,
-            "loss_fcos_loc": loss_box_reg / normalizer,
-            "loss_fcos_ctr": ctrness_loss / normalizer,
-        }
-
-    def compute_ctrness_targets(self, anchors, gt_boxes):  # NxR
-        anchors = Boxes.cat(anchors).tensor  # Rx4
-        reg_targets = [self.box2box_transform.get_deltas(anchors, m.tensor) for m in gt_boxes]
-        reg_targets = torch.stack(reg_targets, dim=0)  # NxRx4
-        if len(reg_targets) == 0:
-            return reg_targets.new_zeros(len(reg_targets))
-        left_right = reg_targets[:, :, [0, 2]]
-        top_bottom = reg_targets[:, :, [1, 3]]
-        ctrness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
-            top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]
-        )
-        return torch.sqrt(ctrness)
-
-    def forward_inference(
-        self, images: ImageList, features: List[Tensor], predictions: List[List[Tensor]]
-    ):
-        pred_logits, pred_anchor_deltas, pred_centerness = self._transpose_dense_predictions(
-            predictions, [self.num_classes, 4, 1]
-        )
-        anchors = self.anchor_generator(features)
-
-        results: List[Instances] = []
-        for img_idx, image_size in enumerate(images.image_sizes):
-            scores_per_image = [
-                # Multiply and sqrt centerness & classification scores
-                # (See eqn. 4 in https://arxiv.org/abs/2006.09214)
-                torch.sqrt(x[img_idx].sigmoid_() * y[img_idx].sigmoid_())
-                for x, y in zip(pred_logits, pred_centerness)
-            ]
-            deltas_per_image = [x[img_idx] for x in pred_anchor_deltas]
-            results_per_image = self.inference_single_image(
-                anchors, scores_per_image, deltas_per_image, image_size
-            )
-            results.append(results_per_image)
-        return results
-
-    def inference_single_image(
-        self,
-        anchors: List[Boxes],
-        box_cls: List[Tensor],
-        box_delta: List[Tensor],
-        image_size: Tuple[int, int],
-    ):
-        """
-        Identical to :meth:`RetinaNet.inference_single_image`.
-        """
-        pred = self._decode_multi_level_predictions(
-            anchors,
-            box_cls,
-            box_delta,
-            self.test_score_thresh,
-            self.test_topk_candidates,
-            image_size,
-        )
-        keep = batched_nms(
-            pred.pred_boxes.tensor, pred.scores, pred.pred_classes, self.test_nms_thresh
-        )
-        return pred[keep[: self.max_detections_per_image]]
-
-
-class FCOSHead(RetinaNetHead):
-    """
-    The head used in :paper:`fcos`. It adds an additional centerness
-    prediction branch on top of :class:`RetinaNetHead`.
-    """
-
-    def __init__(self, *, input_shape: List[ShapeSpec], conv_dims: List[int], **kwargs):
-        super().__init__(input_shape=input_shape, conv_dims=conv_dims, num_anchors=1, **kwargs)
-        # Unlike original FCOS, we do not add an additional learnable scale layer
-        # because it's found to have no benefits after normalizing regression targets by stride.
-        self._num_features = len(input_shape)
-        self.ctrness = nn.Conv2d(conv_dims[-1], 1, kernel_size=3, stride=1, padding=1)
-        torch.nn.init.normal_(self.ctrness.weight, std=0.01)
-        torch.nn.init.constant_(self.ctrness.bias, 0)
-
-    def forward(self, features):
-        assert len(features) == self._num_features
-        logits = []
-        bbox_reg = []
-        ctrness = []
-        for feature in features:
-            logits.append(self.cls_score(self.cls_subnet(feature)))
-            bbox_feature = self.bbox_subnet(feature)
-            bbox_reg.append(self.bbox_pred(bbox_feature))
-            ctrness.append(self.ctrness(bbox_feature))
-        return logits, bbox_reg, ctrness
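
The centerness target computed above is the FCOS quantity sqrt((min(l,r)/max(l,r)) * (min(t,b)/max(t,b))), where l, t, r, b are a point's distances to the four box sides. A tiny self-contained check of that formula, separate from the model code (the input values are chosen purely for illustration):

import torch

def centerness(l, t, r, b):
    # 1.0 at the exact box center, falling toward 0 near the edges.
    lr = torch.stack([l, r], dim=-1)
    tb = torch.stack([t, b], dim=-1)
    return torch.sqrt(
        (lr.min(dim=-1).values / lr.max(dim=-1).values)
        * (tb.min(dim=-1).values / tb.max(dim=-1).values)
    )

center = centerness(*[torch.tensor(5.0)] * 4)              # -> 1.0
offset = centerness(torch.tensor(2.0), torch.tensor(5.0),
                    torch.tensor(8.0), torch.tensor(5.0))  # -> 0.5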
spaces/Benson/text-generation/Examples/Blockman Ir Aventura Hack Apk 2022 Cubos Ilimitados.md DELETED
@@ -1,83 +0,0 @@
-<br />
-<h1>Blockman Go Adventure Hack APK 2022 Unlimited Cubes</h1>
-<p>Do you love playing block-based games with your friends? Do you want to explore different worlds and complete various challenges? If so, then you should try <strong>Blockman Go Adventure</strong>, a fun and addictive game that lets you create your own avatar, customize your settings, and join millions of players online. But wait, there's more! You can also use Blockman Go Adventure Hack APK, a modified version of the game that gives you unlimited cubes, coins, gems, and other resources. In this article, we will tell you everything you need to know about Blockman Go Adventure and Blockman Go Adventure Hack APK, including their features, how to play them, and some tips and tricks to get the most out of your gaming experience. Let's get started!</p>
-<h2>blockman go adventure hack apk 2022 unlimited cubes</h2><br /><p><b><b>Download</b> &#9733;&#9733;&#9733;&#9733;&#9733; <a href="https://bltlly.com/2v6KrL">https://bltlly.com/2v6KrL</a></b></p><br /><br />
-<h2>What is Blockman Go Adventure?</h2>
-<p>Blockman Go Adventure is a free online game that combines elements of sandbox, adventure, and social gaming. It is developed by Blockman GO Studio, a team of creative and passionate game developers who aim to provide high-quality games for players of all ages. Blockman Go Adventure is one of their most popular games, with more than 10 million downloads on the Google Play Store and a 4.4-star rating.</p>
-<h3>Features of Blockman Go Adventure</h3>
-<p>Blockman Go Adventure has many features that make it an enjoyable and engaging game for everyone. Some of these features are:</p>
-<ul>
-<li><b>Multiple mini-games:</b> You can choose from more than 100 mini-games to suit your preferences and skills. Whether you like racing, shooting, parkour, or puzzle games, you will find something that interests you in Blockman Go Adventure.</li>
-<li><b>Diverse worlds:</b> You can explore different worlds, each with its own themes, environments, and challenges. You can visit the medieval castle, the futuristic city, the tropical island, and more.</li>
-
-<li><b>Social interaction:</b> You can chat with other players in real time using voice or text messages. You can also make friends, send gifts, and join clans.</li>
-<li><b>Reward system:</b> You can earn coins and gems by playing mini-games, completing tasks, and logging in daily. You can use these currencies to buy new items for your avatar or upgrade existing ones.</li>
-</ul>
-<h3>How to play Blockman Go Adventure</h3>
-<p>Playing Blockman Go Adventure is easy and fun. Here are the steps to follow:</p>
-<ol>
-<li>Download and install the game from the Google Play Store or App Store.</li>
-<li>Create an account or log in with an existing one.</li>
-<li>Select a mini-game from the lobby or create your own room.</li>
-<li>Invite your friends or join other players online.</li>
-<li>Enjoy the game and chat with other players.</li>
-</ol>
-<h2>What is Blockman Go Adventure Hack APK?</h2>
-<p>Blockman Go Adventure Hack APK is a modified version of the original game that gives you access to unlimited resources and features. It is not available in the official app stores, but you can download it from third-party websites. However, you should be careful when downloading these files, as they may contain viruses or malware that can damage your device or steal your personal information.</p>
-<h3>Benefits of Blockman Go Adventure Hack APK</h3> <p>Some of the benefits of Blockman Go Adventure Hack APK are:</p>
-<p></p>
-<ul>
-<li><b>Unlimited cubes:</b> You can get unlimited cubes, which are the game's premium currency. You can use cubes to buy special items such as VIP membership, lucky bags, and exclusive skins.</li>
-<li><b>Unlimited coins and gems:</b> You can also get unlimited coins and gems, which are the game's regular currencies. You can use coins and gems to buy more outfits, hairstyles, accessories, and skins for your avatar.</li>
-
-<li><b>Free and easy to use:</b> You do not need to root or jailbreak your device to use Blockman Go Adventure Hack APK. You just download and install the file, and you are ready to go. You do not need to pay anything or complete any surveys to use the hack.</li>
-</ul>
-<h3>How to download and install Blockman Go Adventure Hack APK</h3>
-<p>If you want to try Blockman Go Adventure Hack APK, you need to follow these steps:</p>
-<ol>
-<li>Go to a reliable website that offers Blockman Go Adventure Hack APK, such as [HackDL] or [APKPure].</li>
-<li>Click on the download button and wait for the file to download.</li>
-<li>Go to your device settings and allow the installation of apps from unknown sources.</li>
-<li>Find the downloaded file and tap on it to start the installation process.</li>
-<li>Follow the on-screen instructions and wait for the installation to finish.</li>
-<li>Launch the game and enjoy the hack.</li>
-</ol>
-<h2>Tips and tricks for Blockman Go Adventure</h2>
-<p>To make your gaming experience more fun and rewarding, here are some tips and tricks you can use in Blockman Go Adventure:</p>
-<h3>Use the mod menu to customize your game</h3>
-<p>If you are using Blockman Go Adventure Hack APK, you can use the mod menu to change the game settings to your liking. For example, you can increase your speed, jump higher, fly through the air, or become invisible. You can also disable some features you do not like, such as ads, van protection, or auto-update. However, you should be careful when using the mod menu, as some settings may cause glitches or errors in the game. You should also avoid using it in public rooms, as other players may report you for cheating.</p>
-
-<h3>Join a clan and play with friends</h3> <p>Another way to enjoy Blockman Go Adventure is to join a clan and play with friends. A clan is a group of players who share a common interest or goal in the game. You can join an existing clan or create your own. By joining a clan, you can chat with other members, send gifts, take part in clan wars, and earn clan points. You can also invite your friends to join your clan or play with them in private rooms. Playing with friends can make the game more fun and social.</p>
-<h2>Conclusion</h2>
-<p>Blockman Go Adventure is a great game for anyone who loves block-based games with plenty of variety and creativity. You can play different mini-games, explore different worlds, customize your avatar, and interact with other players online. You can also use Blockman Go Adventure Hack APK to get unlimited resources and features that can enhance your gaming experience. However, you should be careful when downloading and installing these files, as they may contain viruses or malware that can damage your device or steal your personal information. You should also use the hack responsibly and not abuse it in public rooms or against other players.</p>
-<h3>Summary of the main points</h3> <p>In this article, we have covered the following points:</p>
-<ul>
-<li>Blockman Go Adventure is a free online game that combines elements of sandbox, adventure, and social gaming.</li> <li>Blockman Go Adventure Hack APK is a modified version of the game that gives you unlimited cubes, coins, gems, and other resources.</li>
-<li>You can download and install Blockman Go Adventure Hack APK from third-party websites, but you should watch out for viruses and malware.</li>
-<li>You can use the mod menu to customize your game settings, such as speed, gravity, invisibility, and more.</li>
-<li>You can collect coins and gems to unlock new items for your avatar or upgrade existing ones.</li>
-
-</ul>
-<h3>Call to action</h3>
-<p>If you are interested in playing Blockman Go Adventure or Blockman Go Adventure Hack APK, you can download them from the links below. You can also visit the official website or social media pages of Blockman GO Studio for more information about their games and updates. Have fun and enjoy the adventure!</p>
-<ul>
-<li><a href="">Download Blockman Go Adventure from the Google Play Store</a></li>
-<li><a href="">Download Blockman Go Adventure from the App Store</a></li>
-<li><a href="">Download Blockman Go Adventure Hack APK from HackDL</a></li>
-<li><a href="">Download Blockman Go Adventure Hack APK from APKPure</a></li>
-<li><a href="">Visit the official website of Blockman GO Studio</a></li>
-<li><a href="">Follow Blockman GO Studio on Facebook</a></li>
-<li><a href="">Follow Blockman GO Studio on Twitter</a></li>
-<li><a href="">Follow Blockman GO Studio on Instagram</a></li>
-</ul>
-<h4>Frequently asked questions</h4>
-<p>Here are some frequently asked questions about Blockman Go Adventure and Blockman Go Adventure Hack APK:</p>
-<ol>
-<li><b>Is Blockman Go Adventure safe to play?</b><br>Yes, Blockman Go Adventure is safe to play, as it is developed by a reputable game studio and verified by the Google Play Store and App Store. However, you should be careful when downloading and installing Blockman Go Adventure Hack APK, as it may contain viruses or malware that can damage your device or steal your personal information.</li>
-<li><b>Is Blockman Go Adventure free to play?</b><br>Yes, Blockman Go Adventure is free to play, but it contains in-app purchases that let you buy cubes, coins, gems, and other resources. You can also use Blockman Go Adventure Hack APK to get unlimited resources for free.</li>
-
-<li><b>Can I play Blockman Go Adventure on PC?</b><br>Yes, you can play Blockman Go Adventure on PC using an Android emulator such as BlueStacks or NoxPlayer. You can also use the emulator to install and run Blockman Go Adventure Hack APK on your PC.</li>
-<li><b>How can I contact the developers of Blockman Go Adventure?</b><br>You can contact the developers of Blockman Go Adventure by sending an email to [[email protected]] or filling in the feedback form on their official website. You can also follow them on their social media pages and leave a comment or message there.</li>
-</ol></p><br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Cmo Descargar El ltimo Simulador De Conduccin De Coches En PC.md DELETED
@@ -1,128 +0,0 @@
-<br />
-<h1>How to download Ultimate Car Driving Simulator on PC</h1>
-<p>Do you love driving games? Do you want to experience the thrill of driving realistic cars in a huge open world? If so, then you should try <strong>Ultimate Car Driving Simulator</strong>, one of the best car driving simulator games of 2020. In this article, we will show you how to download Ultimate Car Driving Simulator on PC and enjoy it on a bigger screen with better graphics and controls.</p>
-<h2>how to download ultimate car driving simulator on pc</h2><br /><p><b><b>Download Zip</b> &#9733;&#9733;&#9733; <a href="https://bltlly.com/2v6LBt">https://bltlly.com/2v6LBt</a></b></p><br /><br />
-<h2>What is Ultimate Car Driving Simulator?</h2>
-<p>Ultimate Car Driving Simulator is a mobile game developed by Sir Studios that combines the realism and fun of driving physics with unlimited customization and addictive gameplay. You can choose from a wide variety of cars, from racing cars to off-road vehicles, SUVs, muscle cars, trucks, and so on, and customize them with countless vinyls, parts, colors, etc. You can also drive on a huge open-world map with different environments, such as cities, deserts, mountains, forests, and more. You can also enjoy different game modes, such as racing, offroad, traffic, checkpoint, etc., and perform amazing stunts, drifts, jumps, and so on with realistic driving physics. Ultimate Car Driving Simulator is a free game, but it contains ads and in-app purchases that you can disable or buy with real money.</p>
-<h2>Why play Ultimate Car Driving Simulator on PC?</h2>
-<p>While Ultimate Car Driving Simulator is a great game to play on your mobile device, you may wonder why you should play it on your PC. Well, there are many reasons to do so, such as:</p>
-<ul>
-<li><strong>Better graphics and sound quality</strong>: Playing Ultimate Car Driving Simulator on PC will let you enjoy the game's stunning visuals and realistic sound effects in high resolution and full screen. You will be able to appreciate the details of the cars, the environments, the weather effects, etc. more clearly and immerse yourself in the game world.</li>
-
-<li><strong>Bigger screen and more fun</strong>: Playing Ultimate Car Driving Simulator on PC will also make your gaming experience more fun and enjoyable. You can play the game on a bigger screen and share it with your friends and family. You can also record your gameplay, take screenshots, stream online, chat with other players, and so on with ease.</li>
-</ul>
-<p>As you can see, playing Ultimate Car Driving Simulator on PC has many advantages that will improve your gaming experience. So, how can you download and play Ultimate Car Driving Simulator on PC? There are two main methods, which we will explain in the following sections.</p>
-<h2>How to play Ultimate Car Driving Simulator on PC with Windows 11</h2>
-<p>If you have a Windows 11 PC, you are in luck, because you can use the native Android emulation feature that comes with the new operating system. This feature lets you run Android apps and games on your PC without any additional software or hardware. Here are the steps to play Ultimate Car Driving Simulator on PC with Windows 11:</p>
-<ol>
-<li><strong>Open the Microsoft Store app</strong> on your Windows 11 PC and search for <strong>Ultimate Car Driving Simulator</strong>. Alternatively, you can use this link to go directly to the game's page.</li>
-<li><strong>Click the Install button</strong> to download and install the game on your PC. You may need to sign in with your Microsoft account if you have not already done so.</li>
-<li><strong>Launch the game</strong> from the Start menu or the desktop shortcut. You will see a pop-up asking you to enable Android apps on your PC. Click <strong>Enable</strong>.</li>
-<li><strong>Sign in with your Google account</strong> to access Google Play Services and sync your game data and achievements. You can use an existing account or create a new one.</li>
-
-</ol>
-<p>That's it! You have successfully downloaded and played Ultimate Car Driving Simulator on PC with Windows 11's native Android emulation feature. However, if you do not have a Windows 11 PC or prefer another method, you can use an Android emulator for PC instead.</p>
-<h2>How to play Ultimate Car Driving Simulator on PC with Android emulators</h2>
-<p>An Android emulator is a software program that simulates an Android device on your PC. It lets you run Android apps and games on your PC with similar features and functions as a real Android device. There are many Android emulators for PC available online, but not all of them are compatible or optimized for gaming. Therefore, we have selected some of the best Android emulators for PC that you can use to play Ultimate Car Driving Simulator on PC. They are:</p>
-<table>
-<tr>
-<th>Name</th>
-<th>Description</th>
-<th>Pros</th>
-<th>Cons</th>
-</tr>
-<tr>
-<td><strong>Bluestacks</strong></td>
-<td>A popular and powerful Android emulator for PC that has been designed for gaming. It has a user-friendly interface and many features and options to enhance your gaming experience</td>
-<td>
-<ul>
-<li>Supports high-end games with high graphics and performance</li>
-<li>Offers a variety of game modes, such as Eco Mode, Multi-Instance, Macro Recorder, etc.</li>
-<li>Has a built-in app store and game center with thousands of games</li>
-<li>Lets you customize the controls, settings, and preferences of the emulator and the game</li>
-<li>Has a large and active community of users and developers</li>
-</ul>
-</td>
-<td>
-<ul>
-<li>Requires a high-end PC with at least 4GB of RAM and a dedicated GPU</li>
-<li>Consumes a lot of CPU and memory resources</li>
-<li>May have compatibility issues with some games or apps</li>
-<li>May show ads or pop-ups that can be annoying or intrusive</li>
-<li>May pose security or privacy risks if not downloaded from the official website</li>
-</ul>
-</td>
-</tr>
-<tr>
-
-<td>A fast and smooth Android emulator for PC that is also designed for gaming. It has a simple and intuitive interface and many features and options to enhance your gaming experience</td>
-<td>
-<ul>
-<li>Supports most games with high graphics and performance</li>
-<li>Offers a variety of game modes, such as Keyboard Control, Script Recording, Multi-Drive, etc.</li>
-<li>Has a built-in app store and game center with thousands of games</li>
-<li>Lets you customize the controls, settings, and preferences of the emulator and the game</li>
-<li>Has a large and active community of users and developers</li>
-</ul>
-</td>
-<td>
-<ul>
-<li>Requires a high-end PC with at least 2GB of RAM and a dedicated GPU</li>
-<li>Consumes a lot of CPU and memory resources</li>
-<li>May have compatibility issues with some games or apps</li>
-<li>May show ads or pop-ups that can be annoying or intrusive</li>
-<li>May pose security or privacy risks if not downloaded from the official website</li>
-</ul>
-</td>
-</tr>
-<tr>
-<td><strong>Gameloop</strong></td>
-<td>A powerful and optimized Android emulator for PC that is specially designed for gaming. It has a modern and sleek interface and many features and options to enhance your gaming experience</td>
-<td>
-<ul>
-<li>Supports most games with high graphics and performance, especially FPS and MOBA games</li>
-<li>Offers a variety of game modes, such as Turbo Mode, Smart Mode, Esports Mode, etc.</li>
-<li>Has a built-in app store and game center with thousands of games</li>
-<li>Lets you customize the controls, settings, and preferences of the emulator and the game</li>
-<li>Has a large and active community of users and developers</li> </td>
-<td>
-<ul>
-<li>Requires a high-end PC with at least 4GB of RAM and a dedicated GPU</li>
-<li>Consumes a lot of CPU and memory resources</li>
-<li>May have compatibility issues with some games or apps</li>
-<li>May show ads or pop-ups that can be annoying or intrusive</li>
-
-</ul>
-</td>
-</tr>
-</table>
-<p>As you can see, each Android emulator for PC has its own pros and cons, and you can choose the one that suits your needs and preferences. Here are the steps to play Ultimate Car Driving Simulator on PC with any of these Android emulators:</p>
-<ol>
-<li><strong>Download and install the Android emulator of your choice</strong> from its official website. Make sure you have enough space and resources on your PC to run the emulator smoothly.</li>
-<li><strong>Launch the emulator</strong> and sign in with your Google account to access the Google Play Store and sync your game data and achievements. You can use an existing account or create a new one.</li>
-<li><strong>Search for Ultimate Car Driving Simulator</strong> in the Google Play Store or the emulator's app store and install it on your PC.</li>
-<li><strong>Launch the game</strong> from the emulator's home screen or the desktop shortcut. You can use the keyboard and mouse or a controller to drive your car. You can also adjust the game and emulator settings to your preference.</li>
-<li><strong>Enjoy playing Ultimate Car Driving Simulator on PC</strong> with any of these Android emulators for PC. You can also record your gameplay, take screenshots, stream online, chat with other players, and so on with ease.</li>
-</ol>
-<h2>Conclusion</h2>
-<p>In this article, we have shown you how to download Ultimate Car Driving Simulator on PC and enjoy it on a bigger screen with better graphics and controls. We have explained two main methods to play Ultimate Car Driving Simulator on PC: using Windows 11's native Android emulation feature or using an Android emulator for PC. Both methods are easy and effective, and you can choose the one that works best for you. We hope you found this article useful and informative, and we encourage you to try Ultimate Car Driving Simulator on PC today. You will not regret it!</p>
-<h2>Frequently asked questions</h2>
-<h3>Q1: Is Ultimate Car Driving Simulator free to play?</h3>
-<p>A1: Yes, it is free to download and play, but it contains ads and in-app purchases that you can disable or buy with real money.</p>
-<p></p>
-<h3>Q2: What are the minimum requirements to run Ultimate Car Driving Simulator on PC?</h3>
-<p>A2: It depends on the method you use, but you generally need a Windows 10 or 11 PC with at least 4 GB of RAM, an Intel or AMD processor, a solid-state drive with 10 GB of free space, and an Intel UHD Graphics 630 GPU or similar.</p>
-<h3>Q3: Can I play Ultimate Car Driving Simulator with a controller or a keyboard and mouse?</h3>
-<p>A3: Yes, you can use any input device that is compatible with your PC and the emulator you choose. You can also customize the controls to your preference.</p>
-<h3>Q4: Can I sync my progress and game library across devices?</h3>
-<p>A4: Yes, you can sign in with your Google account on both your mobile device and your PC and access your saved data and achievements. You can also switch between devices at any time without losing your progress.</p>
-<h3>Q5: What are some tips and tricks to improve my gameplay in Ultimate Car Driving Simulator?</h3>
-<p>A5: Some tips and tricks are: <ul>
-<li>Explore the open-world map and discover different terrains, cities, deserts, etc.</li>
-<li>Customize your car with various parts, vinyls, colors, etc. to make it unique and stylish.</li>
-<li>Use the realistic driving physics to perform stunts, drifts, jumps, etc. and earn coins and rewards.</li>
-<li>Upgrade your car's engine, suspension, brakes, tires, etc. to improve its performance and speed.</li>
-<li>Challenge yourself with different game modes, such as racing, offroad, traffic, checkpoint, etc.</li>
-</ul></p><br />
-<br />
-<br />
spaces/Benson/text-generation/Examples/Cocina Aire Freidora Recetas Apk.md DELETED
@@ -1,81 +0,0 @@
-<br />
-<h1>Kitchen Air Fryer Recipes Apk: How to cook delicious meals with less oil</h1>
-<p>If you love fried food but want to cut down on oil and calories, you may want to try an air fryer. An air fryer is a kitchen appliance that cooks food by circulating hot air around it, creating a crispy and golden exterior with minimal or no oil. It is a great way to enjoy your favorite meals without feeling guilty or compromising on taste.</p>
-<h2>kitchen air fryer recipes apk</h2><br /><p><b><b>Download Zip</b> &bull;&bull;&bull; <a href="https://bltlly.com/2v6Lv6">https://bltlly.com/2v6Lv6</a></b></p><br /><br />
-<h2>Benefits of air frying</h2>
-<p>There are many reasons why you might want to use an air fryer instead of a deep fryer or an oven. Here are some of the benefits of air frying:</p>
-<ul>
-<li><strong>Health:</strong> Air frying reduces the amount of fat and calories in food, as well as the levels of acrylamide, a potentially harmful chemical that forms when starchy foods are cooked at high temperatures. Air frying can also preserve some nutrients that are lost in other cooking methods.</li>
-<li><strong>Convenience:</strong> Air frying is quick and easy, as it preheats rapidly and cooks food evenly. You do not need to use much oil or grease, which means less mess and easier cleanup. You also do not have to worry about hot oil splatters or anything catching fire.</li>
-<li><strong>Versatility:</strong> Air frying can cook a wide variety of foods, from frozen fries and chicken nuggets to fresh vegetables and fish. You can also bake, roast, grill, and dehydrate food in an air fryer. You can even make desserts such as donuts, cookies, and cakes.</li>
-</ul>
-<h2>How to use an air fryer</h2>
-<p>To get the best results from your air fryer, you need to follow a few tips and tricks. Here are some of them:</p>
-<ul>
-<li><strong>Preheat:</strong> Most air fryers need to preheat for a few minutes before you add the food. This ensures that your food starts cooking right away and gets crispy.</li>
-
-<li><strong>Shake or flip:</strong> To help food crisp up, you need to shake the basket or flip the food halfway through the cooking time. This keeps the food from sticking to the basket and ensures even browning.</li>
-<li><strong>Spray lightly:</strong> If you want your food to have a golden color and a crispy texture, you can spray it lightly with cooking oil before or during cooking. This also helps keep the food from drying out. However, do not use too much oil, as it can drip into the drawer and cause smoke.</li>
-</ul>
-<h2>Kitchen Air Fryer Recipes Apk</h2>
-<p>If you are looking for some inspiration for your air fryer meals, you may want to check out Kitchen Air Fryer Recipes Apk. This is a free app that offers hundreds of air frying recipes, from appetizers and snacks to main dishes and desserts. You can browse by category, cuisine, or ingredient, or search for specific recipes. You can also save your favorite recipes, rate them, and share them with your friends.</p>
-<p>To download Kitchen Air Fryer Recipes Apk, you need to follow these steps:</p>
-<ol>
-<li>Go to [this link]( 1 ) on your Android device.</li>
-<li>Tap on "Download APK" and wait for the file to download.</li>
-<li>Open the file and tap on "Install". You may need to allow installation from unknown sources in your settings.</li>
-<li>Once the app is installed, open it and enjoy!</li>
-</ol>
-<h2>Some example recipes from the app</h2>
-<p>To give you an idea of what you can cook with Kitchen Air Fryer Recipes Apk, here are some example recipes from the app:</p>
-<table>
-<tr>
-<th>Category</th>
-<th>Recipe</th>
-<th>Cooking time</th>
-</tr>
-<tr>
-<td>Appetizers</td>
-<td>Air fryer french fries</td>
-<td>40 minutes</td>
-</tr>
-<tr>
-<td>Appetizers</td>
-<td>Air fryer asparagus</td>
-<td>20 minutes</td>
-</tr>
-<tr>
-<td>Main dishes</td>
-<td>Air fryer pork chops</td>
-<td>20 minutes</td>
-</tr>
-<tr>
-<td>Main dishes</td>
-<td>Air fryer pizza</td>
-<td>10 minutes</td>
-</tr>
-<tr>
-<td>Desserts</td>
-<td>Air fryer mini dark chocolate cake</td>
-<td>25 minutes</td>
-</tr>
-<tr>
-<td>Desserts</td>
-<td>Air fryer cherry cream cheese croissants</td>
-<td>15 minutes</td>
-</tr>
-</table>
-<h2>Conclusion</h2>
-<p>Air frying is a wonderful way to cook delicious meals with less oil and more flavor. You can make almost anything in an air fryer, from crispy snacks and juicy meats to tender vegetables and decadent desserts. With Kitchen Air Fryer Recipes Apk, you can access hundreds of air frying recipes, all for free. You can download the app from [this link]( 1 ) and start cooking right away. Whether you are new to air frying or a seasoned pro, you will find something to love in this app. Try it today and see for yourself!</p>
-<h2>Frequently asked questions</h2>
-<p>Here are some common questions and answers about air frying and Kitchen Air Fryer Recipes Apk:</p>
-<ol>
-<li><strong>What size air fryer do I need?</strong><br>The size of the air fryer depends on how much food you want to cook at once and how much space you have in your kitchen. Generally, a 3-to-5-quart air fryer can hold enough food for two to four people, while a 6-to-10-quart air fryer can hold enough food for four to eight people.</li>
-<li><strong>What are some of the best air fryer brands?</strong><br>There are many air fryer brands on the market, each with its own features and advantages. Some of the most popular and highly rated brands are Philips, Ninja, Cosori, Instant Pot, and Cuisinart.</li>
-<li><strong>How do I clean my air fryer?</strong><br>To clean your air fryer, you need to unplug it and let it cool down completely. Then, you can remove the basket and the drawer and wash them with warm, soapy water or in the dishwasher. You can wipe the inside and outside of the air fryer with a damp cloth or sponge. You can also use a soft brush or a toothpick to remove any food residue from the heating element.</li>
-
-<li><strong>Can I submit my own recipes to Kitchen Air Fryer Recipes Apk?</strong><br>Yes, you can submit your own recipes to Kitchen Air Fryer Recipes Apk using the "Submit recipe" button in the app. You can also rate and review other recipes, as well as share them with your friends on social media.</li>
-</ol></p>
-<p></p><br />
-<br />
-<br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Amantes Y Mejores Amigos Azana.md DELETED
@@ -1,68 +0,0 @@
-
- <h1>Download Lovers and Best Friends by Azana</h1>
- <p>If you are looking for a soulful and romantic song to add to your playlist, you may want to check out "Lovers and Best Friends" by Azana. Azana is a South African singer and songwriter who has captivated many listeners with her blend of afro-pop, vocal afro-house, and soul music. "Lovers and Best Friends" is one of the popular songs from her debut album Ingoma, which was released in 2020. The song features Disciples of House, a duo of talented producers who have worked with many South African artists.</p>
- <h2>download lovers and best friends azana</h2><br /><p><b><b>Download File</b> ::: <a href="https://bltlly.com/2v6IJX">https://bltlly.com/2v6IJX</a></b></p><br /><br />
- <p>In this article, we will tell you more about Azana, her music career, and the meaning and message of "Lovers and Best Friends". We will also show you how to download the song legally and support the artist. Whether you are a fan of Azana or simply curious about her music, read on to learn more.</p>
- <h2>Azana's biography and music career</h2>
- <p>Azana's real name is Makhosazana Masongo. She was born on September 13, 2000, in Chesterville, Durban. She is currently studying law at the University of the Free State. She discovered her passion for music at an early age and began singing in school and church choirs. She also admired artists such as Beyoncé, Nina Simone, Camagwini, Simphiwe Dana, and Letta Mbulu.</p>
- <p>Her music career took off when she signed a record deal with Big City Dreams in 2019. She released her first single "Your Love" in May 2020, which was produced by Taffy Da Don. The song was a huge hit and was certified double platinum by the Recording Industry of South Africa (RiSA). Her debut album Ingoma followed in July 2020. The album reached number one on the Apple Music Pop Chart and featured artists such as Afriikan Papi, Disciples of House, and Sun-El Musician.</p>
-
- <p>Azana has received recognition and acclaim for her music. She was nominated for Best Afro Pop Album and Newcomer of the Year at the 27th South African Music Awards (SAMAs) in 2021. She also won the award for Best Female Artist at the Mzansi Kwaito & House Music Awards (MKHMA) in 2021.</p>
- <h2>The meaning and message of "Lovers and Best Friends"</h2>
- <p>"Lovers and Best Friends" is a beautiful and heartfelt song that celebrates the bond between two people who are not only lovers but also best friends. The song expresses the joy and gratitude of finding someone who understands, supports, and appreciates you. The song also acknowledges the challenges and struggles that come with any relationship, but affirms the partners' commitment and loyalty.</p>
- <p>The song's lyrics are simple yet powerful. Azana sings in both English and Zulu, creating a contrast and harmony between the languages. She sings in the chorus: "You are my lover and my best friend/ You are my everything/ I love you more than words can say/ You are my lover and my best friend/ You are my everything/ I will never let you go". She also sings in Zulu: "Ngifuna wena wedwa/ Ngifuna wena wedwa/ Ngifuna wena wedwa/ Ngifuna wena wedwa" which means "I want you only/ I want you only/ I want you only/ I want you only".</p>
- <p>The song's production and genre are influenced by afro-house, a subgenre of house music that originated in South Africa. The song has a catchy, upbeat rhythm, with a mix of electronic beats, piano chords, and percussion. The song also features the vocals of Disciples of House, who add a layer of harmony and depth to the track. The song is suitable for dancing, relaxing, or simply enjoying the music.</p>
-
- <h2>The best ways to download and stream "Lovers and Best Friends"</h2>
- <p>If you want to download or stream "Lovers and Best Friends" by Azana, you have many options to choose from. The song is available on various platforms and services that offer legal and ethical ways to access music. Here are some of the best ways to download or stream the song:</p>
- <table>
- <tr>
- <th>Platform or service</th>
- <th>Features and benefits</th>
- </tr>
- <tr>
- <td>Apple Music</td>
- <td>- Offers unlimited downloads and streams of more than 75 million songs, including "Lovers and Best Friends" by Azana.<br>- Supports offline listening on multiple devices.<br>- Provides personalized recommendations, playlists, radio stations, and podcasts.<br>- Costs $9.99 per month for individuals, $14.99 per month for families, or $4.99 per month for students.<br>- Offers a free trial for three months.</td>
- </tr>
- <tr>
- <td>Spotify</td>
- <td>- Offers unlimited streams of more than 70 million songs, including "Lovers and Best Friends" by Azana.<br>- Allows downloads of up to 10,000 songs per device for premium users.<br>- Provides personalized recommendations, playlists, radio stations, podcasts, and videos.<br>- Costs $9.99 per month for individuals, $14.99 per month for families, or $4.99 per month for students.<br>- Offers a free version with ads and limited features.</td>
- </tr>
- <tr>
- <td>YouTube Music</td>
- <td>- Offers unlimited streams of more than 60 million songs, including "Lovers and Best Friends" by Azana.<br>- Allows downloads of up to 100,000 songs per device for premium users.<br>- Provides personalized recommendations, playlists, radio stations, podcasts, and videos.<br>- Costs $9.99 per month for individuals or $14.99 per month for families.<br>- Offers a free version with ads and limited features.</td>
- </tr>
- <tr>
- <td>Deezer</td>
-
- </tr>
- </table>
- <p>As you can see, there are many benefits to downloading or streaming "Lovers and Best Friends" by Azana legally and ethically. You can enjoy the song in high quality, support the artist and the music industry, and discover more music you might like. You can also avoid the risks of illegal downloading, such as viruses, malware, lawsuits, or fines.</p>
- <p>However, if you prefer not to download or stream the song, you can also buy the CD or vinyl of Ingoma by Azana, which includes "Lovers and Best Friends" and other songs. You can find the CD or vinyl online or in physical stores. Buying the CD or vinyl also gives you a physical copy of the album artwork, lyrics, and credits. You can also support the artist by buying her merchandise, such as T-shirts, hoodies, caps, or posters.</p>
- <h2>Conclusion</h2>
- <p>In conclusion, "Lovers and Best Friends" by Azana is a wonderful song that celebrates the love and friendship between two people. Azana is a talented and promising singer and songwriter who has impressed many fans and critics with her debut album Ingoma. She has also collaborated with many other artists, such as Sun-El Musician and Disciples of House. If you want to download or stream "Lovers and Best Friends" by Azana, you have many options to choose from. You can use platforms or services such as Apple Music, Spotify, YouTube Music, or Deezer. You can also buy the CD or vinyl of Ingoma by Azana or her merchandise. By doing so, you can support the artist and the music industry, and enjoy the song in high quality.</p>
- <p>We hope you enjoyed this article and learned something new about Azana and her music. If you liked "Lovers and Best Friends" by Azana, you might also like other songs by her or by similar artists. Some of our recommendations are:</p>
- <ul>
- <li>"Uhuru" by Sun-El Musician feat. Azana</li>
- <li>"Mamela" by Mi Casa feat. Azana</li>
- <li>"Uzobuya" by Sun-El Musician feat. Azana</li>
- <li>"Your Love" by Azana</li>
- <li>"Ngize Ngifike" by Sun-El Musician feat. Azana</li>
- <li>"Okhokho Bethu" by Vico Da Sporo feat. Azana</li>
- <li>"Jerusalema" by Master KG feat. Nomcebo Zikode</li>
- <li>"Fetch Your Life" by Prince Kaybee feat. Msaki</li>
- <li>"Banomoya" by Prince Kaybee feat. Busiswa and TNS</li>
- <li>"Drive" by Black Coffee feat. David Guetta and Delilah Montagu</li>
- </ul>
- <h2>Frequently Asked Questions</h2>
- <p>Here are some frequently asked questions and answers related to the topic:</p>
- <ol>
- <li><b>What is the genre of "Lovers and Best Friends" by Azana?</b><br>The genre of "Lovers and Best Friends" by Azana is afro-house, a subgenre of house music that originated in South Africa.</li>
- <li><b>Who are the featured artists on "Lovers and Best Friends" by Azana?</b><br>The featured artists on "Lovers and Best Friends" by Azana are Disciples of House, a duo of producers who have worked with many South African artists.</li>
- <li><b>When was "Lovers and Best Friends" by Azana released?</b><br>"Lovers and Best Friends" by Azana was released on July 17, 2020, as part of her debut album Ingoma.</li>
- <li><b>How can I download or stream "Lovers and Best Friends" by Azana legally and ethically?</b><br>You can download or stream "Lovers and Best Friends" by Azana legally and ethically using platforms or services such as Apple Music, Spotify, YouTube Music, or Deezer. You can also buy the CD or vinyl of Ingoma by Azana or her merchandise.</li>
- <li><b>What are some other songs by Azana or similar artists that I might like?</b><br>Some other songs by Azana or similar artists that you might like are: "Uhuru" by Sun-El Musician feat. Azana, "Mamela" by Mi Casa feat. Azana, "Uzobuya" by Sun-El Musician feat. Azana, "Your Love" by Azana, "Ngize Ngifike" by Sun-El Musician feat. Azana, "Okhokho Bethu" by Vico Da Sporo feat. Azana, "Jerusalema" by Master KG feat. Nomcebo Zikode, "Fetch Your Life" by Prince Kaybee feat. Msaki, "Banomoya" by Prince Kaybee feat. Busiswa and TNS, and "Drive" by Black Coffee feat. David Guetta and Delilah Montagu.</li>
- </ol>
spaces/Bishan/Speech_To_Text_Hindi/app.py DELETED
@@ -1,83 +0,0 @@
- import soundfile as sf
- import torch
- from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor, Wav2Vec2ProcessorWithLM
- import gradio as gr
- import subprocess
- import time
-
-
- def read_file_and_process(wav_file):
-     # Resample the uploaded audio to 16 kHz mono before feeding it to the model.
-     filename = wav_file.rsplit('.', 1)[0]
-     filename_16k = filename + "16k.wav"
-     resampler(wav_file, filename_16k)
-     speech, _ = sf.read(filename_16k)
-     inputs = processor(speech, sampling_rate=16_000, return_tensors="pt", padding=True)
-     return inputs
-
-
- def resampler(input_file_path, output_file_path):
-     # Use ffmpeg to convert the input to 16 kHz, mono, 16-bit PCM.
-     command = (
-         f"ffmpeg -hide_banner -loglevel panic -i {input_file_path} -ar 16000 -ac 1 -bits_per_raw_sample 16 -vn "
-         f"{output_file_path}"
-     )
-     subprocess.call(command, shell=True)
-
-
- def parse_transcription_with_lm(logits):
-     # Beam-search decoding with a language model.
-     result = processor_with_LM.batch_decode(logits.cpu().numpy())
-     text = result.text
-     transcription = text[0].replace('<s>', '')
-     return transcription
-
-
- def parse_transcription(logits):
-     # Greedy CTC decoding without a language model.
-     predicted_ids = torch.argmax(logits, dim=-1)
-     transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
-     return transcription
-
-
- def parse(wav_file, applyLM):
-     start = time.time()
-     input_values = read_file_and_process(wav_file)
-     with torch.no_grad():
-         logits = model(**input_values).logits
-     if applyLM:
-         output = parse_transcription_with_lm(logits)
-     else:
-         output = parse_transcription(logits)
-     end = time.time()
-     print(f"Execution time: {(end - start) * 10**3:.0f} ms")
-     return output
-
-
- model_id = "Harveenchadha/vakyansh-wav2vec2-hindi-him-4200"
- processor = Wav2Vec2Processor.from_pretrained(model_id)
- processor_with_LM = Wav2Vec2ProcessorWithLM.from_pretrained(model_id)
- model = Wav2Vec2ForCTC.from_pretrained(model_id)
-
-
- input_ = gr.Audio(source="upload", type="filepath")
- txtbox = gr.Textbox(
-     label="Output from model will appear here:",
-     lines=5
- )
- chkbox = gr.Checkbox(label="Apply LM", value=False)
-
-
- gr.Interface(parse, inputs=[input_, chkbox], outputs=txtbox,
-              streaming=True, interactive=True,
-              analytics_enabled=False, show_tips=False, enable_queue=True).launch(inline=False)
spaces/Buatong/Computing/app.py DELETED
@@ -1,7 +0,0 @@
- import gradio as gr
-
- def greet(name):
-     return "Hello " + name + "!!"
-
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
- iface.launch()
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/tests/test_model_e2e.py DELETED
@@ -1,43 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
-
- import unittest
- import torch
-
- from detectron2.structures import BitMasks, Boxes, Instances
-
- from .common import get_model
-
-
- # TODO(plabatut): Modularize detectron2 tests and re-use
- def make_model_inputs(image, instances=None):
-     if instances is None:
-         return {"image": image}
-
-     return {"image": image, "instances": instances}
-
-
- def make_empty_instances(h, w):
-     instances = Instances((h, w))
-     instances.gt_boxes = Boxes(torch.rand(0, 4))
-     instances.gt_classes = torch.tensor([]).to(dtype=torch.int64)
-     instances.gt_masks = BitMasks(torch.rand(0, h, w))
-     return instances
-
-
- class ModelE2ETest(unittest.TestCase):
-     CONFIG_PATH = ""
-
-     def setUp(self):
-         self.model = get_model(self.CONFIG_PATH)
-
-     def _test_eval(self, sizes):
-         inputs = [make_model_inputs(torch.rand(3, size[0], size[1])) for size in sizes]
-         self.model.eval()
-         self.model(inputs)
-
-
- class DensePoseRCNNE2ETest(ModelE2ETest):
-     CONFIG_PATH = "densepose_rcnn_R_101_FPN_s1x.yaml"
-
-     def test_empty_data(self):
-         self._test_eval([(200, 250), (200, 249)])
spaces/CVPR/LIVE/pybind11/include/pybind11/options.h DELETED
@@ -1,65 +0,0 @@
- /*
-     pybind11/options.h: global settings that are configurable at runtime.
-
-     Copyright (c) 2016 Wenzel Jakob <[email protected]>
-
-     All rights reserved. Use of this source code is governed by a
-     BSD-style license that can be found in the LICENSE file.
- */
-
- #pragma once
-
- #include "detail/common.h"
-
- PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
-
- class options {
- public:
-
-     // Default RAII constructor, which leaves settings as they currently are.
-     options() : previous_state(global_state()) {}
-
-     // Class is non-copyable.
-     options(const options&) = delete;
-     options& operator=(const options&) = delete;
-
-     // Destructor, which restores settings that were in effect before.
-     ~options() {
-         global_state() = previous_state;
-     }
-
-     // Setter methods (affect the global state):
-
-     options& disable_user_defined_docstrings() & { global_state().show_user_defined_docstrings = false; return *this; }
-
-     options& enable_user_defined_docstrings() & { global_state().show_user_defined_docstrings = true; return *this; }
-
-     options& disable_function_signatures() & { global_state().show_function_signatures = false; return *this; }
-
-     options& enable_function_signatures() & { global_state().show_function_signatures = true; return *this; }
-
-     // Getter methods (return the global state):
-
-     static bool show_user_defined_docstrings() { return global_state().show_user_defined_docstrings; }
-
-     static bool show_function_signatures() { return global_state().show_function_signatures; }
-
-     // This type is not meant to be allocated on the heap.
-     void* operator new(size_t) = delete;
-
- private:
-
-     struct state {
-         bool show_user_defined_docstrings = true;  //< Include user-supplied texts in docstrings.
-         bool show_function_signatures = true;      //< Include auto-generated function signatures in docstrings.
-     };
-
-     static state &global_state() {
-         static state instance;
-         return instance;
-     }
-
-     state previous_state;
- };
-
- PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
spaces/CVPR/LIVE/pybind11/tests/test_call_policies.cpp DELETED
@@ -1,101 +0,0 @@
- /*
-     tests/test_call_policies.cpp -- keep_alive and call_guard
-
-     Copyright (c) 2016 Wenzel Jakob <[email protected]>
-
-     All rights reserved. Use of this source code is governed by a
-     BSD-style license that can be found in the LICENSE file.
- */
-
- #include "pybind11_tests.h"
-
- struct CustomGuard {
-     static bool enabled;
-
-     CustomGuard() { enabled = true; }
-     ~CustomGuard() { enabled = false; }
-
-     static const char *report_status() { return enabled ? "guarded" : "unguarded"; }
- };
- bool CustomGuard::enabled = false;
-
- struct DependentGuard {
-     static bool enabled;
-
-     DependentGuard() { enabled = CustomGuard::enabled; }
-     ~DependentGuard() { enabled = false; }
-
-     static const char *report_status() { return enabled ? "guarded" : "unguarded"; }
- };
- bool DependentGuard::enabled = false;
-
- TEST_SUBMODULE(call_policies, m) {
-     // Parent/Child are used in:
-     // test_keep_alive_argument, test_keep_alive_return_value, test_alive_gc_derived,
-     // test_alive_gc_multi_derived, test_return_none, test_keep_alive_constructor
-     class Child {
-     public:
-         Child() { py::print("Allocating child."); }
-         Child(const Child &) = default;
-         Child(Child &&) = default;
-         ~Child() { py::print("Releasing child."); }
-     };
-     py::class_<Child>(m, "Child")
-         .def(py::init<>());
-
-     class Parent {
-     public:
-         Parent() { py::print("Allocating parent."); }
-         Parent(const Parent& parent) = default;
-         ~Parent() { py::print("Releasing parent."); }
-         void addChild(Child *) { }
-         Child *returnChild() { return new Child(); }
-         Child *returnNullChild() { return nullptr; }
-     };
-     py::class_<Parent>(m, "Parent")
-         .def(py::init<>())
-         .def(py::init([](Child *) { return new Parent(); }), py::keep_alive<1, 2>())
-         .def("addChild", &Parent::addChild)
-         .def("addChildKeepAlive", &Parent::addChild, py::keep_alive<1, 2>())
-         .def("returnChild", &Parent::returnChild)
-         .def("returnChildKeepAlive", &Parent::returnChild, py::keep_alive<1, 0>())
-         .def("returnNullChildKeepAliveChild", &Parent::returnNullChild, py::keep_alive<1, 0>())
-         .def("returnNullChildKeepAliveParent", &Parent::returnNullChild, py::keep_alive<0, 1>());
-
- #if !defined(PYPY_VERSION)
-     // test_alive_gc
-     class ParentGC : public Parent {
-     public:
-         using Parent::Parent;
-     };
-     py::class_<ParentGC, Parent>(m, "ParentGC", py::dynamic_attr())
-         .def(py::init<>());
- #endif
-
-     // test_call_guard
-     m.def("unguarded_call", &CustomGuard::report_status);
-     m.def("guarded_call", &CustomGuard::report_status, py::call_guard<CustomGuard>());
-
-     m.def("multiple_guards_correct_order", []() {
-         return CustomGuard::report_status() + std::string(" & ") + DependentGuard::report_status();
-     }, py::call_guard<CustomGuard, DependentGuard>());
-
-     m.def("multiple_guards_wrong_order", []() {
-         return DependentGuard::report_status() + std::string(" & ") + CustomGuard::report_status();
-     }, py::call_guard<DependentGuard, CustomGuard>());
-
- #if defined(WITH_THREAD) && !defined(PYPY_VERSION)
-     // `py::call_guard<py::gil_scoped_release>()` should work in PyPy as well,
-     // but it's unclear how to test it without `PyGILState_GetThisThreadState`.
-     auto report_gil_status = []() {
-         auto is_gil_held = false;
-         if (auto tstate = py::detail::get_thread_state_unchecked())
-             is_gil_held = (tstate == PyGILState_GetThisThreadState());
-
-         return is_gil_held ? "GIL held" : "GIL released";
-     };
-
-     m.def("with_gil", report_gil_status);
-     m.def("without_gil", report_gil_status, py::call_guard<py::gil_scoped_release>());
- #endif
- }
spaces/CVPR/LIVE/thrust/thrust/advance.h DELETED
@@ -1,141 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
-
- /*! \file advance.h
-  *  \brief Advance an iterator by a given distance.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
-
- namespace thrust
- {
-
- /*! \addtogroup iterators
-  *  \{
-  */
-
- /*! \p advance(i, n) increments the iterator \p i by the distance \p n.
-  *  If <tt>n > 0</tt> it is equivalent to executing <tt>++i</tt> \p n
-  *  times, and if <tt>n < 0</tt> it is equivalent to executing <tt>--i</tt>
-  *  \p n times. If <tt>n == 0</tt>, the call has no effect.
-  *
-  *  \param i The iterator to be advanced.
-  *  \param n The distance by which to advance the iterator.
-  *
-  *  \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
-  *  \tparam Distance is an integral type that is convertible to \p InputIterator's distance type.
-  *
-  *  \pre \p n shall be negative only for bidirectional and random access iterators.
-  *
-  *  The following code snippet demonstrates how to use \p advance to increment
-  *  an iterator a given number of times.
-  *
-  *  \code
-  *  #include <thrust/advance.h>
-  *  #include <thrust/device_vector.h>
-  *  ...
-  *  thrust::device_vector<int> vec(13);
-  *  thrust::device_vector<int>::iterator iter = vec.begin();
-  *
-  *  thrust::advance(iter, 7);
-  *
-  *  // iter - vec.begin() == 7
-  *  \endcode
-  *
-  *  \see http://www.sgi.com/tech/stl/advance.html
-  */
- template <typename InputIterator, typename Distance>
- __host__ __device__
- void advance(InputIterator& i, Distance n);
-
- /*! \p next(i, n) returns the \p n th successor of the iterator \p i.
-  *
-  *  \param i An iterator.
-  *  \param n The number of elements to advance.
-  *
-  *  \tparam InputIterator must meet the <a href="https://en.cppreference.com/w/cpp/named_req/InputIterator">InputIterator</a>.
-  *
-  *  \pre \p n shall be negative only for bidirectional and random access iterators.
-  *
-  *  The following code snippet demonstrates how to use \p next.
-  *
-  *  \code
-  *  #include <thrust/advance.h>
-  *  #include <thrust/device_vector.h>
-  *  ...
-  *  thrust::device_vector<int> vec(13);
-  *  thrust::device_vector<int>::iterator i0 = vec.begin();
-  *
-  *  auto i1 = thrust::next(i0);
-  *
-  *  // i0 - vec.begin() == 0
-  *  // i1 - vec.begin() == 1
-  *  \endcode
-  *
-  *  \see https://en.cppreference.com/w/cpp/iterator/next
-  */
- #if 0 // Doxygen only
- template <typename InputIterator, typename Distance>
- __host__ __device__
- InputIterator next(
-     InputIterator i
-   , typename iterator_traits<InputIterator>::difference_type n = 1
- );
- #endif
-
- /*! \p prev(i, n) returns the \p n th predecessor of the iterator \p i.
-  *
-  *  \param i An iterator.
-  *  \param n The number of elements to descend.
-  *
-  *  \tparam BidirectionalIterator must meet the <a href="https://en.cppreference.com/w/cpp/named_req/BidirectionalIterator">BidirectionalIterator</a>.
-  *
-  *  The following code snippet demonstrates how to use \p prev.
-  *
-  *  \code
-  *  #include <thrust/advance.h>
-  *  #include <thrust/device_vector.h>
-  *  ...
-  *  thrust::device_vector<int> vec(13);
-  *  thrust::device_vector<int>::iterator i0 = vec.end();
-  *
-  *  auto i1 = thrust::prev(i0);
-  *
-  *  // vec.end() - i0 == 0
-  *  // vec.end() - i1 == 1
-  *  \endcode
-  *
-  *  \see https://en.cppreference.com/w/cpp/iterator/prev
-  */
- #if 0 // Doxygen only
- template <typename BidirectionalIterator, typename Distance>
- __host__ __device__
- BidirectionalIterator prev(
-     BidirectionalIterator i
-   , typename iterator_traits<BidirectionalIterator>::difference_type n = 1
- );
- #endif
-
- /*! \} // end iterators
-  */
-
- } // end thrust
-
- #include <thrust/detail/advance.inl>
spaces/CVPR/LIVE/thrust/thrust/system/detail/generic/adjacent_difference.h DELETED
@@ -1,58 +0,0 @@
- /*
-  *  Copyright 2008-2013 NVIDIA Corporation
-  *
-  *  Licensed under the Apache License, Version 2.0 (the "License");
-  *  you may not use this file except in compliance with the License.
-  *  You may obtain a copy of the License at
-  *
-  *      http://www.apache.org/licenses/LICENSE-2.0
-  *
-  *  Unless required by applicable law or agreed to in writing, software
-  *  distributed under the License is distributed on an "AS IS" BASIS,
-  *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  *  See the License for the specific language governing permissions and
-  *  limitations under the License.
-  */
-
-
- /*! \file adjacent_difference.h
-  *  \brief Generic implementation of adjacent_difference.
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/system/detail/generic/tag.h>
-
- namespace thrust
- {
- namespace system
- {
- namespace detail
- {
- namespace generic
- {
-
-
- template<typename DerivedPolicy, typename InputIterator, typename OutputIterator>
- __host__ __device__
- OutputIterator adjacent_difference(thrust::execution_policy<DerivedPolicy> &exec,
-                                    InputIterator first, InputIterator last,
-                                    OutputIterator result);
-
-
- template<typename DerivedPolicy, typename InputIterator, typename OutputIterator, typename BinaryFunction>
- __host__ __device__
- OutputIterator adjacent_difference(thrust::execution_policy<DerivedPolicy> &exec,
-                                    InputIterator first, InputIterator last,
-                                    OutputIterator result,
-                                    BinaryFunction binary_op);
-
-
- } // end namespace generic
- } // end namespace detail
- } // end namespace system
- } // end namespace thrust
-
- #include <thrust/system/detail/generic/adjacent_difference.inl>
spaces/CVPR/regionclip-demo/detectron2/modeling/proposal_generator/rrpn.py DELETED
@@ -1,203 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import itertools
- import logging
- from typing import Dict, List
- import torch
-
- from detectron2.config import configurable
- from detectron2.layers import ShapeSpec, batched_nms_rotated, cat
- from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated
- from detectron2.utils.memory import retry_if_cuda_oom
-
- from ..box_regression import Box2BoxTransformRotated
- from .build import PROPOSAL_GENERATOR_REGISTRY
- from .rpn import RPN
-
- logger = logging.getLogger(__name__)
-
-
- def find_top_rrpn_proposals(
-     proposals,
-     pred_objectness_logits,
-     image_sizes,
-     nms_thresh,
-     pre_nms_topk,
-     post_nms_topk,
-     min_box_size,
-     training,
- ):
-     """
-     For each feature map, select the `pre_nms_topk` highest scoring proposals,
-     apply NMS, clip proposals, and remove small boxes. Return the `post_nms_topk`
-     highest scoring proposals among all the feature maps if `training` is True,
-     otherwise, returns the highest `post_nms_topk` scoring proposals for each
-     feature map.
-
-     Args:
-         proposals (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A, 5).
-             All proposal predictions on the feature maps.
-         pred_objectness_logits (list[Tensor]): A list of L tensors. Tensor i has shape (N, Hi*Wi*A).
-         image_sizes (list[tuple]): sizes (h, w) for each image
-         nms_thresh (float): IoU threshold to use for NMS
-         pre_nms_topk (int): number of top k scoring proposals to keep before applying NMS.
-             When RRPN is run on multiple feature maps (as in FPN) this number is per
-             feature map.
-         post_nms_topk (int): number of top k scoring proposals to keep after applying NMS.
-             When RRPN is run on multiple feature maps (as in FPN) this number is total,
-             over all feature maps.
-         min_box_size(float): minimum proposal box side length in pixels (absolute units wrt
-             input images).
-         training (bool): True if proposals are to be used in training, otherwise False.
-             This arg exists only to support a legacy bug; look for the "NB: Legacy bug ..."
-             comment.
-
-     Returns:
-         proposals (list[Instances]): list of N Instances. The i-th Instances
-             stores post_nms_topk object proposals for image i.
-     """
-     num_images = len(image_sizes)
-     device = proposals[0].device
-
-     # 1. Select top-k anchor for every level and every image
-     topk_scores = []  # #lvl Tensor, each of shape N x topk
-     topk_proposals = []
-     level_ids = []  # #lvl Tensor, each of shape (topk,)
-     batch_idx = torch.arange(num_images, device=device)
-     for level_id, proposals_i, logits_i in zip(
-         itertools.count(), proposals, pred_objectness_logits
-     ):
-         Hi_Wi_A = logits_i.shape[1]
-         num_proposals_i = min(pre_nms_topk, Hi_Wi_A)
-
-         # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812)
-         # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1)
-         logits_i, idx = logits_i.sort(descending=True, dim=1)
-         topk_scores_i = logits_i[batch_idx, :num_proposals_i]
-         topk_idx = idx[batch_idx, :num_proposals_i]
-
-         # each is N x topk
-         topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx]  # N x topk x 5
-
-         topk_proposals.append(topk_proposals_i)
-         topk_scores.append(topk_scores_i)
-         level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device))
-
-     # 2. Concat all levels together
-     topk_scores = cat(topk_scores, dim=1)
-     topk_proposals = cat(topk_proposals, dim=1)
-     level_ids = cat(level_ids, dim=0)
-
-     # 3. For each image, run a per-level NMS, and choose topk results.
-     results = []
-     for n, image_size in enumerate(image_sizes):
-         boxes = RotatedBoxes(topk_proposals[n])
-         scores_per_img = topk_scores[n]
-         valid_mask = torch.isfinite(boxes.tensor).all(dim=1) & torch.isfinite(scores_per_img)
-         if not valid_mask.all():
-             boxes = boxes[valid_mask]
-             scores_per_img = scores_per_img[valid_mask]
-         boxes.clip(image_size)
-
-         # filter empty boxes
-         keep = boxes.nonempty(threshold=min_box_size)
-         lvl = level_ids
-         if keep.sum().item() != len(boxes):
-             boxes, scores_per_img, lvl = (boxes[keep], scores_per_img[keep], level_ids[keep])
-
-         keep = batched_nms_rotated(boxes.tensor, scores_per_img, lvl, nms_thresh)
-         # In Detectron1, there was different behavior during training vs. testing.
-         # (https://github.com/facebookresearch/Detectron/issues/459)
-         # During training, topk is over the proposals from *all* images in the training batch.
-         # During testing, it is over the proposals for each image separately.
-         # As a result, the training behavior becomes batch-dependent,
-         # and the configuration "POST_NMS_TOPK_TRAIN" end up relying on the batch size.
-         # This bug is addressed in Detectron2 to make the behavior independent of batch size.
-         keep = keep[:post_nms_topk]
-
-         res = Instances(image_size)
-         res.proposal_boxes = boxes[keep]
-         res.objectness_logits = scores_per_img[keep]
-         results.append(res)
-     return results
-
-
- @PROPOSAL_GENERATOR_REGISTRY.register()
- class RRPN(RPN):
-     """
-     Rotated Region Proposal Network described in :paper:`RRPN`.
-     """
-
-     @configurable
-     def __init__(self, *args, **kwargs):
-         super().__init__(*args, **kwargs)
-         if self.anchor_boundary_thresh >= 0:
-             raise NotImplementedError(
-                 "anchor_boundary_thresh is a legacy option not implemented for RRPN."
-             )
-
-     @classmethod
-     def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
-         ret = super().from_config(cfg, input_shape)
-         ret["box2box_transform"] = Box2BoxTransformRotated(weights=cfg.MODEL.RPN.BBOX_REG_WEIGHTS)
-         return ret
-
-     @torch.no_grad()
-     def label_and_sample_anchors(self, anchors: List[RotatedBoxes], gt_instances: List[Instances]):
-         """
-         Args:
-             anchors (list[RotatedBoxes]): anchors for each feature map.
-             gt_instances: the ground-truth instances for each image.
-
-         Returns:
-             list[Tensor]:
-                 List of #img tensors. i-th element is a vector of labels whose length is
-                 the total number of anchors across feature maps. Label values are in {-1, 0, 1},
-                 with meanings: -1 = ignore; 0 = negative class; 1 = positive class.
-             list[Tensor]:
-                 i-th element is a Nx5 tensor, where N is the total number of anchors across
-                 feature maps. The values are the matched gt boxes for each anchor.
-                 Values are undefined for those anchors not labeled as 1.
-         """
-         anchors = RotatedBoxes.cat(anchors)
-
-         gt_boxes = [x.gt_boxes for x in gt_instances]
-         del gt_instances
-
-         gt_labels = []
-         matched_gt_boxes = []
-         for gt_boxes_i in gt_boxes:
-             """
-             gt_boxes_i: ground-truth boxes for i-th image
-             """
-             match_quality_matrix = retry_if_cuda_oom(pairwise_iou_rotated)(gt_boxes_i, anchors)
-             matched_idxs, gt_labels_i = retry_if_cuda_oom(self.anchor_matcher)(match_quality_matrix)
-             # Matching is memory-expensive and may result in CPU tensors. But the result is small
-             gt_labels_i = gt_labels_i.to(device=gt_boxes_i.device)
-
-             # A vector of labels (-1, 0, 1) for each anchor
-             gt_labels_i = self._subsample_labels(gt_labels_i)
-
-             if len(gt_boxes_i) == 0:
-                 # These values won't be used anyway since the anchor is labeled as background
-                 matched_gt_boxes_i = torch.zeros_like(anchors.tensor)
-             else:
-                 # TODO wasted indexing computation for ignored boxes
-                 matched_gt_boxes_i = gt_boxes_i[matched_idxs].tensor
-
-             gt_labels.append(gt_labels_i)  # N,AHW
-             matched_gt_boxes.append(matched_gt_boxes_i)
-         return gt_labels, matched_gt_boxes
-
-     @torch.no_grad()
-     def predict_proposals(self, anchors, pred_objectness_logits, pred_anchor_deltas, image_sizes):
-         pred_proposals = self._decode_proposals(anchors, pred_anchor_deltas)
-         return find_top_rrpn_proposals(
-             pred_proposals,
-             pred_objectness_logits,
-             image_sizes,
-             self.nms_thresh,
-             self.pre_nms_topk[self.training],
-             self.post_nms_topk[self.training],
-             self.min_box_size,
-             self.training,
-         )
spaces/CanonOverseer/Canons-Den/Dockerfile DELETED
@@ -1,11 +0,0 @@
- FROM node:18-bullseye-slim
- RUN apt-get update && \
-     apt-get install -y git
- RUN git clone https://gitgud.io/khanon/oai-reverse-proxy.git /app
- WORKDIR /app
- RUN npm install
- COPY Dockerfile greeting.md* .env* ./
- RUN npm run build
- EXPOSE 7860
- ENV NODE_ENV=production
- CMD [ "npm", "start" ]
spaces/CarlDennis/Lovelive-VITS-JPZH/text/korean.py DELETED
@@ -1,205 +0,0 @@
- import re
- from jamo import h2j, j2hcj
- import ko_pron
-
-
- # This is a list of Korean classifiers preceded by pure Korean numerals.
- _korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
-
- # List of (hangul, hangul divided) pairs:
- _hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
-     ('ㄳ', 'ㄱㅅ'),
-     ('ㄵ', 'ㄴㅈ'),
-     ('ㄶ', 'ㄴㅎ'),
-     ('ㄺ', 'ㄹㄱ'),
-     ('ㄻ', 'ㄹㅁ'),
-     ('ㄼ', 'ㄹㅂ'),
-     ('ㄽ', 'ㄹㅅ'),
-     ('ㄾ', 'ㄹㅌ'),
-     ('ㄿ', 'ㄹㅍ'),
-     ('ㅀ', 'ㄹㅎ'),
-     ('ㅄ', 'ㅂㅅ'),
-     ('ㅘ', 'ㅗㅏ'),
-     ('ㅙ', 'ㅗㅐ'),
-     ('ㅚ', 'ㅗㅣ'),
-     ('ㅝ', 'ㅜㅓ'),
-     ('ㅞ', 'ㅜㅔ'),
-     ('ㅟ', 'ㅜㅣ'),
-     ('ㅢ', 'ㅡㅣ'),
-     ('ㅑ', 'ㅣㅏ'),
-     ('ㅒ', 'ㅣㅐ'),
-     ('ㅕ', 'ㅣㅓ'),
-     ('ㅖ', 'ㅣㅔ'),
-     ('ㅛ', 'ㅣㅗ'),
-     ('ㅠ', 'ㅣㅜ')
- ]]
-
- # List of (Latin alphabet, hangul) pairs:
- _latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
-     ('a', '에이'),
-     ('b', '비'),
-     ('c', '시'),
-     ('d', '디'),
-     ('e', '이'),
-     ('f', '에프'),
-     ('g', '지'),
-     ('h', '에이치'),
-     ('i', '아이'),
-     ('j', '제이'),
-     ('k', '케이'),
-     ('l', '엘'),
-     ('m', '엠'),
-     ('n', '엔'),
-     ('o', '오'),
-     ('p', '피'),
-     ('q', '큐'),
-     ('r', '아르'),
-     ('s', '에스'),
-     ('t', '티'),
-     ('u', '유'),
-     ('v', '브이'),
-     ('w', '더블유'),
-     ('x', '엑스'),
-     ('y', '와이'),
-     ('z', '제트')
- ]]
-
- # List of (ipa, lazy ipa) pairs:
- _ipa_to_lazy_ipa = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
-     ('t͡ɕ','ʧ'),
-     ('d͡ʑ','ʥ'),
-     ('ɲ','n^'),
-     ('ɕ','ʃ'),
-     ('ʷ','w'),
-     ('ɭ','l`'),
-     ('ʎ','ɾ'),
-     ('ɣ','ŋ'),
-     ('ɰ','ɯ'),
-     ('ʝ','j'),
-     ('ʌ','ə'),
-     ('ɡ','g'),
-     ('\u031a','#'),
-     ('\u0348','='),
-     ('\u031e',''),
-     ('\u0320',''),
-     ('\u0339','')
- ]]
-
-
- def latin_to_hangul(text):
-     for regex, replacement in _latin_to_hangul:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def divide_hangul(text):
-     text = j2hcj(h2j(text))
-     for regex, replacement in _hangul_divided:
-         text = re.sub(regex, replacement, text)
-     return text
-
-
- def hangul_number(num, sino=True):
-     '''Reference https://github.com/Kyubyong/g2pK'''
-     num = re.sub(',', '', num)
-
-     if num == '0':
-         return '영'
-     if not sino and num == '20':
-         return '스무'
-
-     digits = '123456789'
-     names = '일이삼사오육칠팔구'
-     digit2name = {d: n for d, n in zip(digits, names)}
-
-     modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
-     decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
-     digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
-     digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
-
-     spelledout = []
-     for i, digit in enumerate(num):
-         i = len(num) - i - 1
-         if sino:
-             if i == 0:
-                 name = digit2name.get(digit, '')
-             elif i == 1:
-                 name = digit2name.get(digit, '') + '십'
-                 name = name.replace('일십', '십')
-         else:
-             if i == 0:
-                 name = digit2mod.get(digit, '')
-             elif i == 1:
-                 name = digit2dec.get(digit, '')
-         if digit == '0':
-             if i % 4 == 0:
-                 last_three = spelledout[-min(3, len(spelledout)):]
-                 if ''.join(last_three) == '':
-                     spelledout.append('')
-                     continue
-             else:
-                 spelledout.append('')
-                 continue
-         if i == 2:
-             name = digit2name.get(digit, '') + '백'
-             name = name.replace('일백', '백')
-         elif i == 3:
-             name = digit2name.get(digit, '') + '천'
-             name = name.replace('일천', '천')
-         elif i == 4:
-             name = digit2name.get(digit, '') + '만'
-             name = name.replace('일만', '만')
-         elif i == 5:
-             name = digit2name.get(digit, '') + '십'
-             name = name.replace('일십', '십')
-         elif i == 6:
-             name = digit2name.get(digit, '') + '백'
-             name = name.replace('일백', '백')
-         elif i == 7:
-             name = digit2name.get(digit, '') + '천'
-             name = name.replace('일천', '천')
-         elif i == 8:
-             name = digit2name.get(digit, '') + '억'
-         elif i == 9:
-             name = digit2name.get(digit, '') + '십'
-         elif i == 10:
-             name = digit2name.get(digit, '') + '백'
-         elif i == 11:
-             name = digit2name.get(digit, '') + '천'
-         elif i == 12:
-             name = digit2name.get(digit, '') + '조'
-         elif i == 13:
-             name = digit2name.get(digit, '') + '십'
-         elif i == 14:
-             name = digit2name.get(digit, '') + '백'
-         elif i == 15:
-             name = digit2name.get(digit, '') + '천'
-         spelledout.append(name)
-     return ''.join(elem for elem in spelledout)
-
-
- def number_to_hangul(text):
-     '''Reference https://github.com/Kyubyong/g2pK'''
-     tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
-     for token in tokens:
-         num, classifier = token
-         if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
-             spelledout = hangul_number(num, sino=False)
-         else:
-             spelledout = hangul_number(num, sino=True)
-         text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
-     # digit by digit for remaining digits
-     digits = '0123456789'
-     names = '영일이삼사오육칠팔구'
-     for d, n in zip(digits, names):
-         text = text.replace(d, n)
-     return text
-
-
- def korean_to_lazy_ipa(text):
-     text = latin_to_hangul(text)
-     text = number_to_hangul(text)
-     text = re.sub('[\uac00-\ud7af]+', lambda x: ko_pron.romanise(x.group(0), 'ipa'), text).split('] ~ [')[0]
-     for regex, replacement in _ipa_to_lazy_ipa:
-         text = re.sub(regex, replacement, text)
-     return text
spaces/ChandraMohanNayal/AutoGPT/autogpt/__main__.py DELETED
@@ -1,5 +0,0 @@
- """Auto-GPT: A GPT powered AI Assistant"""
- import autogpt.cli
-
- if __name__ == "__main__":
-     autogpt.cli.main()
spaces/ChrisCaviar/ControlNet-v1-1/app_segmentation.py DELETED
@@ -1,104 +0,0 @@
- #!/usr/bin/env python
-
- import gradio as gr
-
- from utils import randomize_seed_fn
-
-
- def create_demo(process, max_images=12, default_num_images=3):
-     with gr.Blocks() as demo:
-         with gr.Row():
-             with gr.Column():
-                 image = gr.Image()
-                 prompt = gr.Textbox(label='Prompt')
-                 run_button = gr.Button('Run')
-                 with gr.Accordion('Advanced options', open=False):
-                     preprocessor_name = gr.Radio(label='Preprocessor',
-                                                  choices=['UPerNet', 'None'],
-                                                  type='value',
-                                                  value='UPerNet')
-                     num_samples = gr.Slider(label='Number of images',
-                                             minimum=1,
-                                             maximum=max_images,
-                                             value=default_num_images,
-                                             step=1)
-                     image_resolution = gr.Slider(label='Image resolution',
-                                                  minimum=256,
-                                                  maximum=512,
-                                                  value=512,
-                                                  step=256)
-                     preprocess_resolution = gr.Slider(
-                         label='Preprocess resolution',
-                         minimum=128,
-                         maximum=512,
-                         value=512,
-                         step=1)
-                     num_steps = gr.Slider(label='Number of steps',
-                                           minimum=1,
-                                           maximum=100,
-                                           value=20,
-                                           step=1)
-                     guidance_scale = gr.Slider(label='Guidance scale',
-                                                minimum=0.1,
-                                                maximum=30.0,
-                                                value=9.0,
-                                                step=0.1)
-                     seed = gr.Slider(label='Seed',
-                                      minimum=0,
-                                      maximum=1000000,
-                                      step=1,
-                                      value=0,
-                                      randomize=True)
-                     randomize_seed = gr.Checkbox(label='Randomize seed',
-                                                  value=True)
-                     a_prompt = gr.Textbox(
-                         label='Additional prompt',
-                         value='best quality, extremely detailed')
-                     n_prompt = gr.Textbox(
-                         label='Negative prompt',
-                         value=
-                         'longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality'
-                     )
-             with gr.Column():
-                 result = gr.Gallery(label='Output', show_label=False).style(
-                     columns=2, object_fit='scale-down')
-         inputs = [
-             image,
-             prompt,
-             a_prompt,
-             n_prompt,
-             num_samples,
-             image_resolution,
-             preprocess_resolution,
-             num_steps,
-             guidance_scale,
-             seed,
-             preprocessor_name,
-         ]
-         prompt.submit(
-             fn=randomize_seed_fn,
-             inputs=[seed, randomize_seed],
-             outputs=seed,
-         ).then(
-             fn=process,
-             inputs=inputs,
-             outputs=result,
-         )
-         run_button.click(
-             fn=randomize_seed_fn,
-             inputs=[seed, randomize_seed],
-             outputs=seed,
-         ).then(
-             fn=process,
-             inputs=inputs,
-             outputs=result,
-             api_name='segmentation',
-         )
-     return demo
-
-
- if __name__ == '__main__':
-     from model import Model
-     model = Model(task_name='segmentation')
-     demo = create_demo(model.process_segmentation)
-     demo.queue().launch()
spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/red/tool.js DELETED
@@ -1,428 +0,0 @@
1
- import fs from 'fs'
2
- import { createHash, randomUUID } from 'crypto'
3
- import { resolve, join, dirname, basename } from 'path'
4
- import fetch, { FormData, Blob } from 'node-fetch'
5
- import { fileURLToPath } from 'url'
6
- import { exec, spawn } from 'child_process'
7
- import os from 'os'
8
- import _ from 'lodash'
9
- import { Stream } from "stream"
10
- import YAML from 'yaml'
11
- import { TMP_DIR } from '../tool.js'
12
-
13
- const user = os.userInfo().username
14
- let redPath = `C:/Users/${user}/.chronocat`
15
- if (!fs.existsSync(redPath)) {
16
- redPath = `C:/Users/${user}/AppData/Roaming/BetterUniverse/QQNT`
17
- }
18
-
19
- const roleMap = {
20
- 2: 'member',
21
- 3: 'admin',
22
- 4: 'owner'
23
- }
24
-
25
- async function uploadImg(bot, msg) {
26
- const file = await upload(bot, msg, 'image/png')
27
- if (!file.imageInfo) throw "获取图片信息失败,请检查图片状态"
28
- return {
29
- elementType: 2,
30
- picElement: {
31
- md5HexStr: file.md5,
32
- fileSize: file.fileSize,
33
- picHeight: file.imageInfo.height,
34
- picWidth: file.imageInfo.width,
35
- fileName: basename(file.ntFilePath),
36
- sourcePath: file.ntFilePath,
37
- picType: file.imageInfo.type === 'gif' ? 2000 : 1000
38
- }
39
- }
40
- }
41
-
42
- async function upload(bot, msg, contentType) {
43
- if (!msg) throw { noLog: true }
44
- let buffer
45
- if (msg instanceof Stream.Readable) {
46
- buffer = fs.readFileSync(msg.path)
47
- contentType = contentType.split('/')[0] + '/' + msg.path.substring(msg.path.lastIndexOf('.') + 1)
48
- } if (Buffer.isBuffer(msg)) {
49
- buffer = msg
50
- } else if (msg.match(/^base64:\/\//)) {
51
- buffer = Buffer.from(msg.replace(/^base64:\/\//, ""), 'base64')
52
- } else if (msg.startsWith('http')) {
53
- const img = await fetch(msg)
54
- const type = img.headers.get('content-type');
55
- if (type) contentType = type
56
- const arrayBuffer = await img.arrayBuffer()
57
- buffer = Buffer.from(arrayBuffer)
58
- } else if (msg.startsWith('file://')) {
59
- buffer = fs.readFileSync(msg.replace(/file:\/{2,3}/, ''))
60
- contentType = contentType.split('/')[0] + '/' + msg.substring(msg.lastIndexOf('.') + 1)
61
- } else {
62
- buffer = fs.readFileSync(msg)
63
- contentType = contentType.split('/')[0] + '/' + msg.substring(msg.lastIndexOf('.') + 1)
64
- }
65
- const blob = new Blob([buffer], { type: contentType })
66
- const formData = new FormData()
67
- formData.append('file', blob, 'ws-plugin.' + contentType.split('/')[1])
68
- const file = await bot.sendApi('POST', 'upload', formData)
69
- if (file.error) {
70
- throw file.error
71
- }
72
- file.contentType = contentType
73
- return file
74
- }
75
-
76
- async function uploadAudio(file) {
77
- let buffer
78
- if (file.match(/^base64:\/\//)) {
79
- buffer = Buffer.from(file.replace(/^base64:\/\//, ""), 'base64')
80
- } else if (file.startsWith('http')) {
81
- const http = await fetch(file)
82
- const arrayBuffer = await http.arrayBuffer()
83
- buffer = Buffer.from(arrayBuffer)
84
- } else if (file.startsWith('file://')) {
85
- buffer = fs.readFileSync(file.replace(/file:\/{2,3}/, ''))
86
- }
87
- const head = buffer.subarray(0, 7).toString()
88
- let filePath
89
- let duration = 0
90
- if (!head.includes('SILK')) {
91
- const tmpPath = await saveTmp(buffer)
92
- duration = await getDuration(tmpPath)
93
- const res = await audioTrans(tmpPath)
94
- filePath = res.silkFile
95
- buffer = fs.readFileSync(filePath)
96
- } else {
97
- filePath = await saveTmp(buffer)
98
- }
99
-
100
- const hash = createHash('md5')
101
- hash.update(buffer.toString('binary'), 'binary')
102
- const md5 = hash.digest('hex')
103
- return {
104
- elementType: 4,
105
- pttElement: {
106
- md5HexStr: md5,
107
- fileSize: buffer.length,
108
- fileName: md5 + '.amr',
109
- filePath: filePath,
110
- // waveAmplitudes: [36, 28, 68, 28, 84, 28],
111
- waveAmplitudes: [
112
- 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99
113
- ],
114
- duration: duration
115
- }
116
- }
117
- }
118
-
119
- function audioTrans(tmpPath, samplingRate = '24000') {
120
- return new Promise((resolve, reject) => {
121
- const pcmFile = join(TMP_DIR, randomUUID({ disableEntropyCache: true }))
122
- exec(`ffmpeg -y -i "${tmpPath}" -ar ${samplingRate} -ac 1 -f s16le "${pcmFile}"`, async () => {
123
- fs.unlink(tmpPath, () => { })
124
- fs.access(pcmFile, fs.constants.F_OK, (err) => {
125
- if (err) {
126
- reject('音频转码失败, 请确保你的 ffmpeg 已正确安装')
127
- }
128
- })
129
-
130
- const silkFile = join(TMP_DIR, randomUUID({ disableEntropyCache: true }))
131
- try {
132
- await pcmToSilk(pcmFile, silkFile, samplingRate)
133
- } catch (error) {
134
- reject('red发送语音暂不支持非win系统')
135
- }
136
- fs.unlink(pcmFile, () => { })
137
-
138
- resolve({
139
- silkFile
140
- })
141
- })
142
- })
143
- }
144
-
145
- function pcmToSilk(input, output, samplingRate) {
146
- return new Promise((resolve, reject) => {
147
- const args = ['-i', input, '-s', samplingRate, '-o', output]
148
- const __filename = fileURLToPath(import.meta.url);
149
- const __dirname = dirname(__filename);
150
- const child = spawn(join(__dirname, './cli.exe'), args)
151
- child.on('exit', () => {
152
- fs.access(output, fs.constants.F_OK, (err) => {
153
- if (err) {
154
- reject('音频转码失败')
155
- }
156
- })
157
- // fs.stat(output, (err, stats) => {
158
- // if (err) {
159
- // console.error(err);
160
- // return;
161
- // }
162
- // fs.truncate(output, stats.size - 1, err => {
163
- // if (err) {
164
- // console.error(err);
165
- // return;
166
- // }
167
- // });
168
- // });
169
- resolve()
170
- })
171
- })
172
- }
173
-
174
-function getDuration(file) {
-    return new Promise((resolve, reject) => {
-        // ffmpeg prints stream info (including Duration) to stderr when no output file is given
-        exec(`ffmpeg -i "${file}"`, function (err, stdout, stderr) {
-            const outStr = stderr.toString()
-            const regDuration = /Duration: ([0-9:.]+),/
-            const rs = regDuration.exec(outStr)
-            if (rs === null) {
-                reject('Failed to get the audio duration, please make sure ffmpeg is installed correctly')
-            } else if (rs[1]) {
-                const time = rs[1]
-                const parts = time.split(':')
-                const seconds = (+parts[0]) * 3600 + (+parts[1]) * 60 + (+parts[2])
-                const round = seconds.toString().split('.')[0]
-                resolve(+round)
-            }
-        })
-    })
-}
-
-async function saveTmp(data, ext = null) {
-    ext = ext ? '.' + ext : ''
-    const filename = randomUUID({ disableEntropyCache: true }) + ext
-    const tmpPath = resolve(TMP_DIR, filename)
-    fs.writeFileSync(tmpPath, data)
-    return tmpPath
-}
-
-async function getNtPath(bot) {
-    let dataPath
-    try {
-        // upload a known image, then derive the QQNT data directory from where it lands
-        const buffer = fs.readFileSync('./plugins/ws-plugin/resources/common/cont/logo.png')
-        const blob = new Blob([buffer], { type: 'image/png' })
-        const formData = new FormData()
-        formData.append('file', blob, '1.png')
-        const file = await bot.sendApi('POST', 'upload', formData)
-        fs.unlinkSync(file.ntFilePath)
-        const index = file.ntFilePath.indexOf('nt_data')
-        dataPath = file.ntFilePath.slice(0, index + 'nt_data'.length)
-    } catch (error) {
-        return null
-    }
-    return dataPath
-}
-
-async function uploadVideo(bot, file) {
-    let type = 'mp4'
-    if (file.match(/^base64:\/\//)) {
-        const buffer = Buffer.from(file.replace(/^base64:\/\//, ""), 'base64')
-        file = join(TMP_DIR, randomUUID({ disableEntropyCache: true }) + '.' + type)
-        fs.writeFileSync(file, buffer)
-    } else {
-        file = file.replace(/file:\/{2,3}/, '')
-        type = file.substring(file.lastIndexOf('.') + 1)
-        const Temp = join(TMP_DIR, randomUUID({ disableEntropyCache: true }) + '.' + type)
-        fs.copyFileSync(file, Temp)
-        file = Temp
-    }
-    const ntPath = await getNtPath(bot)
-    if (!ntPath) return
-    const now = new Date()
-    const year = now.getFullYear()
-    const month = now.getMonth() + 1
-    const date = `${year}-${month.toString().padStart(2, '0')}`
-    const video = await getVideoInfo(file)
-
-    // QQNT expects the original video under Video/<yyyy-MM>/Ori and its thumbnail under Video/<yyyy-MM>/Thumb
-    let oriPath = `${ntPath}/Video`
-    if (!fs.existsSync(oriPath)) fs.mkdirSync(oriPath)
-    oriPath = `${oriPath}/${date}`
-    if (!fs.existsSync(oriPath)) fs.mkdirSync(oriPath)
-    oriPath = `${oriPath}/Ori`
-    if (!fs.existsSync(oriPath)) fs.mkdirSync(oriPath)
-    oriPath = `${oriPath}/${video.videoMd5}.${type}`
-
-    let thumbPath = `${ntPath}/Video/${date}/Thumb`
-    if (!fs.existsSync(thumbPath)) fs.mkdirSync(thumbPath)
-    thumbPath = `${thumbPath}/${video.videoMd5}_0.png`
-
-    fs.copyFileSync(file, oriPath)
-    fs.unlinkSync(file)
-    const thumb = await getThumbInfo(oriPath, thumbPath)
-    return {
-        elementType: 5,
-        videoElement: {
-            filePath: oriPath,
-            fileName: video.videoMd5 + '.' + type,
-            videoMd5: video.videoMd5,
-            thumbMd5: thumb.thumbMd5,
-            fileTime: video.fileTime,
-            thumbSize: thumb.thumbSize,
-            fileSize: video.fileSize,
-            thumbWidth: thumb.thumbWidth,
-            thumbHeight: thumb.thumbHeight
-        }
-    }
-}
-
-async function getVideoInfo(file) {
-    const fileTime = await getVideoTime(file)
-    const videoMd5 = await getVideoMd5(file)
-    // stat gives the size without reading the whole video into memory
-    const fileSize = fs.statSync(file).size
-    return {
-        fileTime,
-        videoMd5,
-        fileSize
-    }
-}
-
-function getVideoMd5(file) {
-    return new Promise((resolve, reject) => {
-        // stream the file through md5 so large videos are hashed chunk by chunk
-        const stream = fs.createReadStream(file)
-        const hash = createHash('md5')
-        stream.on('data', chunk => {
-            hash.update(chunk)
-        })
-        stream.on('end', () => {
-            const md5 = hash.digest('hex')
-            resolve(md5)
-        })
-        stream.on('error', reject)
-    })
-}
-
-function getVideoTime(file) {
-    return new Promise((resolve, reject) => {
-        exec(`ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 "${file}"`, (error, stdout, stderr) => {
-            if (error) {
-                reject('Failed to get the video duration, please make sure ffmpeg is installed correctly')
-                return
-            }
-            const durationInSeconds = parseInt(stdout)
-            resolve(durationInSeconds)
-        })
-    })
-}
-
-async function getThumbInfo(file, thumbPath) {
-
-    const tempPath = join(TMP_DIR, randomUUID({ disableEntropyCache: true }) + '.jpg')
-
-    // grab the first frame as a thumbnail, then hash and measure it
-    const { thumbMd5, thumbSize } = await extractThumbnail(file, tempPath)
-
-    const { thumbWidth, thumbHeight } = getImageSize(tempPath)
-
-    fs.copyFileSync(tempPath, thumbPath)
-    fs.unlinkSync(tempPath)
-
-    return { thumbMd5, thumbWidth, thumbHeight, thumbSize }
-}
-
-function extractThumbnail(inputFile, outputFile) {
-    return new Promise((resolve, reject) => {
-        // take the first frame, scaled to one third of the source resolution
-        exec(`ffmpeg -i "${inputFile}" -ss 00:00:00.000 -vframes 1 -vf "scale=iw/3:ih/3" "${outputFile}"`, async () => {
-            if (!fs.existsSync(outputFile)) {
-                reject('Failed to extract the video thumbnail, please make sure ffmpeg is installed correctly')
-                return
-            }
-            const buffer = fs.readFileSync(outputFile)
-            const hash = createHash('md5')
-            hash.update(buffer)
-            resolve({
-                thumbMd5: hash.digest('hex'),
-                thumbSize: buffer.length
-            })
-        })
-    })
-}
-
-function getImageSize(file) {
-    // parse the JPEG SOF0 (0xFFC0) segment: height sits at byte offset 5
-    // and width at offset 7 from the marker start
-    const buffer = fs.readFileSync(file)
-    const start = buffer.indexOf(Buffer.from([0xff, 0xc0]))
-    const thumbHeight = buffer.readUInt16BE(start + 5)
-    const thumbWidth = buffer.readUInt16BE(start + 7)
-    return { thumbWidth, thumbHeight }
-}
-
-async function uploadFile(file) {
-    let buffer, name, path = process.cwd() + '/plugins/ws-plugin/Temp/'
-    if (Buffer.isBuffer(file)) {
-        // check for Buffer first: Buffers have no startsWith(), so the string tests below would throw
-        buffer = file
-        name = 'buffer'
-        path = path + name
-        fs.writeFileSync(path, buffer)
-    } else if (file.startsWith('http')) {
-        const http = await fetch(file)
-        const arrayBuffer = await http.arrayBuffer()
-        buffer = Buffer.from(arrayBuffer)
-        name = file.substring(file.lastIndexOf('/') + 1)
-        path = path + name
-        fs.writeFileSync(path, buffer)
-    } else if (file.startsWith('file://')) {
-        // strip the file:// prefix once so both the read and the copy get a real filesystem path
-        const localPath = file.replace(/file:\/{2,3}/, '')
-        buffer = fs.readFileSync(localPath)
-        name = localPath.substring(localPath.lastIndexOf('/') + 1)
-        path = path + name
-        fs.copyFileSync(localPath, path)
-    } else {
-        buffer = fs.readFileSync(file)
-        name = file.substring(file.lastIndexOf('/') + 1)
-        path = path + name
-        fs.copyFileSync(file, path)
-    }
-    const size = buffer.length
-    const hash = createHash('md5')
-    hash.update(buffer)
-    const md5 = hash.digest('hex')
-    return {
-        elementType: 3,
-        fileElement: {
-            fileMd5: md5,
-            fileName: name,
-            filePath: path,
-            fileSize: size,
-        }
-    }
-}
-
-function getToken() {
-    let tokenPath
-    try {
-        if (os.platform() === 'win32') {
-            tokenPath = `${redPath}/config/chronocat.yml`
-            if (fs.existsSync(tokenPath)) {
-                const data = YAML.parse(fs.readFileSync(tokenPath, 'utf-8'))
-                for (const i of data?.servers || []) {
-                    if (i.type === 'red') {
-                        return i.token
-                    }
-                }
-                logger.error('[ws-plugin] Please check that the red server is enabled in the chronocat config')
-                return false
-            } else {
-                tokenPath = `${redPath}/RED_PROTOCOL_TOKEN`
-                return fs.readFileSync(tokenPath, 'utf-8')
-            }
-        } else {
-            logger.error('[ws-plugin] On non-Windows systems, please obtain the token yourself')
-            return false
-        }
-    } catch (error) {
-        logger.error('[ws-plugin] Failed to obtain the QQNT token automatically; check that Chronocat is installed and try fetching it manually')
-        logger.error(error)
-        return false
-    }
-}
-
-export {
-    uploadImg,
-    uploadAudio,
-    uploadVideo,
-    uploadFile,
-    getToken,
-    getNtPath,
-    roleMap,
-    redPath
-}
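
getDuration and getVideoTime above both shell out to ffmpeg/ffprobe and parse the printed duration. As a hedged sketch, the same ffprobe call looks like this in Python (assumes ffprobe is on PATH; the helper name is illustrative, not part of the plugin):

    import subprocess

    def media_duration_seconds(path: str) -> int:
        # -of default=noprint_wrappers=1:nokey=1 prints the bare seconds value,
        # so no regex over stderr is needed (unlike plain `ffmpeg -i`)
        out = subprocess.run(
            ['ffprobe', '-v', 'error', '-show_entries', 'format=duration',
             '-of', 'default=noprint_wrappers=1:nokey=1', path],
            capture_output=True, text=True, check=True,
        ).stdout
        return int(float(out))

    # print(media_duration_seconds('sample.mp4'))  # e.g. 12
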
spaces/CjangCjengh/Sanskrit-TTS/utils.py DELETED
@@ -1,75 +0,0 @@
-import logging
-from json import loads
-from torch import load, FloatTensor
-from numpy import float32
-import librosa
-
-
-class HParams:
-    def __init__(self, **kwargs):
-        for k, v in kwargs.items():
-            if isinstance(v, dict):
-                v = HParams(**v)
-            self[k] = v
-
-    def keys(self):
-        return self.__dict__.keys()
-
-    def items(self):
-        return self.__dict__.items()
-
-    def values(self):
-        return self.__dict__.values()
-
-    def __len__(self):
-        return len(self.__dict__)
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def __setitem__(self, key, value):
-        return setattr(self, key, value)
-
-    def __contains__(self, key):
-        return key in self.__dict__
-
-    def __repr__(self):
-        return self.__dict__.__repr__()
-
-
-def load_checkpoint(checkpoint_path, model):
-    checkpoint_dict = load(checkpoint_path, map_location='cpu')
-    iteration = checkpoint_dict['iteration']
-    saved_state_dict = checkpoint_dict['model']
-    if hasattr(model, 'module'):
-        state_dict = model.module.state_dict()
-    else:
-        state_dict = model.state_dict()
-    new_state_dict = {}
-    for k, v in state_dict.items():
-        try:
-            new_state_dict[k] = saved_state_dict[k]
-        except KeyError:
-            # fall back to the model's own weights for keys missing from the checkpoint
-            logging.info("%s is not in the checkpoint" % k)
-            new_state_dict[k] = v
-    if hasattr(model, 'module'):
-        model.module.load_state_dict(new_state_dict)
-    else:
-        model.load_state_dict(new_state_dict)
-    logging.info("Loaded checkpoint '{}' (iteration {})".format(
-        checkpoint_path, iteration))
-    return
-
-
-def get_hparams_from_file(config_path):
-    with open(config_path, "r") as f:
-        data = f.read()
-    config = loads(data)
-
-    hparams = HParams(**config)
-    return hparams
-
-
-def load_audio_to_torch(full_path, target_sampling_rate):
-    audio, sampling_rate = librosa.load(full_path, sr=target_sampling_rate, mono=True)
-    return FloatTensor(audio.astype(float32))
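
HParams above is a thin recursive wrapper that exposes nested JSON config dicts as attributes. A self-contained sketch of the same pattern (the config keys and values below are invented for illustration; real configs ship with the Space):

    from json import loads

    class HParams:
        def __init__(self, **kwargs):
            for k, v in kwargs.items():
                if isinstance(v, dict):
                    v = HParams(**v)  # recurse so nested dicts also get attribute access
                setattr(self, k, v)

    config = loads('{"data": {"sampling_rate": 22050}, "model": {"hidden_channels": 192}}')
    hps = HParams(**config)
    print(hps.data.sampling_rate)  # 22050
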
spaces/CofAI/LengthConverter/style.css DELETED
@@ -1,28 +0,0 @@
-body {
-    padding: 2rem;
-    font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
-}
-
-h1 {
-    font-size: 16px;
-    margin-top: 0;
-}
-
-p {
-    color: rgb(107, 114, 128);
-    font-size: 15px;
-    margin-bottom: 10px;
-    margin-top: 5px;
-}
-
-.card {
-    max-width: 620px;
-    margin: 0 auto;
-    padding: 16px;
-    border: 1px solid lightgray;
-    border-radius: 16px;
-}
-
-.card p:last-child {
-    margin-bottom: 0;
-}
spaces/CofAI/chat.v1/web.html DELETED
@@ -1,60 +0,0 @@
-<!DOCTYPE html>
-<html>
-<head>
-    <title>API Demo</title>
-</head>
-<body>
-    <h1>API Demo</h1>
-    <label for="day">Select a day:</label>
-    <select id="day">
-        <option value="monday">Monday</option>
-        <option value="tuesday">Tuesday</option>
-        <option value="wednesday">Wednesday</option>
-        <option value="thursday">Thursday</option>
-        <option value="friday">Friday</option>
-        <option value="saturday">Saturday</option>
-        <option value="sunday">Sunday</option>
-    </select>
-    <label for="data">Select data:</label>
-    <select id="data" name="data"></select>
-    <br><br>
-    <input type="button" value="Submit" onclick="fetchData()">
-
-    <!-- <br><br>
-    <button onclick="fetchAPI()">Submit</button>
-    <br><br> -->
-    <div id="result"></div>
-    <script>
-        function fetchData() {
-            const day = document.getElementById("day").value;
-            const dataSelect = document.getElementById("data");
-            const resultDiv = document.getElementById("result");
-
-            // Clear previous results
-            resultDiv.innerHTML = "";
-
-            // Make API request
-            fetch(`https://api.example.com/data/${day}`)
-                .then(response => response.json())
-                .then(data => {
-                    // Populate data dropdown
-                    dataSelect.innerHTML = "";
-                    data.forEach(item => {
-                        const option = document.createElement("option");
-                        option.value = item.value;
-                        option.textContent = item.label;
-                        dataSelect.appendChild(option);
-                    });
-
-                    // Show fetched data
-                    resultDiv.innerHTML = `Data for ${day}: ${JSON.stringify(data)}`;
-                })
-                .catch(error => {
-                    console.error(error);
-                    resultDiv.innerHTML = "Error fetching data.";
-                });
-        }
-
-    </script>
-</body>
-</html>
spaces/CofAI/chat/g4f/models.py DELETED
@@ -1,233 +0,0 @@
-from g4f import Provider
-
-
-class Model:
-    class model:
-        name: str
-        base_provider: str
-        best_provider: str
-
-    class gpt_35_turbo:
-        name: str = 'gpt-3.5-turbo'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Wewordle
-
-    class gpt_35_turbo_0613:
-        name: str = 'gpt-3.5-turbo-0613'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Zeabur
-
-    class gpt_35_turbo_0301:
-        name: str = 'gpt-3.5-turbo-0301'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Zeabur
-
-    class gpt_35_turbo_16k_0613:
-        name: str = 'gpt-3.5-turbo-16k-0613'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Zeabur
-
-    class gpt_35_turbo_16k:
-        name: str = 'gpt-3.5-turbo-16k'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.ChatFree
-
-    class gpt_4_dev:
-        name: str = 'gpt-4-for-dev'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Phind
-
-    class gpt_4:
-        name: str = 'gpt-4'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.ChatgptAi
-
-    class gpt_4_0613:
-        name: str = 'gpt-4-0613'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Lockchat
-        best_providers: list = [Provider.Bing, Provider.Lockchat]
-
-    class claude_instant_v1_100k:
-        name: str = 'claude-instant-v1-100k'
-        base_provider: str = 'anthropic'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class claude_instant_v1:
-        name: str = 'claude-instant-v1'
-        base_provider: str = 'anthropic'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class claude_v1_100k:
-        name: str = 'claude-v1-100k'
-        base_provider: str = 'anthropic'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class claude_v1:
-        name: str = 'claude-v1'
-        base_provider: str = 'anthropic'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class alpaca_7b:
-        name: str = 'alpaca-7b'
-        base_provider: str = 'replicate'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class stablelm_tuned_alpha_7b:
-        name: str = 'stablelm-tuned-alpha-7b'
-        base_provider: str = 'replicate'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class bloom:
-        name: str = 'bloom'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class bloomz:
-        name: str = 'bloomz'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class flan_t5_xxl:
-        name: str = 'flan-t5-xxl'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class flan_ul2:
-        name: str = 'flan-ul2'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class gpt_neox_20b:
-        name: str = 'gpt-neox-20b'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class oasst_sft_4_pythia_12b_epoch_35:
-        name: str = 'oasst-sft-4-pythia-12b-epoch-3.5'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class santacoder:
-        name: str = 'santacoder'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class command_medium_nightly:
-        name: str = 'command-medium-nightly'
-        base_provider: str = 'cohere'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class command_xlarge_nightly:
-        name: str = 'command-xlarge-nightly'
-        base_provider: str = 'cohere'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class code_cushman_001:
-        name: str = 'code-cushman-001'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class code_davinci_002:
-        name: str = 'code-davinci-002'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class text_ada_001:
-        name: str = 'text-ada-001'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class text_babbage_001:
-        name: str = 'text-babbage-001'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class text_curie_001:
-        name: str = 'text-curie-001'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class text_davinci_002:
-        name: str = 'text-davinci-002'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class text_davinci_003:
-        name: str = 'text-davinci-003'
-        base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.Vercel
-
-    class palm:
-        name: str = 'palm2'
-        base_provider: str = 'google'
-        best_provider: Provider.Provider = Provider.Bard
-
-    class falcon_40b:
-        name: str = 'falcon-40b'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.H2o
-
-    class falcon_7b:
-        name: str = 'falcon-7b'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.H2o
-
-    class llama_13b:
-        name: str = 'llama-13b'
-        base_provider: str = 'huggingface'
-        best_provider: Provider.Provider = Provider.H2o
-
-
-class ModelUtils:
-    convert: dict = {
-        'gpt-3.5-turbo': Model.gpt_35_turbo,
-        'gpt-3.5-turbo-0613': Model.gpt_35_turbo_0613,
-        'gpt-3.5-turbo-0301': Model.gpt_35_turbo_0301,
-        'gpt-4': Model.gpt_4,
-        'gpt-4-0613': Model.gpt_4_0613,
-        'gpt-4-for-dev': Model.gpt_4_dev,
-        'gpt-3.5-turbo-16k': Model.gpt_35_turbo_16k,
-        'gpt-3.5-turbo-16k-0613': Model.gpt_35_turbo_16k_0613,
-
-        'claude-instant-v1-100k': Model.claude_instant_v1_100k,
-        'claude-v1-100k': Model.claude_v1_100k,
-        'claude-instant-v1': Model.claude_instant_v1,
-        'claude-v1': Model.claude_v1,
-
-        'alpaca-7b': Model.alpaca_7b,
-        'stablelm-tuned-alpha-7b': Model.stablelm_tuned_alpha_7b,
-
-        'bloom': Model.bloom,
-        'bloomz': Model.bloomz,
-
-        'flan-t5-xxl': Model.flan_t5_xxl,
-        'flan-ul2': Model.flan_ul2,
-
-        'gpt-neox-20b': Model.gpt_neox_20b,
-        'oasst-sft-4-pythia-12b-epoch-3.5': Model.oasst_sft_4_pythia_12b_epoch_35,
-        'santacoder': Model.santacoder,
-
-        'command-medium-nightly': Model.command_medium_nightly,
-        'command-xlarge-nightly': Model.command_xlarge_nightly,
-
-        'code-cushman-001': Model.code_cushman_001,
-        'code-davinci-002': Model.code_davinci_002,
-
-        'text-ada-001': Model.text_ada_001,
-        'text-babbage-001': Model.text_babbage_001,
-        'text-curie-001': Model.text_curie_001,
-        'text-davinci-002': Model.text_davinci_002,
-        'text-davinci-003': Model.text_davinci_003,
-
-        'palm2': Model.palm,
-        'palm': Model.palm,
-        'google': Model.palm,
-        'google-bard': Model.palm,
-        'google-palm': Model.palm,
-        'bard': Model.palm,
-
-        'falcon-40b': Model.falcon_40b,
-        'falcon-7b': Model.falcon_7b,
-        'llama-13b': Model.llama_13b,
-    }
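
ModelUtils.convert above is a plain string-to-class lookup table: callers resolve a model name such as 'gpt-3.5-turbo' to a class carrying its base_provider and best_provider. A minimal self-contained sketch of that lookup (one illustrative stand-in entry; provider objects omitted):

    class gpt_35_turbo:
        name = 'gpt-3.5-turbo'
        base_provider = 'openai'

    # the real table maps every supported model name to a class like the one above
    convert = {'gpt-3.5-turbo': gpt_35_turbo}

    model = convert['gpt-3.5-turbo']
    print(model.name, model.base_provider)  # gpt-3.5-turbo openai
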
spaces/CyberHarem/find_my_waifu/civitai.py DELETED
@@ -1,26 +0,0 @@
-from gchar.games.dispatch.access import GAME_CHARS
-
-
-def try_find_title(char_name, game_name):
-    try:
-        game_cls = GAME_CHARS[game_name.lower()]
-        ch = game_cls.get(char_name)
-        if ch:
-            # collect every localized name the character record provides
-            names = []
-            if ch.enname:
-                names.append(str(ch.enname))
-            if ch.jpname:
-                names.append(str(ch.jpname))
-            if ch.cnname:
-                names.append(str(ch.cnname))
-            if hasattr(ch, 'krname') and ch.krname:
-                names.append(str(ch.krname))
-
-            return f"{'/'.join(names)} ({game_cls.__official_name__})"
-
-        else:
-            cname = ' '.join(list(map(str.capitalize, char_name.split(' '))))
-            return f'{cname} ({game_cls.__official_name__})'
-
-    except KeyError:
-        return None
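
try_find_title joins whichever localized names the character record provides and appends the game's official title. The same joining logic, sketched without the gchar dependency (the example names are illustrative):

    def build_title(names, official_name):
        # drop missing names, join the rest with '/', append the game title
        names = [n for n in names if n]
        return f"{'/'.join(names)} ({official_name})"

    print(build_title(['Surtr', 'スルト', '史尔特尔'], 'Arknights'))
    # Surtr/スルト/史尔特尔 (Arknights)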