parquet-converter committed on
Commit b643e7a · 1 Parent(s): c907fcf

Update parquet files (step 29 of 121)

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete changeset.
Files changed (50):
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download IDM Full Crack Bagas31 - Is It Safe and Legal?.md +0 -37
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download The Jupiter - Il Destino Delluniverso Full Movie Italian Dubbed In Torrent.md +0 -20
  3. spaces/1gistliPinn/ChatGPT4/Examples/Citroen Service Box Keygen Free WORK Download.md +0 -11
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Age of History II APK A Wasteland Editor and Flag Maker for Android Wargamers.md +0 -98
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Brawl Stars APK Club The Most Fun and Addictive Game Ever.md +0 -179
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Build Your Fantasy Empire with War and Order APK for Android.md +0 -111
  7. spaces/44ov41za8i/FreeVC/speaker_encoder/data_objects/speaker_batch.py +0 -12
  8. spaces/52Hz/CMFNet_deblurring/main_test_CMFNet.py +0 -88
  9. spaces/AB-TW/team-ai/agents/tools/smart_domain/entity.py +0 -115
  10. spaces/ADOPLE/AdopleAI-Website-DocumentQA/README.md +0 -13
  11. spaces/AIFILMS/generate_human_motion/VQ-Trans/options/option_vq.py +0 -61
  12. spaces/AIGC-Audio/Make_An_Audio/ldm/modules/ema.py +0 -76
  13. spaces/ASJMO/freegpt/server/website.py +0 -58
  14. spaces/AlexN/pull_up/README.md +0 -37
  15. spaces/AlhitawiMohammed22/HTD_HTR/builder.py +0 -305
  16. spaces/Alpaca233/SadTalker/src/face3d/data/flist_dataset.py +0 -125
  17. spaces/Alycer/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py +0 -19
  18. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/value_guided_sampling.md +0 -32
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/cross_attention.py +0 -94
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py +0 -373
  21. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_karras_ve_flax.py +0 -237
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py +0 -90
  23. spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/layers.py +0 -48
  24. spaces/Aristo/trafficsign/app.py +0 -39
  25. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py +0 -642
  26. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/registry.py +0 -60
  27. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_rpn.py +0 -262
  28. spaces/AzumaSeren100/XuanShen-Bert-VITS2/bert_gen.py +0 -54
  29. spaces/Bajr/softly/README.md +0 -11
  30. spaces/Benson/text-generation/Examples/Caramelo Crush Soda Saga Juego Gratis Para Pc.md +0 -103
  31. spaces/Benson/text-generation/Examples/Cmo Descargar Blockman Ir En El PC Gratis.md +0 -57
  32. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/bcdoc/docstringparser.py +0 -315
  33. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/themes.py +0 -5
  34. spaces/Billyosoro/ESRGAN/tests/test_discriminator_arch.py +0 -19
  35. spaces/CVPR/LIVE/pybind11/include/pybind11/pybind11.h +0 -0
  36. spaces/CVPR/lama-example/saicinpainting/evaluation/masks/__init__.py +0 -0
  37. spaces/Callimethee/Imagine-CR/app.py +0 -29
  38. spaces/Caoyunkang/Segment-Any-Anomaly/SAM/CODE_OF_CONDUCT.md +0 -80
  39. spaces/Cloudyy/bark-voice-cloning/hubert/__init__.py +0 -0
  40. spaces/CognitiveLabs/Research-Assistant/config/config.py +0 -82
  41. spaces/CoreyMorris/MMLU-by-task-Leaderboard/test_paths.py +0 -19
  42. spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/data/util.py +0 -24
  43. spaces/Cvandi/remake/realesrgan/archs/discriminator_arch.py +0 -67
  44. spaces/DAMO-NLP-SG/Video-LLaMA/README.md +0 -14
  45. spaces/DHEIVER/timeseries-anomaly-detection-autoencoders/README.md +0 -13
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/utils.py +0 -414
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/utils.py +0 -228
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_util.py +0 -135
  49. spaces/Datasculptor/MusicGen/tests/data/test_audio_utils.py +0 -110
  50. spaces/Datasculptor/StyleGAN-NADA/op/fused_act_cpu.py +0 -41
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download IDM Full Crack Bagas31 - Is It Safe and Legal?.md DELETED
@@ -1,37 +0,0 @@
-
- <h1>How to Download IDM Full Crack Bagas31 for Free</h1>
- <p>IDM, or Internet Download Manager, is a popular software that can help you download files from the internet faster and easier. It can increase your download speed up to 5 times, resume and schedule downloads, and manage your downloaded files efficiently. It can also download videos from various websites, such as YouTube, Vimeo, and others.</p>
- <h2>download idm full crack bagas31</h2><br /><p><b><b>DOWNLOAD</b> &ndash;&ndash;&ndash; <a href="https://byltly.com/2uKxFo">https://byltly.com/2uKxFo</a></b></p><br /><br />
- <p>However, IDM is not a free software. You need to pay for a license or serial key to use it without any limitations or interruptions. If you don't want to spend money on IDM, you may be tempted to look for a cracked version of IDM that can bypass the registration process and let you use it for free. One of the websites that offer IDM full crack for free download is Bagas31.</p>
- <p>Bagas31 is a website that provides various software and games for free download. It also provides IDM full crack with the latest version and updates. But is it safe and legal to download IDM full crack Bagas31? What are the risks and benefits of using IDM full crack Bagas31? In this article, we will answer these questions and provide you with a guide on how to download IDM full crack Bagas31 for free.</p>
-
- <h2>Is It Safe and Legal to Download IDM Full Crack Bagas31?</h2>
- <p>The answer to this question is no. Downloading IDM full crack Bagas31 is neither safe nor legal. Here are some of the reasons why:</p>
- <ul>
- <li>Downloading IDM full crack Bagas31 is an act of software piracy. You are violating the intellectual property rights of the original developer of IDM. You are also breaking the law and may face legal consequences if you get caught.</li>
- <li>Downloading IDM full crack Bagas31 may expose your computer or device to viruses, malware, or other harmful components. The cracked version of IDM may contain malicious code or hidden programs that may damage your system or steal your data. You may also download fake or modified versions of IDM that may not work properly or cause problems.</li>
- <li>Downloading IDM full crack Bagas31 may not guarantee you a quality and reliable data recovery service. The cracked version of IDM may not be compatible with some devices or file systems that require special treatment or attention. It may also fail to detect some files or recover them partially or corruptly. It may also overwrite your existing data or damage your device if you use it improperly or carelessly.</li>
- <li>Downloading IDM full crack Bagas31 may not protect your data and privacy from potential risks or threats. The cracked version of IDM may not have the same security and privacy features as the original version. It may also leak your personal information or data to hackers or third parties without your consent or knowledge.</li>
- </ul>
- <p>Therefore, downloading IDM full crack Bagas31 is not a wise choice. You may end up losing more than what you gain. You may also put yourself in danger or trouble by using a cracked software.</p>
-
- <h2>How to Download IDM Full Crack Bagas31 for Free</h2>
- <p>If you still want to try downloading IDM full crack Bagas31 for free, despite the risks and drawbacks, here are the steps that you need to follow:</p>
- <p></p>
- <ol>
- <li>Go to https://bagas31.pw/</li>
- <li>Search for IDM full crack in the search box or browse through the categories</li>
- <li>Select the latest version of IDM full crack that matches your system requirements</li>
- <li>Click on the download button and wait for the download to finish</li>
- <li>Extract the file with Winrar v6.1 or later</li>
- <li>Run the setup.exe file and install Internet Download Manager full version on your PC</li>
- <li>Close the application from the tray icon and copy the patch.exe file to C:\\Program Files (x86)\\Internet Download Manager</li>
- <li>Run the patch.exe file as administrator and click on Patch button</li>
- <li>Enjoy using IDM full crack for free!</li>
- </ol>
-
- <h2>A Better Alternative to Downloading IDM Full Crack Bagas31</h2>
- <p>If you want a better and safer alternative to downloading IDM full crack Bagas31, you should consider using a legitimate and reputable data recovery software that can offer you a quality and reliable data recovery service without any risks or drawbacks. One of such software is FoneDog Data Recovery.</p>
- <p>Fone</p> ddb901b051<br />
- <br />
- <br />

spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download The Jupiter - Il Destino Delluniverso Full Movie Italian Dubbed In Torrent.md DELETED
@@ -1,20 +0,0 @@
- <br />
- <h1>Download the Jupiter - Il destino dell'universo full movie italian dubbed in torrent</h1>
-
- <p>Jupiter - Il destino dell'universo (Jupiter Ascending) is a science fiction movie from 2015 written and directed by Lana and Andy Wachowski, starring Mila Kunis and Channing Tatum. It is the first movie of the Wachowski sisters made in 3D [^1^].</p>
- <h2>Download the Jupiter - Il destino dell'universo full movie italian dubbed in torrent</h2><br /><p><b><b>Download Zip</b> &rarr;&rarr;&rarr; <a href="https://byltly.com/2uKzU7">https://byltly.com/2uKzU7</a></b></p><br /><br />
-
- <p>The movie tells the story of Jupiter Jones (Mila Kunis), a girl with a very special genetic code. She works as a maid for her wealthy neighbors, but she dreams of a better future. One day, she discovers that she is the object of desire of a family of noble aliens who want to exploit her for their own benefit. She is rescued by Caine (Channing Tatum), a mercenary half-man half-dog, who takes her on an adventure across the galaxy to reveal her true destiny.</p>
-
- <p>If you want to watch this movie in italian dubbed version, you can download it in torrent from this link: <a href="https://example.com/jupiter-ascending-italian-torrent">https://example.com/jupiter-ascending-italian-torrent</a>. You will need a torrent client like uTorrent or BitTorrent to download the file. Make sure you have enough space on your device and a good internet connection.</p>
-
- <p>Jupiter - Il destino dell'universo is a movie full of action, adventure and fantasy, with stunning visual effects and a captivating soundtrack by Michael Giacchino. It is a movie that will take you beyond the known, through space and inside unknown realms. Don't miss this opportunity to download it in torrent and enjoy it at home!</p>
- <p></p>
-
- <p>The movie features a talented cast of actors and actresses, who bring to life the complex and diverse characters of the story. Mila Kunis plays Jupiter Jones, a humble and courageous heroine who discovers her royal heritage and fights for her freedom. Channing Tatum plays Caine Wise, a loyal and brave protector who falls in love with Jupiter and helps her in her quest. Sean Bean plays Stinger Apini, a former comrade of Caine who joins them in their mission. Eddie Redmayne plays Balem Abrasax, the eldest and most ruthless of the Abrasax siblings, who wants to harvest Earth for his own profit. Douglas Booth plays Titus Abrasax, the youngest and most charming of the Abrasax siblings, who tries to seduce Jupiter and trick her into marrying him. Tuppence Middleton plays Kalique Abrasax, the middle and most mysterious of the Abrasax siblings, who seems to have a hidden agenda behind her kindness.</p>
-
- <p>The movie also features a cameo appearance by Terry Gilliam, who plays a minister in a bureaucratic scene that pays homage to his movie Brazil. Other supporting actors include James D'Arcy as Max Jones, Jupiter's father; Bae Doona as Razo, a bounty hunter; Tim Pigott-Smith as Malidictes, Balem's henchman; Vanessa Kirby as Katharine Dunlevy, Jupiter's friend; Jeremy Swift as Vasilliy Bolodnikov, Jupiter's uncle; Ramon Tikaram as Phylo Percadium, an Aegis captain; and Maria Doyle Kennedy as Aleksa, Jupiter's mother.</p>
-
- <p>Jupiter - Il destino dell'universo is a movie that explores themes such as identity, destiny, family, love, greed, power and rebellion. It is a movie that challenges the status quo and celebrates the potential of every individual. It is a movie that invites you to dream big and reach for the stars. Download it now in torrent and join Jupiter and Caine in their epic journey!</p> 7b8c122e87<br />
- <br />
- <br />

spaces/1gistliPinn/ChatGPT4/Examples/Citroen Service Box Keygen Free WORK Download.md DELETED
@@ -1,11 +0,0 @@
- <br />
- <p> Un jeu en utilisant la langue anglaise: Au nom du bien ou pour le mal? <br> iMDB: La Redditerie Tue <br> Rankrakuta na kimi kakikaeshi download <br> Doobiedooey: A Colorful Line Drawing 2 <br> - Kindle Apps <br> Mokochien Gyoushi <br> </p>
- <p> Download Gameboy Color GBGC2O <br> nahab.no.go.gay.otaku.org.sina <br> iokananale <br> Mandriva Corporate Security 2019 Service Pack 1.2 Beta <br> I Pradalai Samayalai Kathalai Vandhanikka <br> EOS.TV.Scout <br> EOS iDocs Free Edition <br> </p>
- <h2>citroen service box keygen free download</h2><br /><p><b><b>Download</b> &#187; <a href="https://imgfil.com/2uxXju">https://imgfil.com/2uxXju</a></b></p><br /><br />
- <p> Download Dash v1.11.6 with Crack <br> Aaron Winters Member Pro Club <br> Why the concept of "the global village" is a load of malarkey <br> Dr. R. DiVanni Public Administration Review <br> A Dynamic Credit-score Generator <br> Download MCI S.A.F.E. 6.7 Crack Free <br> Broke-Ass Tagger 2017 <br> </p>
- <p> Download Gameboy Color GBGC2O <br> lynnkelly.inngenuity.com.au <br> CaptainFingerz 2009 Portable [MAC] <br> Reemplaza el Web Browser Mozilla Firefox por el Web Browser Internet Explorer 8+ <br> Daring Energy Systems S.L. 2008 <br> Ayuda para Informacion <br> </p>
- <p> Best Of Wall Of Text Generator Wallpaper Generator Background Generator Pinterest Generator Download Online Generator Pages Generator. By clicking the "continue" button below, I agree to be contact</p>
- <p>Langaray home theater system video <br> THE DOYEN OF THE SKEET NET Ivan Jirsak Recenserne <br> The anti-virus security solutions for Exchange are often well-funded and may offer features that are unique to it. In most cases, you'll have to buy each service separately, unless they sell a "complete package" with their best-selling solution. Today, a typical email spam attack can employ hundreds of viruses or spyware programs that can invade your system and destroy it. Check with your antivirus vendor for details.</p>
- <p></p> 899543212b<br />
- <br />
- <br />

spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Age of History II APK A Wasteland Editor and Flag Maker for Android Wargamers.md DELETED
@@ -1,98 +0,0 @@
- <br />
- <h1>Download Age of History II APK - A Grand Strategy Wargame for Android</h1>
- <p>Are you a fan of history and strategy games? Do you want to experience the thrill of leading your own Civilization from the dawn of civilization to the future of mankind? If yes, then you should download Age of History II APK, a grand strategy wargame that is simple to learn yet hard to master.</p>
- <p>Age of History II is a game that lets you explore the whole history of humanity, Age by Age, beginning in the Age of Civilizations and leading into the far future. You can play as many Civilizations ranging from the largest empire to the smallest tribe, and lead your people to glory in a campaign spanning thousands of years. You can also create your own scenarios and worlds using the in-game editors, and share them with other players.</p>
- <h2>download age of history 2 apk</h2><br /><p><b><b>Download</b> &#9999; &#9999; &#9999; <a href="https://urlin.us/2uST9r">https://urlin.us/2uST9r</a></b></p><br /><br />
- <p>In this article, we will tell you what is Age of History II, what are its features, how to download and install it on your Android device, and some tips and tricks for playing it. Let's get started!</p>
- <h2>What is Age of History II?</h2>
- <p>Age of History II is a grand strategy wargame developed by Łukasz Jakowski, an independent game developer from Poland. It is the sequel to Age of Civilizations, which was released in 2014. Age of History II was released in 2018 for Windows, macOS, Linux, and Android platforms.</p>
- <p>Age of History II is a game that simulates the history of the world from ancient times to the far future. You can choose from hundreds of Civilizations to play as, each with their own unique culture, history, and challenges. You can also create your own custom Civilizations using the Civilization Creator tool.</p>
- <p>The game has two main modes: Historical Grand Campaign and Custom Scenario. In Historical Grand Campaign, you can play through the entire history of humanity, starting from any Age you want. You can also choose from different scenarios that focus on specific regions or events, such as World War I, World War II, Cold War, Modern Day, etc.</p>
- <p>In Custom Scenario, you can create your own scenarios using the Scenario Editor tool. You can set up the map, the Civilizations, the events, the rules, and everything else according to your preferences. You can also download and play scenarios made by other players from the Steam Workshop or other sources.</p>
- <h3>Features of Age of History II</h3>
- <p>Age of History II is a game that offers a lot of features and options for players who love history and strategy games. Some of these features are:</p>
- <h4>Detailed map of the world with many historical borders</h4>
- <p>The game has a detailed map of the world that covers every continent and region. The map has over 4000 provinces that represent different territories and states throughout history. The map also has many historical borders that change according to the time period and the events that happen in the game.</p>
- <p>Download Age of History II Lite APK<br />
- Age of History II Android Game Free Download<br />
- How to Install Age of History II APK on Windows<br />
- Age of History II APK Latest Version 2023<br />
- Age of History II Strategy Wargame for Android<br />
- Download Age of History II Mod APK Unlimited Money<br />
- Age of History II APK + OBB Data Download<br />
- Age of History II Grand Strategy Game Review<br />
- Age of History II APK Download for PC<br />
- Age of History II APK No Ads Version<br />
- Age of History II APK Full Version Free Download<br />
- Age of History II APK Old Versions Download<br />
- Age of History II APK Offline Mode<br />
- Age of History II APK Cheats and Hacks<br />
- Age of History II APK Multiplayer Mode<br />
- Download Age of History II APK from APKCombo<br />
- Age of History II APK Requirements and Compatibility<br />
- Age of History II APK Update and Patch Notes<br />
- Age of History II APK Tips and Tricks<br />
- Age of History II APK Best Civilizations to Play<br />
- Download Age of History II APK for Android TV<br />
- Age of History II APK Features and Gameplay<br />
- Age of History II APK Editor and Custom Scenarios<br />
- Age of History II APK Download Link and QR Code<br />
- Age of History II APK Ratings and Reviews<br />
- Download Age of History II Premium APK Unlocked<br />
- Age of History II APK Alternatives and Similar Games<br />
- Age of History II APK Bug Fixes and Improvements<br />
- Age of History II APK Support and Contact Information<br />
- Age of History II APK Size and Performance Optimization<br />
- Download Age of History II Modded APK with All DLCs<br />
- Age of History II APK Historical Grand Campaign Guide<br />
- Age of History II APK How to Conquer the World<br />
- Age of History II APK How to Use Diplomacy and Trade<br />
- Age of History II APK How to Create Own Flag and Civilization<br />
- Download Age of History II Cracked APK No Root Required<br />
- Age of History II APK How to Play Hotseat Mode with Friends<br />
- Age of History II APK How to Change Language and Settings<br />
- Age of History II APK How to Enable Wasteland Mode<br />
- Age of History II APK How to Watch End Game Timelapses<br />
- Download Age of History II Beta APK Test New Features<br />
- Age of History II APK How to Unlock Achievements and Rewards<br />
- Age of History II APK How to Backup and Restore Data<br />
- Age of History II APK How to Install Mods and Addons<br />
- Age of History II APK How to Access Developer Options and Console Commands</p>
- <h4>Deeper diplomatic system between Civilizations</h4>
- <p>The game has a deeper diplomatic system that allows you to interact with other Civilizations in various ways. You can declare war or peace, form alliances or coalitions, send or receive trade offers, demand or offer tribute, support or oppose revolutions, etc. You can also use diplomacy points to influence other Civilizations' opinions and actions.</p>
- <h4>Create own History using in-game editors</h4>
- <p>The game has several in-game editors that let you create your own custom content. You can use the Civilization Creator to make your own Civilizations with custom flags, names, colors, and stats. You can use the Scenario Editor to make your own scenarios with custom maps, Civilizations, events, rules, and more. You can also use the Map Editor to edit the existing map or create a new one from scratch.</p>
- <h4>Hotseat, play with as many players as Civilizations in scenario!</h4>
- <p>The game has a hotseat mode that allows you to play with your friends on the same device. You can play with as many players as there are Civilizations in the scenario, and take turns controlling your actions. You can also play online multiplayer with other players using Steam or other platforms.</p>
- <h3>How to download and install Age of History II APK?</h3>
- <p>If you want to download and install Age of History II APK on your Android device, you need to follow these steps:</p>
- <h4>Step 1: Download the APK file from a trusted source</h4>
- <p>The first step is to download the APK file of Age of History II from a trusted source. You can find the APK file on various websites that offer Android apps and games, such as APKPure, APKMirror, etc. Make sure you download the latest version of the game and check the file size and permissions before downloading.</p>
- <h4>Step 2: Enable unknown sources on your device</h4>
- <p>The second step is to enable unknown sources on your device. This is necessary because Age of History II is not available on the Google Play Store, and you need to allow your device to install apps from other sources. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may also need to confirm this action by tapping OK or Allow.</p>
- <h4>Step 3: Install the APK file and launch the game</h4>
- <p>The third step is to install the APK file and launch the game. To do this, locate the downloaded APK file on your device using a file manager app, such as ES File Explorer, and tap on it. You may need to grant some permissions for the installation process to proceed. Once the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer.</p>
- <h3>Tips and tricks for playing Age of History II</h3>
- <p>Age of History II is a game that requires strategy, planning, and patience. If you want to succeed in this game, you need to follow some tips and tricks that will help you improve your gameplay. Here are some of them:</p>
- <h4>Learn the basics of the game mechanics</h4>
- <p>The first tip is to learn the basics of the game mechanics. You need to understand how the game works, such as how to move your units, how to fight battles, how to manage your resources, how to use diplomacy, etc. You can find tutorials and guides on the game's official website or YouTube channel that will explain these concepts in detail.</p>
- <h4>Choose your Civilization wisely</h4>
- <p>The second tip is to choose your Civilization wisely. You need to consider several factors when choosing your Civilization, such as their location, their culture, their history, their strengths and weaknesses, their goals, etc. You also need to consider the scenario you are playing and the challenges you will face. For example, if you are playing a World War II scenario, you may want to choose a Civilization that was involved in that war and has relevant units and abilities.</p>
- <h4>Manage your economy and military</h4>
- <p>The third tip is to manage your economy and military. You need to balance your income and expenses, and make sure you have enough resources to sustain your Civilization. You also need to build and upgrade your buildings, such as farms, mines, factories, barracks, etc., that will provide you with more resources and units. You also need to train and deploy your military units, such as infantry, cavalry, tanks, planes, ships, etc., that will help you defend your territory and conquer others.</p>
- <h4>Use diplomacy and alliances to your advantage</h4>
- <p>The fourth tip is to use diplomacy and alliances to your advantage. You need to interact with other Civilizations in various ways, such as declaring war or peace, forming alliances or coalitions, sending or receiving trade offers, demanding or offering tribute, supporting or opposing revolutions, etc. You can also use diplomacy points to influence other Civilizations' opinions and actions. You need to use diplomacy and alliances to your advantage, as they can help you gain allies, enemies, resources, territories, and more.</p>
- <h2>Conclusion</h2>
- <p>Age of History II is a grand strategy wargame that lets you explore the whole history of humanity, Age by Age, beginning in the Age of Civilizations and leading into the far future. You can play as many Civilizations ranging from the largest empire to the smallest tribe, and lead your people to glory in a campaign spanning thousands of years. You can also create your own scenarios and worlds using the in-game editors, and share them with other players.</p>
- <p>If you want to download and install Age of History II APK on your Android device, you need to follow the steps we mentioned above. You also need to follow some tips and tricks we shared to improve your gameplay and have more fun. Age of History II is a game that will challenge your strategic skills and test your historical knowledge. Are you ready to make history?</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about Age of History II:</p>
- <table>
- <tr><td><b>Q: How much does Age of History II cost?</b></td><td><b>A: Age of History II costs $4.99 on Steam and $2.99 on Google Play Store.</b></td></tr>
- <tr><td><b>Q: Is Age of History II available for iOS devices?</b></td><td><b>A: No, Age of History II is not available for iOS devices at the moment.</b></td></tr>
- <tr><td><b>Q: How can I update Age of History II?</b></td><td><b>A: You can update Age of History II by downloading the latest version of the APK file from a trusted source and installing it over the existing one.</b></td></tr>
- <tr><td><b>Q: How can I contact the developer of Age of History II?</b></td><td><b>A: You can contact the developer of Age of History II by visiting his official website or sending him an email at [email protected].</b></td></tr>
- <tr><td><b>Q: How can I support the development of Age of History II?</b></td><td><b>A: You can support the development of Age of History II by buying the game, leaving a positive review, sharing it with your friends, and donating to the developer via PayPal or Patreon.</b></td></tr>
- </table></p> 197e85843d<br />
- <br />
- <br />

spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Brawl Stars APK Club The Most Fun and Addictive Game Ever.md DELETED
@@ -1,179 +0,0 @@
- <br />
- <h1>Brawl Stars APK Club Indir: How to Download and Play the Popular Mobile Game</h1>
- <p>If you are looking for a fun and exciting mobile game that you can play with your friends or solo, you might want to check out Brawl Stars. Brawl Stars is a fast-paced multiplayer game that offers different modes, characters, and challenges for you to enjoy. In this article, we will tell you what Brawl Stars is, how to download it using APK Club or other sources, and how to play it like a pro.</p>
- <h2>brawl stars apk club indir</h2><br /><p><b><b>Download Zip</b> &#9193; <a href="https://urlin.us/2uT35k">https://urlin.us/2uT35k</a></b></p><br /><br />
- <h2>What is Brawl Stars?</h2>
- <h3>A fast-paced multiplayer game with different modes and characters</h3>
- <p>Brawl Stars is a mobile game developed by Supercell, the makers of Clash of Clans and Clash Royale. It is a twin-stick shooter with a MOBA twist, where you can choose from over 20 unique brawlers with different abilities and classes. You can team up with your friends or play solo across various game modes, such as Gem Grab, Showdown, Bounty, Heist, Brawl Ball, and more. Each match lasts for under three minutes, making it perfect for quick bursts of fun.</p>
- <h3>A free-to-play game with in-app purchases and rewards</h3>
- <p>Brawl Stars is free to download and play on Android and iOS devices, but it also offers in-app purchases for gems, coins, skins, and other items. Gems are the premium currency that you can use to buy brawl boxes, skins, coins, power points, and more. Coins are the regular currency that you can use to upgrade your brawlers' power level. You can also earn gems, coins, power points, and other rewards by playing the game, completing quests, opening brawl boxes, reaching milestones on Trophy Road, and participating in special events.</p>
- <h2>How to download Brawl Stars APK Club Indir?</h2>
- <h3>The official way: Google Play Store or App Store</h3>
- <p>The easiest and safest way to download Brawl Stars is through the official Google Play Store or App Store. All you need to do is search for "Brawl Stars" on your device's store app and tap on the install button. This will ensure that you get the latest version of the game that is compatible with your device and region. You will also get automatic updates and support from Supercell.</p>
- <h3>The alternative way: APK Club website or other third-party sources</h3>
- <p>If you want to download Brawl Stars from an alternative source, such as APK Club or other third-party websites, you will need to follow some extra steps. APK Club is a website that offers free downloads of various Android apps and games, including Brawl Stars. To download Brawl Stars from APK Club, you will need to:</p>
- <ul>
- <li>Go to [10](https://www.apkclub.com/brawl-stars-apk-download/) on your browser or scan the QR code on the website.</li>
- <li>Tap on the download button and wait for the APK file to be downloaded.</li>
- <li>Go to your device's settings and enable the option to install apps from unknown sources.</li>
- <li>Locate the downloaded APK file on your device and tap on it to install it.</li>
- <li>Launch the game and enjoy.</li>
- </ul>
- <h4>The pros and cons of using APK Club</h4>
- <p>Some of the advantages of using APK Club to download Brawl Stars are:</p>
- <ul>
- <li>You can access the game even if it is not available in your region or device.</li>
- <li>You can get the latest version of the game before it is released on the official store.</li>
- <li>You can download the game without any ads or surveys.</li>
- </ul>
- <p>Some of the disadvantages of using APK Club to download Brawl Stars are:</p>
- <p>brawl stars apk club indir android<br />
- brawl stars apk club indir ios<br />
- brawl stars apk club indir pc<br />
- brawl stars apk club indir ücretsiz<br />
- brawl stars apk club indir son sürüm<br />
- brawl stars apk club indir hileli<br />
- brawl stars apk club indir güncel<br />
- brawl stars apk club indir türkçe<br />
- brawl stars apk club indir oyna<br />
- brawl stars apk club indir yükle<br />
- brawl stars apk club indir link<br />
- brawl stars apk club indir nasıl yapılır<br />
- brawl stars apk club indir kurulumu<br />
- brawl stars apk club indir modlu<br />
- brawl stars apk club indir online<br />
- brawl stars apk club indir yeni<br />
- brawl stars apk club indir en iyi<br />
- brawl stars apk club indir full<br />
- brawl stars apk club indir bedava<br />
- brawl stars apk club indir resmi<br />
- brawl stars apk club indir orjinal<br />
- brawl stars apk club indir sınırsız<br />
- brawl stars apk club indir hızlı<br />
- brawl stars apk club indir kolay<br />
- brawl stars apk club indir güvenli<br />
- brawl stars apk club indir 2023<br />
- brawl stars apk club indir 2022<br />
- brawl stars apk club indir 2021<br />
- brawl stars apk club indir 2020<br />
- brawl stars apk club indir 2019<br />
- brawl stars apk club indir 2018<br />
- brawl stars apk club indir 2017<br />
- brawl stars apk club indir 2016<br />
- brawl stars apk club indir 2015<br />
- brawl stars apk club indir 2014<br />
- brawl stars apk club indir 2013<br />
- brawl stars apk club indir 2012<br />
- brawl stars apk club indir 2011<br />
- brawl stars apk club indir 2010<br />
- brawl stars apk club indir google play[^1^]<br />
- brawl stars apk club indir app store[^1^]<br />
- brawl stars apk club indir supertcell[^1^]<br />
- brawl stars apk club indir multiplayer[^1^]<br />
- brawl stars apk club indir battle royale[^1^]<br />
- brawl stars apk club indir brawlers[^1^]<br />
- brawl stars apk club indir skins[^1^]<br />
- brawl stars apk club indir events[^1^]<br />
- brawl stars apk club indir maps[^1^]<br />
- brawl stars apk club indir clubs[^1^]</p>
- <ul>
- <li>You may encounter compatibility issues or bugs with the game.</li>
- <li>You may not be able to update the game automatically or use some features that require an official account.</li>
- <li>You may expose your device to malware or viruses that may harm your data or privacy.</li>
- </ul>
- <h4>The risks and precautions of using third-party sources</h4>
- <p>If you decide to download Brawl Stars from other third-party sources, such as websites, forums, or file-sharing platforms, you should be aware of the potential risks and take some precautions. Some of the risks are:</p>
- <ul>
- <li>You may download a fake or modified version of the game that may contain malicious code or unwanted content.</li>
- <li>You may violate the terms of service or end-user license agreement of Supercell and get banned from playing the game.</li>
- <li>You may lose your progress or account if you switch to a different device or source.</li>
- </ul>
- <p>Some of the precautions are:</p>
- <ul>
- <li>Always check the reputation and reviews of the source before downloading anything.</li>
- <li>Always scan the downloaded file with a reliable antivirus software before installing it.</li>
- <li>Always backup your data and account before switching to a different device or source.</li>
- </ul>
- <h2>How to play Brawl Stars?</h2> <h3>The basics: choose a brawler, join a match, and fight</h3>
- <p>Before you start a match, you need to choose a brawler that suits your playstyle and the game mode. You can see the stats, abilities, and skins of each brawler by tapping on them in the Brawlers menu. You can also see their power level, which indicates how much you have upgraded them with power points and coins. The higher the power level, the stronger the brawler.</p>
- <p>Once you have selected a brawler, you can join a match by tapping on the Play button. You can either play with random teammates or invite your friends to join you. You can also play solo in some game modes, such as Showdown or Solo Showdown. Depending on the game mode, you will be matched with 2 to 9 other players in a map.</p>
- <p>The objective of each match is different, but the basic gameplay is the same: you need to use your brawler's attacks and super to defeat your enemies and achieve your goal. You can move your brawler with the blue joystick on the left side of the screen, and aim and fire your attacks with the red joystick on the right side. You can also tap on the red joystick to quickfire, which will automatically target the nearest enemy. You can also use gadgets and star powers, which are special abilities that you unlock at higher power levels.</p>
- <h3>The game modes: Smash & Grab, Showdown, Bounty, Heist, Brawl Ball, and more</h3>
- <p>Brawl Stars offers a variety of game modes that test your skills and strategy in different ways. Here are some of the most popular game modes and how to play them:</p>
- <table>
- <tr>
- <th>Game Mode</th>
- <th>Description</th>
- <th>Objective</th>
- </tr>
- <tr>
- <td>Smash & Grab</td>
- <td>A 3v3 mode where gems spawn from a mine in the center of the map.</td>
- <td>Collect and hold 10 gems for 15 seconds to win. If you die, you drop your gems.</td>
- </tr>
- <tr>
- <td>Showdown</td>
- <td>A solo or duo mode where 10 players fight in a shrinking map.</td>
- <td>Be the last brawler or team standing. Collect power cubes from crates or enemies to boost your stats.</td>
- </tr>
- <tr>
- <td>Bounty</td>
- <td>A 3v3 mode where each kill gives you a star and increases your bounty.</td>
- <td>Earn more stars than the enemy team by killing them. The higher your bounty, the more stars you drop when you die.</td>
- </tr>
- <tr>
- <td>Heist</td>
- <td>A 3v3 mode where each team has a safe to protect and attack.</td>
- <td>Destroy the enemy safe or deal more damage to it than they do to yours.</td>
- </tr>
- <tr>
- <td>Brawl Ball</td>
- <td>A 3v3 mode where each team tries to score goals with a ball.</td>
- <td>Score two goals before the enemy team or have more goals when the time runs out. You can kick or carry the ball, but you can't attack while holding it.</td>
- </tr>
- <tr>
- <td>Siege</td>
- <td>A 3v3 mode where each team tries to destroy the enemy's IKE turret with a siege bot.</td>
- <td>Collect bolts to build your siege bot, which will attack the enemy turret. Destroy the enemy turret or deal more damage to it than they do to yours.</td>
- </tr>
- <tr>
- <td>Hot Zone</td>
- <td>A 3v3 mode where each team tries to control zones on the map.</td>
- <td>Earn points by staying in the zones. The first team to reach 100 points or have more points when the time runs out wins.</td>
- </tr>
- </table>
- <h3>The tips and tricks: use obstacles, team up, run away, and more</h3>
- <p>Brawl Stars is not just about shooting and smashing your enemies. You also need to use your brain and skills to outsmart and outplay them. Here are some tips and tricks that can help you improve your game:</p>
- <ul>
- <li>Use obstacles to hide from enemy fire or ambush them. You can also destroy some obstacles with your attacks or super.</li>
- <li>Team up with your allies and coordinate your attacks. You can use voice chat or quick chat messages to communicate with them.</li>
- <li>Run away when you are low on health or outnumbered. You can heal by staying out of combat for a few seconds.</li>
- <li>Use your super wisely. Don't waste it on weak enemies or when you are about to die. Save it for critical moments or combo it with other supers.</li>
- <li>Learn from your mistakes or watch replays of your matches. You can see what you did wrong or right, and learn from other players' strategies and tactics.</li>
- <li>Have fun and experiment with different brawlers, modes, and maps. You may discover new ways to play or enjoy the game.</li>
- </ul>
- <h2>Conclusion</h2>
- <h3>A summary of the main points</h3>
- <p>Brawl Stars is a mobile game that you can download and play for free on your Android or iOS device. It is a multiplayer game that offers different modes, characters, and challenges for you to enjoy. You can download it from the official Google Play Store or App Store, or from alternative sources such as APK Club or other third-party websites. However, you should be aware of the pros and cons of using these sources, and take some precautions to avoid any risks. You can also improve your skills and have more fun by following some tips and tricks that we shared in this article.</p>
- <h3>A call to action for the readers</h3>
- <p>If you are interested in Brawl Stars, we encourage you to give it a try and see for yourself why it is one of the most popular mobile games in the world. You can also join the Brawl Stars community and share your feedback, opinions, questions, and suggestions with other players and developers. You can find them on social media platforms such as Facebook, Twitter, Instagram, YouTube, Reddit, Discord, and more. You can also visit the official Brawl Stars website for more information and updates on the game.</p>
- <h2>FAQs</h2>
- <h3>What are the best brawlers in Brawl Stars?</h3>
- <p>There is no definitive answer to this question, as different brawlers have different strengths and weaknesses, and may perform better or worse depending on the game mode, map, team composition, and personal preference. However, some of the brawlers that are generally considered to be strong and versatile are:</p>
- <ul>
- <li>Colette: a chromatic brawler who deals damage based on the enemy's health, making her effective against any target.</li>
- <li>Edgar: an epic brawler who can jump over obstacles and enemies, healing himself with each attack.</li>
- <li>Spike: a legendary brawler who can throw cactus bombs that explode into spikes, dealing area damage and slowing down enemies.</li>
- <li>Byron: a mythic brawler who can heal his allies or poison his enemies with his shots, as well as use his super to deal massive damage or healing over time.</li>
- <li>Belle: a chromatic brawler who can shoot electric bullets that bounce between enemies, as well as use her super to mark an enemy for extra damage.</li>
- </ul>
- <h3>How to get new brawlers in Brawl Stars?</h3>
- <p>You can get new brawlers in Brawl Stars by opening brawl boxes, which are containers that contain various rewards such as coins, power points, gadgets, star powers, and brawlers. You can get brawl boxes by playing the game, completing quests, reaching milestones on Trophy Road, participating in special events, or buying them with gems. The chances of getting a new brawler depend on the rarity of the brawler and your luck. The rarer the brawler, the lower the chance of getting it. You can see the odds of getting a new brawler by tapping on the info button on the brawl box screen.</p>
- <h3>How to get gems and coins in Brawl Stars?</h3>
- <p>You can get gems and coins in Brawl Stars by playing the game, completing quests, opening brawl boxes, reaching milestones on Trophy Road, or participating in special events. You can also buy gems with real money through in-app purchases. Gems are the premium currency that you can use to buy brawl boxes, skins, coins, power points, and more. Coins are the regular currency that you can use to upgrade your brawlers' power level.</p>
- <h3>How to join or create a club in Brawl Stars?</h3>
- <p>A club is a group of players who can chat, play, and compete together in Brawl Stars. You can join or create a club by tapping on the social button on the main screen. You can search for an existing club by name or tag, or browse the recommended clubs based on your region and trophies. You can also create your own club by choosing a name, a tag, a badge, a description, and a type (open, invite only, or closed). You can invite your friends to join your club by tapping on the invite button and sending them a link. You can also leave or switch clubs at any time by tapping on the settings button and choosing the appropriate option.</p>
- <h3>How to contact Supercell for support or feedback?</h3>
- <p>If you have any issues, questions, or suggestions regarding Brawl Stars, you can contact Supercell for support or feedback by tapping on the settings button on the main screen and choosing the help and support option. You can browse the frequently asked questions or contact the support team directly by tapping on the message button. You can also visit the official Brawl Stars website for more information and updates on the game.</p> 197e85843d<br />
- <br />
- <br />

spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Build Your Fantasy Empire with War and Order APK for Android.md DELETED
@@ -1,111 +0,0 @@
- <br />
- <h1>War and Order APK: A Strategy Game for Android</h1>
- <p>If you are looking for a strategy game that combines real-time combat, tower defense, and castle building, then you might want to check out War and Order APK. This is a game that lets you build your own fantasy empire in a gorgeous 3D medieval world. You can command orcs, elves, mages, and other races to fight against enemies from all over the world. You can also join an alliance and cooperate with other players to conquer new lands and castles. In this article, we will tell you more about War and Order APK, how to download and install it, how to play it, what are its features, and what are its pros and cons.</p>
- <h2>What is War and Order APK?</h2>
- <p>War and Order APK is an Android game developed by Camel Games. It is a real-time strategy, tower defense, and castle building game that has received several global Google recommendations. It is one of the most popular games in its genre, with over 10 million downloads on Google Play.</p>
- <h2>war and order apk</h2><br /><p><b><b>Download</b> &#128504; <a href="https://urlin.us/2uSUFR">https://urlin.us/2uSUFR</a></b></p><br /><br />
- <h3>A real-time strategy, tower defense, and castle building game</h3>
- <p>In War and Order APK, you can build your own empire by constructing and upgrading various buildings, such as barracks, farms, mines, workshops, walls, towers, etc. You can also recruit and train over 50 different types of soldiers, such as orcs, elves, humans, mages, beasts, angels, etc. You can use these soldiers to defend your base from enemy attacks or to attack other players' bases. You can also research new magic and technology to unlock new units, buffs, and weapons.</p>
- <h3>A 3D medieval game world with orcs, elves, and mages</h3>
- <p>War and Order APK has a stunning 3D graphics that immerses you in a medieval fantasy world. You can see your buildings, soldiers, enemies, and battles in full detail. You can also zoom in or out to get a better view of the action. The game also has a realistic sound effects that enhance the atmosphere of the game.</p>
- <h3>A global game with players from all over the world</h3>
- <p>War and Order APK is not just a single-player game. You can also interact with other players from around the world in real time. You can chat with them, make friends or enemies, form alliances or rivalries. You can also fight together or against each other in huge battles that involve hundreds or thousands of players. You can also compete for rankings, rewards, territories, castles, etc.</p>
- <h2>How to Download and Install War and Order APK?</h2>
- <p>If you want to play War and Order APK on your Android device, you need to download and install the APK file first. Here are the steps to do so:</p>
- <h3>Download the APK file from a trusted source</h3>
- <p>You can download the War and Order APK file from a trusted source, such as Softonic or APKCombo. You can also scan the APK file with an antivirus software before installing it to ensure its safety.</p>
- <p>war and order apk download<br />
- war and order apk mod<br />
- war and order apk latest version<br />
- war and order apk update<br />
- war and order apk free<br />
- war and order apk hack<br />
- war and order apk offline<br />
- war and order apk old version<br />
- war and order apk for pc<br />
- war and order apk for android<br />
- war and order apk obb<br />
- war and order apk xapk<br />
- war and order apk unlimited gems<br />
- war and order apk revdl<br />
- war and order apk pure<br />
- war and order apk data<br />
- war and order apk mirror<br />
- war and order apk rexdl<br />
- war and order apk 2023<br />
- war and order apk 2022<br />
- war and order apk 2021<br />
- war and order apk 2020<br />
- war and order apk 2019<br />
- war and order apk 2018<br />
- war and order apk 2017<br />
- war and order apk 2016<br />
- war and order apk 2015<br />
- war and order apk full version<br />
- war and order apk no root<br />
- war and order apk no ads<br />
- war and order apk cheat engine<br />
- war and order apk unlimited money<br />
- war and order apk unlimited resources<br />
- war and order apk unlimited troops<br />
- war and order apk unlimited everything<br />
- war and order apk mega mod<br />
- war and order apk god mode<br />
- war and order apk vip mod<br />
- war and order apk pro mod<br />
- war and order apk premium mod<br />
- war and order apk cracked mod<br />
- war and order apk hacked mod<br />
- war and order apk modded mod<br />
- war and order apk patched mod<br />
- war and order apk unlocked mod<br />
- war and order apk original mod<br />
- war and order apk latest mod<br />
- war and order apk new mod</p>
- <h3>Enable unknown sources on your device settings</h3>
- <p>Before you can install the War and Order APK file, you need to enable unknown sources on your device settings. This will allow you to install apps from sources other than Google Play. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may also need to confirm this action by tapping OK or Allow.</p>
- <h3>Install the APK file and launch the game</h3>
- <p>Once you have downloaded and enabled unknown sources, you can install the War and Order APK file by tapping on it. You may need to grant some permissions to the app, such as access to storage, location, contacts, etc. After the installation is complete, you can launch the game by tapping on its icon on your home screen or app drawer.</p>
- <h2>How to Play War and Order APK?</h2>
- <p>Now that you have installed War and Order APK, you can start playing it. Here are some basic tips on how to play the game:</p>
- <h3>Build your own empire with various buildings and soldiers</h3>
- <p>The first thing you need to do in War and Order APK is to build your own empire. You can do this by constructing and upgrading various buildings, such as barracks, farms, mines, workshops, walls, towers, etc. Each building has a different function and benefit for your empire. For example, barracks allow you to recruit and train soldiers, farms produce food for your army, mines generate gold for your treasury, workshops produce materials for your weapons and equipment, walls protect your base from enemy attacks, towers provide defense and support for your troops, etc. You can also decorate your base with flags, statues, fountains, etc. to make it more attractive.</p>
- <p>You also need to recruit and train soldiers for your army. You can do this by tapping on the barracks and selecting the type of unit you want to recruit. There are over 50 different types of units in War and Order APK, such as orcs, elves, humans, mages, beasts, angels, etc. Each unit has a different cost, speed, attack, defense, range, and skill. You can also upgrade your units by researching new magic and technology in the academy. You can also equip your units with weapons and armor that you can craft in the workshop or buy in the market.</p>
- <h3>Research new magic and technology for advanced tactics and weapons</h3>
- <p>Another important aspect of War and Order APK is to research new magic and technology for your empire. You can do this by tapping on the academy and selecting the type of research you want to conduct. There are four categories of research in War and Order APK: Development, Military, Defense, and Magic. Each category has several subcategories that contain various research topics. For example, Development research allows you to improve your production, storage, speed, etc., Military research allows you to unlock new units, buffs, weapons, etc., Defense research allows you to enhance your walls, towers, traps, etc., and Magic research allows you to learn new spells, runes, potions, etc. Researching new magic and technology can give you an edge over your enemies and allies in the game.</p>
- <h3>Join an alliance and cooperate with other players to conquer territories and castles</h3>
- <p>One of the most fun and exciting features of War and Order APK is to join an alliance and cooperate with other players. You can do this by tapping on the alliance button and choosing to join an existing alliance or create your own. Joining an alliance can give you many benefits, such as sharing resources, information, troops, gifts, etc. You can also chat with your alliance members, make friends or enemies, form strategies and plans, etc.</p>
- <p>One of the main goals of an alliance is to conquer new territories and castles in the game world. You can do this by tapping on the map and selecting a target to attack. You can also scout, rally, reinforce, or support your allies or enemies in the map. Conquering new territories and castles can give you more resources, prestige, and power in the game. You can also defend your territories and castles from enemy attacks by building defenses and sending troops.</p>
- <h2>What are the Features of War and Order APK?</h2>
- <p>War and Order APK has many features that make it a fun and addictive game. Here are some of them:</p>
- <h3>Huge battles with fully animated graphics and sound effects</h3>
- <p>War and Order APK has huge battles that involve hundreds or thousands of players and units. You can see your soldiers fight in real time with fully animated graphics and sound effects. You can also zoom in or out to get a better view of the action. The game also has a realistic physics engine that simulates the movement, collision, and damage of the units.</p>
- <h3>Diverse units and races with different abilities and skills</h3>
- <p>War and Order APK has diverse units and races that have different abilities and skills. You can command orcs, elves, humans, mages, beasts, angels, etc. Each unit has a different cost, speed, attack, defense, range, and skill. You can also upgrade your units by researching new magic and technology. You can also equip your units with weapons and armor that you can craft or buy.</p>
- <h3>A dynamic world with monsters, events, and challenges</h3>
- <p>War and Order APK has a dynamic world that changes according to the actions of the players. You can encounter monsters, events, and challenges in the game world that can give you rewards or risks. For example, you can fight against dragons, giants, zombies, etc. that drop rare items or resources. You can also participate in events such as festivals, tournaments, sieges, etc. that offer rewards or rankings. You can also face challenges such as quests, missions, achievements, etc. that test your skills and strategy.</p>
- <h2>What are the Pros and Cons of War and Order APK?</h2>
- <p>Like any other game, War and Order APK has its pros and cons. Here are some of them:</p>
- <h3>Pros: Fun, addictive, and strategic gameplay; Free to play; Regular updates; Friendly community</h3>
- <p>War and Order APK has a fun, addictive, and strategic gameplay that can keep you entertained for hours. You can build your own empire, recruit and train your army, research new magic and technology, join an alliance, fight against other players, conquer new territories and castles, etc. The game is also free to play, although you can buy some in-game items with real money if you want to. The game also has regular updates that add new features, content, and improvements to the game. The game also has a friendly community that you can chat with, make friends or enemies, form alliances or rivalries, etc.</p>
- <h3>Cons: Requires internet connection; May consume battery and data; May have bugs or glitches</h3>
- <p>War and Order APK requires an internet connection to play, which means you cannot play it offline. The game may also consume a lot of battery and data on your device, especially if you play it for a long time or participate in large battles. The game may also have some bugs or glitches that may affect your gameplay or experience. For example, you may encounter crashes, freezes, lags, errors, etc.</p>
- <h2>Conclusion</h2>
- <p>War and Order APK is a strategy game for Android that lets you build your own fantasy empire in a 3D medieval world. You can command orcs, elves, mages, and other races to fight against enemies from all over the world. You can also join an alliance and cooperate with other players to conquer new lands and castles. The game has many features that make it fun and addictive, such as huge battles, diverse units, dynamic world, etc. The game also has some pros and cons that you should consider before playing it.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about War and Order APK:</p>
- <h3>Q: Is War and Order APK safe to download and install?</h3>
- <p>A: Yes, War and Order APK is safe to download and install as long as you get it from a trusted source. You can also scan the APK file with an antivirus software before installing it to ensure its safety.</p>
- <h3>Q: How can I get more resources in War and Order APK?</h3>
- <p>A: You can get more resources in War and Order APK by building and upgrading your farms, mines, workshops, etc. You can also collect resources from the map by attacking monsters, events, or other players. You can also trade resources with your alliance members or buy them with real money.</p>
- <h3>Q: How can I get more gems in War and Order APK?</h3>
- <p>A: Gems are the premium currency in War and Order APK that can be used to buy special items, speed up processes, etc. You can get more gems by completing quests, achievements, challenges, etc. You can also get gems as rewards from events, tournaments, sieges, etc. You can also get gems by participating in the daily lottery or watching ads. You can also buy gems with real money.</p>
- <h3>Q: How can I join or create an alliance in War and Order APK?</h3>
- <p>A: You can join or create an alliance in War and Order APK by tapping on the alliance button and choosing to join an existing alliance or create your own. To join an existing alliance, you need to apply for it and wait for the approval of the leader or the elders. To create your own alliance, you need to pay a certain amount of gold and choose a name, flag, and description for your alliance. You can also invite your friends to join your alliance or accept their applications.</p>
105
- <h3>Q: How can I change my name, avatar, or flag in War and Order APK?</h3>
106
- <p>A: You can change your name, avatar, or flag in War and Order APK by tapping on your profile button and choosing to edit your information. You can change your name once for free and then you need to pay gems for each change. You can change your avatar by selecting from the default options or uploading your own image. You can change your flag by selecting from the default options or creating your own design.</p>
107
- <h3>Q: How can I contact the customer service or report a problem in War and Order APK?</h3>
108
- <p>A: You can contact the customer service or report a problem in War and Order APK by tapping on the settings button and choosing to contact us or report a problem. You can also send an email to [email protected] or visit their official website or Facebook page for more information and support.</p>
109
- References: : https://war-and-order.en.softonic.com/android : https://apkcombo.com/war-and-order/com.camelgames.superking/ : https://www.warandorder.net/ : https://www.facebook.com/WarandOrder1/</p> 197e85843d<br />
110
- <br />
111
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/44ov41za8i/FreeVC/speaker_encoder/data_objects/speaker_batch.py DELETED
@@ -1,12 +0,0 @@
- import numpy as np
- from typing import List
- from speaker_encoder.data_objects.speaker import Speaker
-
- class SpeakerBatch:
-     def __init__(self, speakers: List[Speaker], utterances_per_speaker: int, n_frames: int):
-         self.speakers = speakers
-         self.partials = {s: s.random_partial(utterances_per_speaker, n_frames) for s in speakers}
-
-         # Array of shape (n_speakers * n_utterances, n_frames, mel_n), e.g. for 3 speakers with
-         # 4 utterances each of 160 frames of 40 mel coefficients: (12, 160, 40)
-         self.data = np.array([frames for s in speakers for _, frames, _ in self.partials[s]])
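A quick sanity check of the deleted `SpeakerBatch` class above, assuming a stub in place of the real `Speaker` (the stub's `random_partial` mimics the `(id, frames, slice)` tuples the comprehension unpacks; everything here besides `SpeakerBatch` itself is hypothetical):

    import numpy as np

    class StubSpeaker:
        # Hypothetical stand-in for speaker_encoder.data_objects.speaker.Speaker.
        def random_partial(self, count, n_frames):
            # Return `count` tuples shaped like (id, mel frames, slice).
            return [(i, np.zeros((n_frames, 40), dtype=np.float32), None) for i in range(count)]

    batch = SpeakerBatch([StubSpeaker() for _ in range(3)], utterances_per_speaker=4, n_frames=160)
    print(batch.data.shape)  # (12, 160, 40), matching the comment in __init__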
spaces/52Hz/CMFNet_deblurring/main_test_CMFNet.py DELETED
@@ -1,88 +0,0 @@
- import argparse
- import cv2
- import glob
- import numpy as np
- from collections import OrderedDict
- from skimage import img_as_ubyte
- import os
- import torch
- import requests
- from PIL import Image
- import torchvision.transforms.functional as TF
- import torch.nn.functional as F
- from natsort import natsorted
- from model.CMFNet import CMFNet
-
- def main():
-     parser = argparse.ArgumentParser(description='Demo Image Deblur')
-     parser.add_argument('--input_dir', default='test/', type=str, help='Input images')
-     parser.add_argument('--result_dir', default='results/', type=str, help='Directory for results')
-     parser.add_argument('--weights',
-                         default='experiments/pretrained_models/deblur_GoPro_CMFNet.pth', type=str,
-                         help='Path to weights')
-
-     args = parser.parse_args()
-
-     inp_dir = args.input_dir
-     out_dir = args.result_dir
-
-     os.makedirs(out_dir, exist_ok=True)
-
-     files = natsorted(glob.glob(os.path.join(inp_dir, '*')))
-
-     if len(files) == 0:
-         raise Exception(f"No files found at {inp_dir}")
-
-     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-     # Load the corresponding model architecture and weights
-     model = CMFNet()
-     model = model.to(device)
-     model.eval()
-     load_checkpoint(model, args.weights)
-
-     mul = 8
-     for file_ in files:
-         img = Image.open(file_).convert('RGB')
-         input_ = TF.to_tensor(img).unsqueeze(0).to(device)
-
-         # Pad the input if its size is not a multiple of 8
-         h, w = input_.shape[2], input_.shape[3]
-         H, W = ((h + mul) // mul) * mul, ((w + mul) // mul) * mul
-         padh = H - h if h % mul != 0 else 0
-         padw = W - w if w % mul != 0 else 0
-         input_ = F.pad(input_, (0, padw, 0, padh), 'reflect')
-
-         with torch.no_grad():
-             restored = model(input_)
-
-         restored = torch.clamp(restored, 0, 1)
-         restored = restored[:, :, :h, :w]
-         restored = restored.permute(0, 2, 3, 1).cpu().detach().numpy()
-         restored = img_as_ubyte(restored[0])
-
-         f = os.path.splitext(os.path.split(file_)[-1])[0]
-         save_img((os.path.join(out_dir, f + '.png')), restored)
-
-
- def save_img(filepath, img):
-     cv2.imwrite(filepath, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
-
-
- def load_checkpoint(model, weights):
-     checkpoint = torch.load(weights, map_location=torch.device('cpu'))
-     try:
-         model.load_state_dict(checkpoint["state_dict"])
-     except Exception:
-         state_dict = checkpoint["state_dict"]
-         new_state_dict = OrderedDict()
-         for k, v in state_dict.items():
-             name = k[7:]  # remove the `module.` prefix added by DataParallel
-             new_state_dict[name] = v
-         model.load_state_dict(new_state_dict)
-
-
- if __name__ == '__main__':
-     main()
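The padding logic in `main` is the one non-obvious step: the network downsamples by 8, so inputs are reflect-padded on the right and bottom up to the next multiple of 8, and the restored output is cropped back to the original size. A standalone sketch of just that computation (`pad_to_multiple` is a hypothetical helper, not part of the deleted script):

    import torch
    import torch.nn.functional as F

    def pad_to_multiple(x, mul=8):
        # x: (N, C, H, W). Reflect-pad right/bottom so H and W become multiples of `mul`.
        h, w = x.shape[2], x.shape[3]
        padh = (mul - h % mul) % mul
        padw = (mul - w % mul) % mul
        return F.pad(x, (0, padw, 0, padh), 'reflect'), (h, w)

    x = torch.rand(1, 3, 123, 250)
    padded, (h, w) = pad_to_multiple(x)
    print(padded.shape)              # torch.Size([1, 3, 128, 256])
    restored = padded[:, :, :h, :w]  # crop back after the model runs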
spaces/AB-TW/team-ai/agents/tools/smart_domain/entity.py DELETED
@@ -1,115 +0,0 @@
- from langchain.prompts import PromptTemplate
- from langchain.chains import LLMChain
- from langchain.agents import tool
- from agents.tools.smart_domain.common import getPrefix
- from models import llm
-
- entity_architecture = """
- Entity: This component is used to represent business concepts and encapsulate business rules.
- It may include 3 parts:
- - id (identity of the entity)
- - description (package of properties that represents the value of the entity)
- - associations (collection of associated entities)
- ---example code:
- @Getter
- @AllArgsConstructor
- public class Feature {{
-     // id
-     private FeatureId id;
-
-     // description
-     private FeatureDescription description;
-
-     // associations
-     private FeatureConfigs configs;
-
-     public record FeatureId(String featureKey) {{
-
-     }}
-
-     @Builder
-     public record FeatureDescription(String name,
-                                      String description,
-                                      Boolean isEnable,
-                                      LocalDateTime updatedAt,
-                                      LocalDateTime createdAt) {{
-
-     }}
-
-     public Feature update(Feature newFeature) {{
-         this.description = FeatureDescription.builder()
-                 .name(newFeature.description.name())
-                 .description(newFeature.description.description())
-                 .isEnable(this.description.isEnable())
-                 .updatedAt(LocalDateTime.now())
-                 .createdAt(this.description.createdAt())
-                 .build();
-
-         return this;
-     }}
-
-     public interface FeatureConfigs {{
-         Flux<FeatureConfig> findAll();
-         Flux<FeatureConfig> subCollection(long from, long to);
-         Mono<FeatureConfig> findById(FeatureConfigId id);
-     }}
- }}
- ---end of example code
- """
-
- entity_test_strategy = """
- For the Entity, we can write unit tests to ensure that its business rules are correctly encapsulated.
- ---example code
- class FeatureTest {{
-     @Test
-     void should_update_feature_description() {{
-         // given
-         Feature feature = Feature.builder()
-                 .id(new FeatureId("featureKey"))
-                 .description(new FeatureDescription("name", "description", true, LocalDateTime.now(), LocalDateTime.now()))
-                 .build();
-         Feature newFeature = Feature.builder()
-                 .id(new FeatureId("featureKey"))
-                 .description(new FeatureDescription("newName", "newDescription", true, LocalDateTime.now(), LocalDateTime.now()))
-                 .build();
-         // when
-         feature.update(newFeature);
-         // then
-         assertThat(feature.description().name()).isEqualTo("newName");
-         assertThat(feature.description().description()).isEqualTo("newDescription");
-     }}
- }}
- ---end of example code
- """
-
- entity_tech_stack = """
- Java17、reactor、lombok、Junit5、reactor test、Mockito
- """
-
- entity_task = """Your task is to generate the domain layer Entity tests and production code."""
- ENTITY = getPrefix(entity_task, entity_tech_stack, entity_architecture, entity_test_strategy) + """
-
- Use the following format:
- request: the request that you need to fulfill
-
- Entity:
- ```
- the Entity code that you write to fulfill the request, following the TechStack and Architecture
- ```
-
- Test:
- ```
- the test code that you write to fulfill the request, following the TechStack, Architecture and TestStrategy
- ```
-
- request: {input}"""
-
- ENTITY_PROMPT = PromptTemplate(input_variables=["input"], template=ENTITY)
-
- entityChain = LLMChain(llm=llm(temperature=0.1), prompt=ENTITY_PROMPT)
-
-
- @tool("Generate Entity Code", return_direct=True)
- def entityCodeGenerator(input: str) -> str:
-     '''useful for when you need to generate entity code'''
-     response = entityChain.run(input)
-     return response
spaces/ADOPLE/AdopleAI-Website-DocumentQA/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: DocumentQA Website
- emoji: 🏃
- colorFrom: red
- colorTo: red
- sdk: gradio
- sdk_version: 3.35.2
- app_file: app.py
- pinned: false
- duplicated_from: ADOPLE/Adopleai-DocumentQA
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/AIFILMS/generate_human_motion/VQ-Trans/options/option_vq.py DELETED
@@ -1,61 +0,0 @@
- import argparse
-
- def get_args_parser():
-     parser = argparse.ArgumentParser(description='Optimal Transport AutoEncoder training for AIST',
-                                      add_help=True,
-                                      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-
-     ## dataloader
-     parser.add_argument('--dataname', type=str, default='kit', help='dataset name')
-     parser.add_argument('--batch-size', default=128, type=int, help='batch size')
-     parser.add_argument('--window-size', type=int, default=64, help='training motion length')
-
-     ## optimization
-     parser.add_argument('--total-iter', default=200000, type=int, help='number of total iterations to run')
-     parser.add_argument('--warm-up-iter', default=1000, type=int, help='number of total iterations for warmup')
-     parser.add_argument('--lr', default=2e-4, type=float, help='max learning rate')
-     parser.add_argument('--lr-scheduler', default=[50000, 400000], nargs="+", type=int, help="learning rate schedule (iterations)")
-     parser.add_argument('--gamma', default=0.05, type=float, help="learning rate decay")
-
-     parser.add_argument('--weight-decay', default=0.0, type=float, help='weight decay')
-     parser.add_argument("--commit", type=float, default=0.02, help="hyper-parameter for the commitment loss")
-     parser.add_argument('--loss-vel', type=float, default=0.1, help='hyper-parameter for the velocity loss')
-     parser.add_argument('--recons-loss', type=str, default='l2', help='reconstruction loss')
-
-     ## vqvae arch
-     parser.add_argument("--code-dim", type=int, default=512, help="embedding dimension")
-     parser.add_argument("--nb-code", type=int, default=512, help="nb of embedding")
-     parser.add_argument("--mu", type=float, default=0.99, help="exponential moving average to update the codebook")
-     parser.add_argument("--down-t", type=int, default=2, help="downsampling rate")
-     parser.add_argument("--stride-t", type=int, default=2, help="stride size")
-     parser.add_argument("--width", type=int, default=512, help="width of the network")
-     parser.add_argument("--depth", type=int, default=3, help="depth of the network")
-     parser.add_argument("--dilation-growth-rate", type=int, default=3, help="dilation growth rate")
-     parser.add_argument("--output-emb-width", type=int, default=512, help="output embedding width")
-     parser.add_argument('--vq-act', type=str, default='relu', choices=['relu', 'silu', 'gelu'], help='activation function')
-     parser.add_argument('--vq-norm', type=str, default=None, help='normalization layer')
-
-     ## quantizer
-     parser.add_argument("--quantizer", type=str, default='ema_reset', choices=['ema', 'orig', 'ema_reset', 'reset'], help="quantizer type")
-     parser.add_argument('--beta', type=float, default=1.0, help='commitment loss in standard VQ')
-
-     ## resume
-     parser.add_argument("--resume-pth", type=str, default=None, help='resume pth for VQ')
-     parser.add_argument("--resume-gpt", type=str, default=None, help='resume pth for GPT')
-
-     ## output directory
-     parser.add_argument('--out-dir', type=str, default='output_vqfinal/', help='output directory')
-     parser.add_argument('--results-dir', type=str, default='visual_results/', help='output directory for visual results')
-     parser.add_argument('--visual-name', type=str, default='baseline', help='name for the visualization run')
-     parser.add_argument('--exp-name', type=str, default='exp_debug', help='name of the experiment, will create a file inside out-dir')
-     ## other
-     parser.add_argument('--print-iter', default=200, type=int, help='print frequency')
-     parser.add_argument('--eval-iter', default=1000, type=int, help='evaluation frequency')
-     parser.add_argument('--seed', default=123, type=int, help='seed for initializing training.')
-
-     parser.add_argument('--vis-gt', action='store_true', help='whether visualize GT motions')
-     parser.add_argument('--nb-vis', default=20, type=int, help='nb of visualizations')
-
-     return parser.parse_args()
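Since `get_args_parser()` calls `parse_args()` internally, it reads `sys.argv` directly; a minimal way to exercise it outside a training run (the script name `train_vq.py` is illustrative):

    import sys

    sys.argv = ['train_vq.py', '--dataname', 't2m', '--batch-size', '256', '--exp-name', 'vq_run1']
    args = get_args_parser()
    print(args.dataname, args.batch_size)  # t2m 256
    print(args.nb_code, args.code_dim)     # 512 512 (defaults)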
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/ema.py DELETED
@@ -1,76 +0,0 @@
- import torch
- from torch import nn
-
-
- class LitEma(nn.Module):
-     def __init__(self, model, decay=0.9999, use_num_upates=True):
-         super().__init__()
-         if decay < 0.0 or decay > 1.0:
-             raise ValueError('Decay must be between 0 and 1')
-
-         self.m_name2s_name = {}
-         self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
-         self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates
-                              else torch.tensor(-1, dtype=torch.int))
-
-         for name, p in model.named_parameters():
-             if p.requires_grad:
-                 # remove '.' since the character is not allowed in buffer names
-                 s_name = name.replace('.', '')
-                 self.m_name2s_name.update({name: s_name})
-                 self.register_buffer(s_name, p.clone().detach().data)
-
-         self.collected_params = []
-
-     def forward(self, model):
-         decay = self.decay
-
-         if self.num_updates >= 0:
-             self.num_updates += 1
-             decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))
-
-         one_minus_decay = 1.0 - decay
-
-         with torch.no_grad():
-             m_param = dict(model.named_parameters())
-             shadow_params = dict(self.named_buffers())
-
-             for key in m_param:
-                 if m_param[key].requires_grad:
-                     sname = self.m_name2s_name[key]
-                     shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
-                     shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
-                 else:
-                     assert not key in self.m_name2s_name
-
-     def copy_to(self, model):
-         m_param = dict(model.named_parameters())
-         shadow_params = dict(self.named_buffers())
-         for key in m_param:
-             if m_param[key].requires_grad:
-                 m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
-             else:
-                 assert not key in self.m_name2s_name
-
-     def store(self, parameters):
-         """
-         Save the current parameters for restoring later.
-         Args:
-             parameters: Iterable of `torch.nn.Parameter`; the parameters to be
-                 temporarily stored.
-         """
-         self.collected_params = [param.clone() for param in parameters]
-
-     def restore(self, parameters):
-         """
-         Restore the parameters stored with the `store` method.
-         Useful to validate the model with EMA parameters without affecting the
-         original optimization process. Store the parameters before the
-         `copy_to` method. After validation (or model saving), use this to
-         restore the former parameters.
-         Args:
-             parameters: Iterable of `torch.nn.Parameter`; the parameters to be
-                 updated with the stored parameters.
-         """
-         for c_param, param in zip(self.collected_params, parameters):
-             param.data.copy_(c_param.data)
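The warm-up behaviour in `forward` is easy to miss: with `use_num_upates` enabled, the effective decay is `min(decay, (1 + n) / (10 + n))`, so early updates track the live weights closely and the decay only approaches its nominal value as the update count grows. A quick numeric illustration (not part of the deleted module):

    # Effective EMA decay as a function of the update count n,
    # following min(decay, (1 + n) / (10 + n)) from LitEma.forward.
    decay = 0.9999
    for n in (1, 10, 100, 1000, 100000):
        print(n, min(decay, (1 + n) / (10 + n)))
    # 1 -> 0.1818..., 10 -> 0.55, 100 -> 0.9182..., 1000 -> 0.9911..., 100000 -> 0.9999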
spaces/ASJMO/freegpt/server/website.py DELETED
@@ -1,58 +0,0 @@
- from flask import render_template, redirect, url_for, request, session
- from flask_babel import refresh
- from time import time
- from os import urandom
- from server.babel import get_locale, get_languages
-
-
- class Website:
-     def __init__(self, bp, url_prefix) -> None:
-         self.bp = bp
-         self.url_prefix = url_prefix
-         self.routes = {
-             '/': {
-                 'function': lambda: redirect(url_for('._index')),
-                 'methods': ['GET', 'POST']
-             },
-             '/chat/': {
-                 'function': self._index,
-                 'methods': ['GET', 'POST']
-             },
-             '/chat/<conversation_id>': {
-                 'function': self._chat,
-                 'methods': ['GET', 'POST']
-             },
-             '/change-language': {
-                 'function': self.change_language,
-                 'methods': ['POST']
-             },
-             '/get-locale': {
-                 'function': self.get_locale,
-                 'methods': ['GET']
-             },
-             '/get-languages': {
-                 'function': self.get_languages,
-                 'methods': ['GET']
-             }
-         }
-
-     def _chat(self, conversation_id):
-         if '-' not in conversation_id:
-             return redirect(url_for('._index'))
-
-         return render_template('index.html', chat_id=conversation_id, url_prefix=self.url_prefix)
-
-     def _index(self):
-         return render_template('index.html', chat_id=f'{urandom(4).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{hex(int(time() * 1000))[2:]}', url_prefix=self.url_prefix)
-
-     def change_language(self):
-         data = request.get_json()
-         session['language'] = data.get('language')
-         refresh()
-         return '', 204
-
-     def get_locale(self):
-         return get_locale()
-
-     def get_languages(self):
-         return get_languages()
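The `_index` route builds a pseudo-UUID chat id from four random hex groups plus a millisecond timestamp; reproduced standalone (the printed value is illustrative):

    from os import urandom
    from time import time

    # Same id construction as Website._index.
    chat_id = f'{urandom(4).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{urandom(2).hex()}-{hex(int(time() * 1000))[2:]}'
    print(chat_id)  # e.g. '9f2c1a4b-3e7d-00ff-a1b2-18c2f3e4a5b'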
spaces/AlexN/pull_up/README.md DELETED
@@ -1,37 +0,0 @@
- ---
- title: Pull_up
- emoji: 💪
- colorFrom: pink
- colorTo: yellow
- sdk: gradio
- app_file: app.py
- pinned: false
- ---
-
- # Configuration
-
- `title`: _string_
- Display title for the Space
-
- `emoji`: _string_
- Space emoji (emoji-only character allowed)
-
- `colorFrom`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `colorTo`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `sdk`: _string_
- Can be either `gradio` or `streamlit`
-
- `sdk_version`: _string_
- Only applicable for `streamlit` SDK.
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
- `app_file`: _string_
- Path to your main application file (which contains either `gradio` or `streamlit` Python code).
- Path is relative to the root of the repository.
-
- `pinned`: _boolean_
- Whether the Space stays on top of your list.
spaces/AlhitawiMohammed22/HTD_HTR/builder.py DELETED
@@ -1,305 +0,0 @@
-
- # Copyright (C) 2021, Mindee.
-
- # This program is licensed under the Apache License version 2.
- # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
-
-
- from typing import Any, Dict, List, Tuple
- import pandas as pd
-
- import numpy as np
- from scipy.cluster.hierarchy import fclusterdata
-
- from doctr.utils.geometry import estimate_page_angle, resolve_enclosing_bbox, resolve_enclosing_rbbox, rotate_boxes
- from doctr.utils.repr import NestedObject
-
- __all__ = ['DocumentBuilder']
-
-
- class DocumentBuilder(NestedObject):
-     """Implements a document builder
-     Args:
-         resolve_lines: whether words should be automatically grouped into lines
-         resolve_blocks: whether lines should be automatically grouped into blocks
-         paragraph_break: relative length of the minimum space separating paragraphs
-         export_as_straight_boxes: if True, force straight boxes in the export (fit a rectangle
-             box to all rotated boxes). Else, keep the boxes format unchanged, no matter what it is.
-     """
-
-     def __init__(
-         self,
-         resolve_lines: bool = True,
-         resolve_blocks: bool = True,
-         paragraph_break: float = 0.035,
-         export_as_straight_boxes: bool = False,
-     ) -> None:
-
-         self.resolve_lines = resolve_lines
-         self.resolve_blocks = resolve_blocks
-         self.paragraph_break = paragraph_break
-         self.export_as_straight_boxes = export_as_straight_boxes
-
-     @staticmethod
-     def _sort_boxes(boxes: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
-         """Sort bounding boxes from top to bottom, left to right
-         Args:
-             boxes: bounding boxes of shape (N, 4) or (N, 4, 2) (in case of rotated bbox)
-         Returns:
-             tuple: indices of ordered boxes of shape (N,), boxes
-                 If straight boxes are passed to the function, the boxes are unchanged;
-                 else the boxes returned are straight boxes fitted to the straightened rotated boxes,
-                 so that the lines can afterwards be fitted to the straightened page
-         """
-         if boxes.ndim == 3:
-             boxes = rotate_boxes(
-                 loc_preds=boxes,
-                 angle=-estimate_page_angle(boxes),
-                 orig_shape=(1024, 1024),
-                 min_angle=5.,
-             )
-             boxes = np.concatenate((boxes.min(1), boxes.max(1)), -1)
-         return (boxes[:, 0] + 2 * boxes[:, 3] / np.median(boxes[:, 3] - boxes[:, 1])).argsort(), boxes
-
-     def _resolve_sub_lines(self, boxes: np.ndarray, word_idcs: List[int]) -> List[List[int]]:
-         """Split a line in sub-lines
-         Args:
-             boxes: bounding boxes of shape (N, 4)
-             word_idcs: list of indexes for the words of the line
-         Returns:
-             A list of (sub-)lines computed from the original line (words)
-         """
-         lines = []
-         # Sort words horizontally
-         word_idcs = [word_idcs[idx]
-                      for idx in boxes[word_idcs, 0].argsort().tolist()]
-
-         # Eventually split line horizontally
-         if len(word_idcs) < 2:
-             lines.append(word_idcs)
-         else:
-             sub_line = [word_idcs[0]]
-             for i in word_idcs[1:]:
-                 horiz_break = True
-
-                 prev_box = boxes[sub_line[-1]]
-                 # Compute distance between boxes
-                 dist = boxes[i, 0] - prev_box[2]
-                 # If the distance between boxes is lower than the paragraph break, same sub-line
-                 if dist < self.paragraph_break:
-                     horiz_break = False
-
-                 if horiz_break:
-                     lines.append(sub_line)
-                     sub_line = []
-
-                 sub_line.append(i)
-             lines.append(sub_line)
-
-         return lines
-
-     def _resolve_lines(self, boxes: np.ndarray) -> List[List[int]]:
-         """Order boxes to group them in lines
-         Args:
-             boxes: bounding boxes of shape (N, 4) or (N, 4, 2) in case of rotated bbox
-         Returns:
-             nested list of box indices
-         """
-
-         # Sort boxes, and straighten the boxes if they are rotated
-         idxs, boxes = self._sort_boxes(boxes)
-
-         # Compute the median of the box heights
-         y_med = np.median(boxes[:, 3] - boxes[:, 1])
-
-         lines = []
-         words = [idxs[0]]  # Assign the top-left word to the first line
-         # Define a mean y-center for the line
-         y_center_sum = boxes[idxs[0]][[1, 3]].mean()
-
-         for idx in idxs[1:]:
-             vert_break = True
-
-             # Compute y_dist
-             y_dist = abs(boxes[idx][[1, 3]].mean() - y_center_sum / len(words))
-             # If the y-center of the box is close enough to the mean y-center of the line, same line
-             if y_dist < y_med / 2:
-                 vert_break = False
-
-             if vert_break:
-                 # Compute sub-lines (horizontal split)
-                 lines.extend(self._resolve_sub_lines(boxes, words))
-                 words = []
-                 y_center_sum = 0
-
-             words.append(idx)
-             y_center_sum += boxes[idx][[1, 3]].mean()
-
-         # Use the remaining words to form the last line(s)
-         if len(words) > 0:
-             # Compute sub-lines (horizontal split)
-             lines.extend(self._resolve_sub_lines(boxes, words))
-
-         return lines
-
-     @staticmethod
-     def _resolve_blocks(boxes: np.ndarray, lines: List[List[int]]) -> List[List[List[int]]]:
-         """Order lines to group them in blocks
-         Args:
-             boxes: bounding boxes of shape (N, 4) or (N, 4, 2)
-             lines: list of lines, each line is a list of idx
-         Returns:
-             nested list of box indices
-         """
-         # Resolve enclosing boxes of lines
-         if boxes.ndim == 3:
-             box_lines = np.asarray([
-                 resolve_enclosing_rbbox(
-                     [tuple(boxes[idx, :, :]) for idx in line])
-                 for line in lines  # type: ignore[misc]
-             ])
-         else:
-             _box_lines = [
-                 resolve_enclosing_bbox([
-                     # type: ignore[misc]
-                     (tuple(boxes[idx, :2]), tuple(boxes[idx, 2:])) for idx in line
-                 ])
-                 for line in lines
-             ]
-             box_lines = np.asarray([(x1, y1, x2, y2)
-                                     for ((x1, y1), (x2, y2)) in _box_lines])
-
-         # Compute geometrical features of lines in order to cluster them
-         # Clustering only on box centers yields poor results for complex documents
-         if boxes.ndim == 3:
-             box_features = np.stack(
-                 (
-                     (box_lines[:, 0, 0] + box_lines[:, 0, 1]) / 2,
-                     (box_lines[:, 0, 0] + box_lines[:, 2, 0]) / 2,
-                     (box_lines[:, 0, 0] + box_lines[:, 2, 1]) / 2,
-                     (box_lines[:, 0, 1] + box_lines[:, 2, 1]) / 2,
-                     (box_lines[:, 0, 1] + box_lines[:, 2, 0]) / 2,
-                     (box_lines[:, 2, 0] + box_lines[:, 2, 1]) / 2,
-                 ), axis=-1
-             )
-         else:
-             box_features = np.stack(
-                 (
-                     (box_lines[:, 0] + box_lines[:, 3]) / 2,
-                     (box_lines[:, 1] + box_lines[:, 2]) / 2,
-                     (box_lines[:, 0] + box_lines[:, 2]) / 2,
-                     (box_lines[:, 1] + box_lines[:, 3]) / 2,
-                     box_lines[:, 0],
-                     box_lines[:, 1],
-                 ), axis=-1
-             )
-         # Compute clusters
-         clusters = fclusterdata(
-             box_features, t=0.1, depth=4, criterion='distance', metric='euclidean')
-
-         _blocks: Dict[int, List[int]] = {}
-         # Form clusters
-         for line_idx, cluster_idx in enumerate(clusters):
-             if cluster_idx in _blocks.keys():
-                 _blocks[cluster_idx].append(line_idx)
-             else:
-                 _blocks[cluster_idx] = [line_idx]
-
-         # Retrieve word-box level to return a fully nested structure
-         blocks = [[lines[idx] for idx in block] for block in _blocks.values()]
-
-         return blocks
-
-     def _build_blocks(self, boxes: np.ndarray, word_preds: List[Tuple[str, float]], page_shapes: Tuple[int, int]) -> Any:
-         """Gather independent words in structured blocks
-         Args:
-             boxes: bounding boxes of all detected words of the page, of shape (N, 5) or (N, 4, 2)
-             word_preds: list of all detected words of the page, of shape N
-             page_shapes: shape of the page, as a (height, width) tuple
-         Returns:
-             list of block elements
-         """
-
-         if boxes.shape[0] != len(word_preds):
-             raise ValueError(
-                 f"Incompatible argument lengths: {boxes.shape[0]}, {len(word_preds)}")
-
-         if boxes.shape[0] == 0:
-             return []
-
-         # Decide whether we try to form lines
-         _boxes = boxes
-         if self.resolve_lines:
-             lines = self._resolve_lines(
-                 _boxes if _boxes.ndim == 3 else _boxes[:, :4])
-             # Decide whether we try to form blocks
-             if self.resolve_blocks and len(lines) > 1:
-                 _blocks = self._resolve_blocks(
-                     _boxes if _boxes.ndim == 3 else _boxes[:, :4], lines)
-             else:
-                 _blocks = [lines]
-         else:
-             # Sort bounding boxes, one line for all boxes, one block for the line
-             lines = [self._sort_boxes(
-                 _boxes if _boxes.ndim == 3 else _boxes[:, :4])[0]]
-             _blocks = [lines]
-
-         rows = []
-         for block_idx, lines in enumerate(_blocks):
-             for line_idx, line in enumerate(lines):
-                 for i, idx in enumerate(line):
-                     h, w = page_shapes
-                     row = (
-                         block_idx, line_idx, i, word_preds[idx],
-                         int(round(boxes[idx, 0] * w)), int(round(boxes[idx, 1] * h)),
-                         int(round(boxes[idx, 2] * w)), int(round(boxes[idx, 3] * h)),
-                         int(round(boxes[idx, 4] * 100))
-                     )
-                     rows.append(row)
-
-         return rows
-
-     def extra_repr(self) -> str:
-         return (f"resolve_lines={self.resolve_lines}, resolve_blocks={self.resolve_blocks}, "
-                 f"paragraph_break={self.paragraph_break}, "
-                 f"export_as_straight_boxes={self.export_as_straight_boxes}")
-
-     def __call__(
-         self,
-         boxes: List[np.ndarray],
-         text_preds: List[List[Tuple[str, float]]],
-         page_shapes: List[Tuple[int, int]]
-     ) -> List[pd.DataFrame]:
-         """Re-arrange detected words into structured blocks
-         Args:
-             boxes: list of N elements, where each element represents the localization predictions, of shape (*, 5)
-                 or (*, 6), for all words of a given page
-             text_preds: list of N elements, where each element is the list of all word predictions (text + confidence)
-             page_shapes: shape of each page, of size N
-         Returns:
-             one DataFrame per page
-         """
-         if len(boxes) != len(text_preds) or len(boxes) != len(page_shapes):
-             raise ValueError(
-                 "All arguments are expected to be lists of the same size")
-
-         if self.export_as_straight_boxes and len(boxes) > 0:
-             # If boxes are already straight OK, else fit a bounding rect
-             if boxes[0].ndim == 3:
-                 straight_boxes = []
-                 # Iterate over pages
-                 for p_boxes in boxes:
-                     # Iterate over the boxes of the page
-                     straight_boxes.append(np.concatenate(
-                         (p_boxes.min(1), p_boxes.max(1)), 1))
-                 boxes = straight_boxes
-
-         _pages = [
-             pd.DataFrame.from_records(self._build_blocks(page_boxes, word_preds, shape), columns=[
-                 "block_num", "line_num", "word_num", "word", "xmin", "ymin", "xmax", "ymax", "confidence_score"
-             ])
-             for _idx, shape, page_boxes, word_preds in zip(range(len(boxes)), page_shapes, boxes, text_preds)
-         ]
-
-         return _pages
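The sort key in `_sort_boxes` encodes "top to bottom, left to right" as a single scalar, `xmin + 2 * ymax / median_height`: the vertical term dominates, so boxes sort by row first, and `xmin` breaks ties within a row. A small illustration on hypothetical boxes (not from the deleted module):

    import numpy as np

    # Three straight boxes as (xmin, ymin, xmax, ymax) in relative coordinates.
    boxes = np.array([
        [0.7, 0.1, 0.8, 0.2],  # top row, right
        [0.1, 0.1, 0.2, 0.2],  # top row, left
        [0.1, 0.5, 0.2, 0.6],  # lower row, left
    ])
    key = boxes[:, 0] + 2 * boxes[:, 3] / np.median(boxes[:, 3] - boxes[:, 1])
    print(key.argsort())  # [1 0 2]: top-left, then top-right, then the lower row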
spaces/Alpaca233/SadTalker/src/face3d/data/flist_dataset.py DELETED
@@ -1,125 +0,0 @@
- """This script defines the custom dataset for Deep3DFaceRecon_pytorch
- """
-
- import os.path
- from data.base_dataset import BaseDataset, get_transform, get_affine_mat, apply_img_affine, apply_lm_affine
- from data.image_folder import make_dataset
- from PIL import Image
- import random
- import util.util as util
- import numpy as np
- import json
- import torch
- from scipy.io import loadmat, savemat
- import pickle
- from util.preprocess import align_img, estimate_norm
- from util.load_mats import load_lm3d
-
-
- def default_flist_reader(flist):
-     """
-     flist format: impath label\nimpath label\n ... (same as caffe's filelist)
-     """
-     imlist = []
-     with open(flist, 'r') as rf:
-         for line in rf.readlines():
-             impath = line.strip()
-             imlist.append(impath)
-
-     return imlist
-
- def jason_flist_reader(flist):
-     with open(flist, 'r') as fp:
-         info = json.load(fp)
-     return info
-
- def parse_label(label):
-     return torch.tensor(np.array(label).astype(np.float32))
-
-
- class FlistDataset(BaseDataset):
-     """
-     It requires one directory to host training images, '/path/to/data/train'.
-     You can train the model with the dataset flag '--dataroot /path/to/data'.
-     """
-
-     def __init__(self, opt):
-         """Initialize this dataset class.
-
-         Parameters:
-             opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
-         """
-         BaseDataset.__init__(self, opt)
-
-         self.lm3d_std = load_lm3d(opt.bfm_folder)
-
-         msk_names = default_flist_reader(opt.flist)
-         self.msk_paths = [os.path.join(opt.data_root, i) for i in msk_names]
-
-         self.size = len(self.msk_paths)
-         self.opt = opt
-
-         self.name = 'train' if opt.isTrain else 'val'
-         if '_' in opt.flist:
-             self.name += '_' + opt.flist.split(os.sep)[-1].split('_')[0]
-
-     def __getitem__(self, index):
-         """Return a data point and its metadata information.
-
-         Parameters:
-             index (int) -- a random integer for data indexing
-
-         Returns a dictionary that contains:
-             imgs (tensor) -- an image in the input domain
-             msks (tensor) -- its corresponding attention mask
-             lms (tensor) -- its corresponding 3d landmarks
-             im_paths (str) -- image paths
-             aug_flag (bool) -- a flag used to tell whether the sample is raw or augmented
-         """
-         msk_path = self.msk_paths[index % self.size]  # make sure index is within the range
-         img_path = msk_path.replace('mask/', '')
-         lm_path = '.'.join(msk_path.replace('mask', 'landmarks').split('.')[:-1]) + '.txt'
-
-         raw_img = Image.open(img_path).convert('RGB')
-         raw_msk = Image.open(msk_path).convert('RGB')
-         raw_lm = np.loadtxt(lm_path).astype(np.float32)
-
-         _, img, lm, msk = align_img(raw_img, raw_lm, self.lm3d_std, raw_msk)
-
-         aug_flag = self.opt.use_aug and self.opt.isTrain
-         if aug_flag:
-             img, lm, msk = self._augmentation(img, lm, self.opt, msk)
-
-         _, H = img.size
-         M = estimate_norm(lm, H)
-         transform = get_transform()
-         img_tensor = transform(img)
-         msk_tensor = transform(msk)[:1, ...]
-         lm_tensor = parse_label(lm)
-         M_tensor = parse_label(M)
-
-         return {'imgs': img_tensor,
-                 'lms': lm_tensor,
-                 'msks': msk_tensor,
-                 'M': M_tensor,
-                 'im_paths': img_path,
-                 'aug_flag': aug_flag,
-                 'dataset': self.name}
-
-     def _augmentation(self, img, lm, opt, msk=None):
-         affine, affine_inv, flip = get_affine_mat(opt, img.size)
-         img = apply_img_affine(img, affine_inv)
-         lm = apply_lm_affine(lm, affine, flip, img.size)
-         if msk is not None:
-             msk = apply_img_affine(msk, affine_inv, method=Image.BILINEAR)
-         return img, lm, msk
-
-     def __len__(self):
-         """Return the total number of images in the dataset.
-         """
-         return self.size
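The path derivation in `__getitem__` assumes a layout where masks live under a `mask/` directory and landmarks under a parallel `landmarks/` directory; traced on a hypothetical mask path:

    msk_path = 'datasets/train/mask/000001.png'
    img_path = msk_path.replace('mask/', '')
    # -> 'datasets/train/000001.png'
    lm_path = '.'.join(msk_path.replace('mask', 'landmarks').split('.')[:-1]) + '.txt'
    # -> 'datasets/train/landmarks/000001.txt'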
spaces/Alycer/VITS-Umamusume-voice-synthesizer/monotonic_align/__init__.py DELETED
@@ -1,19 +0,0 @@
- import numpy as np
- import torch
- from .monotonic_align.core import maximum_path_c
-
-
- def maximum_path(neg_cent, mask):
-     """ Cython optimized version.
-     neg_cent: [b, t_t, t_s]
-     mask: [b, t_t, t_s]
-     """
-     device = neg_cent.device
-     dtype = neg_cent.dtype
-     neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
-     path = np.zeros(neg_cent.shape, dtype=np.int32)
-
-     t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
-     t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
-     maximum_path_c(path, neg_cent, t_t_max, t_s_max)
-     return torch.from_numpy(path).to(device=device, dtype=dtype)
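`maximum_path_c` is a compiled Cython kernel, so the wrapper above mostly converts tensors to contiguous numpy arrays and back. For reference, a pure-Python port of the per-example dynamic program that VITS-style `core.pyx` kernels implement (a sketch for sanity-checking only; it is far slower than the compiled version and assumes the same `value` layout of shape `(t_y, t_x)`):

    import numpy as np

    def maximum_path_each_ref(path, value, t_y, t_x, max_neg_val=-1e9):
        # Forward pass: value[y, x] accumulates the best monotonic alignment score.
        for y in range(t_y):
            for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
                v_cur = max_neg_val if x == y else value[y - 1, x]
                if x == 0:
                    v_prev = 0.0 if y == 0 else max_neg_val
                else:
                    v_prev = value[y - 1, x - 1]
                value[y, x] += max(v_prev, v_cur)
        # Backtracking: mark the path from the last column, stepping left when better.
        index = t_x - 1
        for y in range(t_y - 1, -1, -1):
            path[y, index] = 1
            if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
                index -= 1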
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/en/api/pipelines/value_guided_sampling.md DELETED
@@ -1,32 +0,0 @@
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
-
- # Value-guided planning
-
- <Tip warning={true}>
-
- 🧪 This is an experimental pipeline for reinforcement learning!
-
- </Tip>
-
- This pipeline is based on the [Planning with Diffusion for Flexible Behavior Synthesis](https://huggingface.co/papers/2205.09991) paper by Michael Janner, Yilun Du, Joshua B. Tenenbaum, Sergey Levine.
-
- The abstract from the paper is:
-
- *Model-based reinforcement learning methods often use learning only for the purpose of estimating an approximate dynamics model, offloading the rest of the decision-making work to classical trajectory optimizers. While conceptually simple, this combination has a number of empirical shortcomings, suggesting that learned models may not be well-suited to standard trajectory optimization. In this paper, we consider what it would look like to fold as much of the trajectory optimization pipeline as possible into the modeling problem, such that sampling from the model and planning with it become nearly identical. The core of our technical approach lies in a diffusion probabilistic model that plans by iteratively denoising trajectories. We show how classifier-guided sampling and image inpainting can be reinterpreted as coherent planning strategies, explore the unusual and useful properties of diffusion-based planning methods, and demonstrate the effectiveness of our framework in control settings that emphasize long-horizon decision-making and test-time flexibility*.
-
- You can find additional information about the model on the [project page](https://diffusion-planning.github.io/), the [original codebase](https://github.com/jannerm/diffuser), or try it out in a demo [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/reinforcement_learning_with_diffusers.ipynb).
-
- The script to run the model is available [here](https://github.com/huggingface/diffusers/tree/main/examples/reinforcement_learning).
-
- ## ValueGuidedRLPipeline
- [[autodoc]] diffusers.experimental.ValueGuidedRLPipeline
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/models/cross_attention.py DELETED
@@ -1,94 +0,0 @@
- # Copyright 2023 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- from ..utils import deprecate
- from .attention_processor import (  # noqa: F401
-     Attention,
-     AttentionProcessor,
-     AttnAddedKVProcessor,
-     AttnProcessor2_0,
-     LoRAAttnProcessor,
-     LoRALinearLayer,
-     LoRAXFormersAttnProcessor,
-     SlicedAttnAddedKVProcessor,
-     SlicedAttnProcessor,
-     XFormersAttnProcessor,
- )
- from .attention_processor import AttnProcessor as AttnProcessorRename  # noqa: F401
-
-
- deprecate(
-     "cross_attention",
-     "0.20.0",
-     "Importing from cross_attention is deprecated. Please import from diffusers.models.attention_processor instead.",
-     standard_warn=False,
- )
-
-
- AttnProcessor = AttentionProcessor
-
-
- class CrossAttention(Attention):
-     def __init__(self, *args, **kwargs):
-         deprecation_message = f"{self.__class__.__name__} is deprecated and will be removed in `0.20.0`. Please use `from diffusers.models.attention_processor import {''.join(self.__class__.__name__.split('Cross'))}` instead."
-         deprecate("cross_attention", "0.20.0", deprecation_message, standard_warn=False)
-         super().__init__(*args, **kwargs)
-
-
- class CrossAttnProcessor(AttnProcessorRename):
-     def __init__(self, *args, **kwargs):
-         deprecation_message = f"{self.__class__.__name__} is deprecated and will be removed in `0.20.0`. Please use `from diffusers.models.attention_processor import {''.join(self.__class__.__name__.split('Cross'))}` instead."
-         deprecate("cross_attention", "0.20.0", deprecation_message, standard_warn=False)
-         super().__init__(*args, **kwargs)
-
-
- class LoRACrossAttnProcessor(LoRAAttnProcessor):
-     def __init__(self, *args, **kwargs):
-         deprecation_message = f"{self.__class__.__name__} is deprecated and will be removed in `0.20.0`. Please use `from diffusers.models.attention_processor import {''.join(self.__class__.__name__.split('Cross'))}` instead."
-         deprecate("cross_attention", "0.20.0", deprecation_message, standard_warn=False)
-         super().__init__(*args, **kwargs)
-
-
- class CrossAttnAddedKVProcessor(AttnAddedKVProcessor):
-     def __init__(self, *args, **kwargs):
-         deprecation_message = f"{self.__class__.__name__} is deprecated and will be removed in `0.20.0`. Please use `from diffusers.models.attention_processor import {''.join(self.__class__.__name__.split('Cross'))}` instead."
-         deprecate("cross_attention", "0.20.0", deprecation_message, standard_warn=False)
-         super().__init__(*args, **kwargs)
-
-
- class XFormersCrossAttnProcessor(XFormersAttnProcessor):
-     def __init__(self, *args, **kwargs):
-         deprecation_message = f"{self.__class__.__name__} is deprecated and will be removed in `0.20.0`. Please use `from diffusers.models.attention_processor import {''.join(self.__class__.__name__.split('Cross'))}` instead."
-         deprecate("cross_attention", "0.20.0", deprecation_message, standard_warn=False)
-         super().__init__(*args, **kwargs)
-
-
- class LoRAXFormersCrossAttnProcessor(LoRAXFormersAttnProcessor):
-     def __init__(self, *args, **kwargs):
-         deprecation_message = f"{self.__class__.__name__} is deprecated and will be removed in `0.20.0`. Please use `from diffusers.models.attention_processor import {''.join(self.__class__.__name__.split('Cross'))}` instead."
-         deprecate("cross_attention", "0.20.0", deprecation_message, standard_warn=False)
-         super().__init__(*args, **kwargs)
-
-
- class SlicedCrossAttnProcessor(SlicedAttnProcessor):
-     def __init__(self, *args, **kwargs):
-         deprecation_message = f"{self.__class__.__name__} is deprecated and will be removed in `0.20.0`. Please use `from diffusers.models.attention_processor import {''.join(self.__class__.__name__.split('Cross'))}` instead."
-         deprecate("cross_attention", "0.20.0", deprecation_message, standard_warn=False)
-         super().__init__(*args, **kwargs)
-
-
- class SlicedCrossAttnAddedKVProcessor(SlicedAttnAddedKVProcessor):
-     def __init__(self, *args, **kwargs):
-         deprecation_message = f"{self.__class__.__name__} is deprecated and will be removed in `0.20.0`. Please use `from diffusers.models.attention_processor import {''.join(self.__class__.__name__.split('Cross'))}` instead."
-         deprecate("cross_attention", "0.20.0", deprecation_message, standard_warn=False)
-         super().__init__(*args, **kwargs)
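The file above is one long application of a single pattern: keep the old class name importable, but emit a deprecation warning on construction and defer everything else to the renamed class. A minimal generic sketch of that pattern (names here are hypothetical, not diffusers APIs):

    import warnings

    class NewProcessor:
        def __init__(self, scale=1.0):
            self.scale = scale

    class OldProcessor(NewProcessor):
        def __init__(self, *args, **kwargs):
            warnings.warn("OldProcessor is deprecated; use NewProcessor instead.", FutureWarning)
            super().__init__(*args, **kwargs)

    p = OldProcessor(scale=0.5)  # warns once, then behaves exactly like NewProcessor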
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2_img2img.py DELETED
@@ -1,373 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from typing import Callable, List, Optional, Union
16
-
17
- import numpy as np
18
- import PIL
19
- import torch
20
- from PIL import Image
21
-
22
- from ...models import UNet2DConditionModel, VQModel
23
- from ...schedulers import DDPMScheduler
24
- from ...utils import (
25
- is_accelerate_available,
26
- is_accelerate_version,
27
- logging,
28
- randn_tensor,
29
- replace_example_docstring,
30
- )
31
- from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
32
-
33
-
34
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
35
-
36
- EXAMPLE_DOC_STRING = """
37
- Examples:
38
- ```py
39
- >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
40
- >>> from diffusers.utils import load_image
41
- >>> import torch
42
-
43
- >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
44
- ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
45
- ... )
46
- >>> pipe_prior.to("cuda")
47
-
48
- >>> prompt = "A red cartoon frog, 4k"
49
- >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
50
-
51
- >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
52
- ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
53
- ... )
54
- >>> pipe.to("cuda")
55
-
56
- >>> init_image = load_image(
57
- ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
58
- ... "/kandinsky/frog.png"
59
- ... )
60
-
61
- >>> image = pipe(
62
- ... image=init_image,
63
- ... image_embeds=image_emb,
64
- ... negative_image_embeds=zero_image_emb,
65
- ... height=768,
66
- ... width=768,
67
- ... num_inference_steps=100,
68
- ... strength=0.2,
69
- ... ).images
70
-
71
- >>> image[0].save("red_frog.png")
72
- ```
73
- """
74
-
75
-
76
- # Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.downscale_height_and_width
77
- def downscale_height_and_width(height, width, scale_factor=8):
78
- new_height = height // scale_factor**2
79
- if height % scale_factor**2 != 0:
80
- new_height += 1
81
- new_width = width // scale_factor**2
82
- if width % scale_factor**2 != 0:
83
- new_width += 1
84
- return new_height * scale_factor, new_width * scale_factor
85
-
86
-
87
- # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_img2img.prepare_image
88
- def prepare_image(pil_image, w=512, h=512):
89
- pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
90
- arr = np.array(pil_image.convert("RGB"))
91
- arr = arr.astype(np.float32) / 127.5 - 1
92
- arr = np.transpose(arr, [2, 0, 1])
93
- image = torch.from_numpy(arr).unsqueeze(0)
94
- return image
95
-
96
-
97
- class KandinskyV22Img2ImgPipeline(DiffusionPipeline):
98
- """
99
- Pipeline for image-to-image generation using Kandinsky
100
-
101
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
102
- library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
103
-
104
- Args:
105
- scheduler ([`DDIMScheduler`]):
106
- A scheduler to be used in combination with `unet` to generate image latents.
107
- unet ([`UNet2DConditionModel`]):
108
- Conditional U-Net architecture to denoise the image embedding.
109
- movq ([`VQModel`]):
110
- MoVQ Decoder to generate the image from the latents.
111
- """
112
-
113
- def __init__(
114
- self,
115
- unet: UNet2DConditionModel,
116
- scheduler: DDPMScheduler,
117
- movq: VQModel,
118
- ):
119
- super().__init__()
120
-
121
- self.register_modules(
122
- unet=unet,
123
- scheduler=scheduler,
124
- movq=movq,
125
- )
126
- self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
127
-
128
- # Copied from diffusers.pipelines.kandinsky.pipeline_kandinsky_img2img.KandinskyImg2ImgPipeline.get_timesteps
129
- def get_timesteps(self, num_inference_steps, strength, device):
130
- # get the original timestep using init_timestep
131
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
132
-
133
- t_start = max(num_inference_steps - init_timestep, 0)
134
- timesteps = self.scheduler.timesteps[t_start:]
135
-
136
- return timesteps, num_inference_steps - t_start
137
-
138
- def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
139
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
140
- raise ValueError(
141
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
142
- )
143
-
144
- image = image.to(device=device, dtype=dtype)
145
-
146
- batch_size = batch_size * num_images_per_prompt
147
-
148
- if image.shape[1] == 4:
149
- init_latents = image
150
-
151
- else:
152
- if isinstance(generator, list) and len(generator) != batch_size:
153
- raise ValueError(
154
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
155
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
156
- )
157
-
158
- elif isinstance(generator, list):
159
- init_latents = [
160
- self.movq.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
161
- ]
162
- init_latents = torch.cat(init_latents, dim=0)
163
- else:
164
- init_latents = self.movq.encode(image).latent_dist.sample(generator)
165
-
166
- init_latents = self.movq.config.scaling_factor * init_latents
167
-
168
- init_latents = torch.cat([init_latents], dim=0)
169
-
170
- shape = init_latents.shape
171
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
172
-
173
- # get latents
174
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
175
-
176
- latents = init_latents
177
-
178
- return latents
179
-
180
- # Copied from diffusers.pipelines.kandinsky2_2.pipeline_kandinsky2_2.KandinskyV22Pipeline.enable_model_cpu_offload
181
- def enable_model_cpu_offload(self, gpu_id=0):
182
- r"""
183
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
184
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
185
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
186
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
187
- """
188
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
189
- from accelerate import cpu_offload_with_hook
190
- else:
191
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
192
-
193
- device = torch.device(f"cuda:{gpu_id}")
194
-
195
- if self.device.type != "cpu":
196
- self.to("cpu", silence_dtype_warnings=True)
197
- torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
198
-
199
- hook = None
200
- for cpu_offloaded_model in [self.unet, self.movq]:
201
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
202
-
203
- # We'll offload the last model manually.
204
- self.final_offload_hook = hook
205
-
206
- @torch.no_grad()
207
- @replace_example_docstring(EXAMPLE_DOC_STRING)
208
- def __call__(
209
- self,
210
- image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
211
- image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],
212
- negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
213
- height: int = 512,
214
- width: int = 512,
215
- num_inference_steps: int = 100,
216
- guidance_scale: float = 4.0,
217
- strength: float = 0.3,
218
- num_images_per_prompt: int = 1,
219
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
220
- output_type: Optional[str] = "pil",
221
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
222
- callback_steps: int = 1,
223
- return_dict: bool = True,
224
- ):
225
- """
226
- Function invoked when calling the pipeline for generation.
227
-
228
- Args:
229
- image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
230
- The clip image embeddings for the text prompt, which will be used to condition the image generation.
231
- image (`torch.FloatTensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
232
- `Image`, or tensor representing an image batch, that will be used as the starting point for the
233
- process. Can also accept image latents as `image`; if latents are passed directly, they will not be encoded
234
- again.
235
- strength (`float`, *optional*, defaults to 0.3):
236
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
237
- will be used as a starting point, adding more noise to it the larger the `strength`. The number of
238
- denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
239
- be maximum and the denoising process will run for the full number of iterations specified in
240
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
241
- negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
242
- The clip image embeddings for the negative text prompt, which will be used to condition the image generation.
243
- height (`int`, *optional*, defaults to 512):
244
- The height in pixels of the generated image.
245
- width (`int`, *optional*, defaults to 512):
246
- The width in pixels of the generated image.
247
- num_inference_steps (`int`, *optional*, defaults to 100):
248
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
249
- expense of slower inference.
250
- guidance_scale (`float`, *optional*, defaults to 4.0):
251
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
252
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
253
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
254
- 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
255
- usually at the expense of lower image quality.
256
- num_images_per_prompt (`int`, *optional*, defaults to 1):
257
- The number of images to generate per prompt.
258
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
259
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
260
- to make generation deterministic.
261
- output_type (`str`, *optional*, defaults to `"pil"`):
262
- The output format of the generated image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
263
- (`np.array`) or `"pt"` (`torch.Tensor`).
264
- callback (`Callable`, *optional*):
265
- A function that will be called every `callback_steps` steps during inference. The function is called with the
266
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
267
- callback_steps (`int`, *optional*, defaults to 1):
268
- The frequency at which the `callback` function is called. If not specified, the callback is called at
269
- every step.
270
- return_dict (`bool`, *optional*, defaults to `True`):
271
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
272
-
273
- Examples:
274
-
275
- Returns:
276
- [`~pipelines.ImagePipelineOutput`] or `tuple`
277
- """
278
- device = self._execution_device
279
-
280
- do_classifier_free_guidance = guidance_scale > 1.0
281
-
282
- if isinstance(image_embeds, list):
283
- image_embeds = torch.cat(image_embeds, dim=0)
284
- batch_size = image_embeds.shape[0]
285
- if isinstance(negative_image_embeds, list):
286
- negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
287
-
288
- if do_classifier_free_guidance:
289
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
290
- negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
291
-
292
- image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
293
- dtype=self.unet.dtype, device=device
294
- )
295
-
296
- if not isinstance(image, list):
297
- image = [image]
298
- if not all(isinstance(i, (PIL.Image.Image, torch.Tensor)) for i in image):
299
- raise ValueError(
300
- f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support PIL image and pytorch tensor"
301
- )
302
-
303
- image = torch.cat([prepare_image(i, width, height) for i in image], dim=0)
304
- image = image.to(dtype=image_embeds.dtype, device=device)
305
-
306
- latents = self.movq.encode(image)["latents"]
307
- latents = latents.repeat_interleave(num_images_per_prompt, dim=0)
308
- self.scheduler.set_timesteps(num_inference_steps, device=device)
309
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
310
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
311
- height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
312
- latents = self.prepare_latents(
313
- latents, latent_timestep, batch_size, num_images_per_prompt, image_embeds.dtype, device, generator
314
- )
315
- for i, t in enumerate(self.progress_bar(timesteps)):
316
- # expand the latents if we are doing classifier free guidance
317
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
318
-
319
- added_cond_kwargs = {"image_embeds": image_embeds}
320
- noise_pred = self.unet(
321
- sample=latent_model_input,
322
- timestep=t,
323
- encoder_hidden_states=None,
324
- added_cond_kwargs=added_cond_kwargs,
325
- return_dict=False,
326
- )[0]
327
-
328
- if do_classifier_free_guidance:
329
- noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
330
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
331
- _, variance_pred_text = variance_pred.chunk(2)
332
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
333
- noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
334
-
335
- if not (
336
- hasattr(self.scheduler.config, "variance_type")
337
- and self.scheduler.config.variance_type in ["learned", "learned_range"]
338
- ):
339
- noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
340
-
341
- # compute the previous noisy sample x_t -> x_t-1
342
- latents = self.scheduler.step(
343
- noise_pred,
344
- t,
345
- latents,
346
- generator=generator,
347
- )[0]
348
-
349
- if callback is not None and i % callback_steps == 0:
350
- callback(i, t, latents)
351
-
352
- # post-processing
353
- image = self.movq.decode(latents, force_not_quantize=True)["sample"]
354
-
355
- # Offload last model to CPU
356
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
357
- self.final_offload_hook.offload()
358
-
359
- if output_type not in ["pt", "np", "pil"]:
360
- raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
361
-
362
- if output_type in ["np", "pil"]:
363
- image = image * 0.5 + 0.5
364
- image = image.clamp(0, 1)
365
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
366
-
367
- if output_type == "pil":
368
- image = self.numpy_to_pil(image)
369
-
370
- if not return_dict:
371
- return (image,)
372
-
373
- return ImagePipelineOutput(images=image)
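For reference, here is a minimal usage sketch of the image-to-image pipeline deleted above. The prior/decoder checkpoint ids, the image URL, and the two-stage call pattern are assumptions based on the usual Kandinsky 2.2 setup, not something recorded in this diff:

import torch
from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

# Stage 1: the prior turns text into the CLIP image embeddings this pipeline conditions on.
pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
# Stage 2: the decoder (the pipeline above) denoises MoVQ latents of the init image.
pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("https://example.com/cat.png")  # hypothetical URL
prompt = "A red cartoon cat, 4k photo"

image_embeds, negative_image_embeds = pipe_prior(prompt, guidance_scale=1.0).to_tuple()

out = pipe(
    image=init_image,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    height=768,
    width=768,
    strength=0.3,  # low strength stays close to the input image
)
out.images[0].save("cat_img2img.png")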
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_karras_ve_flax.py DELETED
@@ -1,237 +0,0 @@
1
- # Copyright 2023 NVIDIA and The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
-
16
- from dataclasses import dataclass
17
- from typing import Optional, Tuple, Union
18
-
19
- import flax
20
- import jax.numpy as jnp
21
- from jax import random
22
-
23
- from ..configuration_utils import ConfigMixin, register_to_config
24
- from ..utils import BaseOutput
25
- from .scheduling_utils_flax import FlaxSchedulerMixin
26
-
27
-
28
- @flax.struct.dataclass
29
- class KarrasVeSchedulerState:
30
- # setable values
31
- num_inference_steps: Optional[int] = None
32
- timesteps: Optional[jnp.ndarray] = None
33
- schedule: Optional[jnp.ndarray] = None # sigma(t_i)
34
-
35
- @classmethod
36
- def create(cls):
37
- return cls()
38
-
39
-
40
- @dataclass
41
- class FlaxKarrasVeOutput(BaseOutput):
42
- """
43
- Output class for the scheduler's step function output.
44
-
45
- Args:
46
- prev_sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
47
- Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
48
- denoising loop.
49
- derivative (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)` for images):
50
- Derivative of predicted original image sample (x_0).
51
- state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class.
52
- """
53
-
54
- prev_sample: jnp.ndarray
55
- derivative: jnp.ndarray
56
- state: KarrasVeSchedulerState
57
-
58
-
59
- class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
60
- """
61
- Stochastic sampling from Karras et al. [1] tailored to Variance Exploding (VE) models [2]. Use Algorithm 2 and
62
- the VE column of Table 1 from [1] for reference.
63
-
64
- [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
65
- https://arxiv.org/abs/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic
66
- differential equations." https://arxiv.org/abs/2011.13456
67
-
68
- [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
69
- function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
70
- [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
71
- [`~SchedulerMixin.from_pretrained`] functions.
72
-
73
- For more details on the parameters, see the original paper's Appendix E.: "Elucidating the Design Space of
74
- Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364. The grid search values used to find the
75
- optimal {s_noise, s_churn, s_min, s_max} for a specific model are described in Table 5 of the paper.
76
-
77
- Args:
78
- sigma_min (`float`): minimum noise magnitude
79
- sigma_max (`float`): maximum noise magnitude
80
- s_noise (`float`): the amount of additional noise to counteract loss of detail during sampling.
81
- A reasonable range is [1.000, 1.011].
82
- s_churn (`float`): the parameter controlling the overall amount of stochasticity.
83
- A reasonable range is [0, 100].
84
- s_min (`float`): the start value of the sigma range where we add noise (enable stochasticity).
85
- A reasonable range is [0, 10].
86
- s_max (`float`): the end value of the sigma range where we add noise.
87
- A reasonable range is [0.2, 80].
88
- """
89
-
90
- @property
91
- def has_state(self):
92
- return True
93
-
94
- @register_to_config
95
- def __init__(
96
- self,
97
- sigma_min: float = 0.02,
98
- sigma_max: float = 100,
99
- s_noise: float = 1.007,
100
- s_churn: float = 80,
101
- s_min: float = 0.05,
102
- s_max: float = 50,
103
- ):
104
- pass
105
-
106
- def create_state(self):
107
- return KarrasVeSchedulerState.create()
108
-
109
- def set_timesteps(
110
- self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
111
- ) -> KarrasVeSchedulerState:
112
- """
113
- Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference.
114
-
115
- Args:
116
- state (`KarrasVeSchedulerState`):
117
- the `FlaxKarrasVeScheduler` state data class.
118
- num_inference_steps (`int`):
119
- the number of diffusion steps used when generating samples with a pre-trained model.
120
-
121
- """
122
- timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
123
- schedule = [
124
- (
125
- self.config.sigma_max**2
126
- * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
127
- )
128
- for i in timesteps
129
- ]
130
-
131
- return state.replace(
132
- num_inference_steps=num_inference_steps,
133
- schedule=jnp.array(schedule, dtype=jnp.float32),
134
- timesteps=timesteps,
135
- )
136
-
137
- def add_noise_to_input(
138
- self,
139
- state: KarrasVeSchedulerState,
140
- sample: jnp.ndarray,
141
- sigma: float,
142
- key: random.KeyArray,
143
- ) -> Tuple[jnp.ndarray, float]:
144
- """
145
- Explicit Langevin-like "churn" step of adding noise to the sample according to a factor gamma_i ≥ 0 to reach a
146
- higher noise level sigma_hat = sigma_i + gamma_i*sigma_i.
147
-
148
- TODO Args:
149
- """
150
- if self.config.s_min <= sigma <= self.config.s_max:
151
- gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
152
- else:
153
- gamma = 0
154
-
155
- # sample eps ~ N(0, S_noise^2 * I)
156
- key = random.split(key, num=1)[0]  # take the single new key; `random.normal` expects an unbatched PRNG key
157
- eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
158
- sigma_hat = sigma + gamma * sigma
159
- sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
160
-
161
- return sample_hat, sigma_hat
162
-
163
- def step(
164
- self,
165
- state: KarrasVeSchedulerState,
166
- model_output: jnp.ndarray,
167
- sigma_hat: float,
168
- sigma_prev: float,
169
- sample_hat: jnp.ndarray,
170
- return_dict: bool = True,
171
- ) -> Union[FlaxKarrasVeOutput, Tuple]:
172
- """
173
- Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
174
- process from the learned model outputs (most often the predicted noise).
175
-
176
- Args:
177
- state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class.
178
- model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model.
179
- sigma_hat (`float`): TODO
180
- sigma_prev (`float`): TODO
181
- sample_hat (`torch.FloatTensor` or `np.ndarray`): TODO
182
- return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class
183
-
184
- Returns:
185
- [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] or `tuple`: Updated sample in the diffusion
186
- chain and derivative. [`~schedulers.scheduling_karras_ve_flax.FlaxKarrasVeOutput`] if `return_dict` is
187
- True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
188
- """
189
-
190
- pred_original_sample = sample_hat + sigma_hat * model_output
191
- derivative = (sample_hat - pred_original_sample) / sigma_hat
192
- sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
193
-
194
- if not return_dict:
195
- return (sample_prev, derivative, state)
196
-
197
- return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
198
-
199
- def step_correct(
200
- self,
201
- state: KarrasVeSchedulerState,
202
- model_output: jnp.ndarray,
203
- sigma_hat: float,
204
- sigma_prev: float,
205
- sample_hat: jnp.ndarray,
206
- sample_prev: jnp.ndarray,
207
- derivative: jnp.ndarray,
208
- return_dict: bool = True,
209
- ) -> Union[FlaxKarrasVeOutput, Tuple]:
210
- """
211
- Correct the predicted sample based on the output model_output of the network. TODO complete description
212
-
213
- Args:
214
- state (`KarrasVeSchedulerState`): the `FlaxKarrasVeScheduler` state data class.
215
- model_output (`torch.FloatTensor` or `np.ndarray`): direct output from learned diffusion model.
216
- sigma_hat (`float`): TODO
217
- sigma_prev (`float`): TODO
218
- sample_hat (`torch.FloatTensor` or `np.ndarray`): TODO
219
- sample_prev (`torch.FloatTensor` or `np.ndarray`): TODO
220
- derivative (`torch.FloatTensor` or `np.ndarray`): TODO
221
- return_dict (`bool`): option for returning tuple rather than FlaxKarrasVeOutput class
222
-
223
- Returns:
224
- prev_sample (TODO): updated sample in the diffusion chain. derivative (TODO): TODO
225
-
226
- """
227
- pred_original_sample = sample_prev + sigma_prev * model_output
228
- derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
229
- sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
230
-
231
- if not return_dict:
232
- return (sample_prev, derivative, state)
233
-
234
- return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
235
-
236
- def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
237
- raise NotImplementedError()
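To make the state-passing API above concrete, here is a sketch of the stochastic sampling loop (Algorithm 2 of Karras et al.) this scheduler is meant to drive. `model_fn` is a hypothetical stand-in for a trained denoiser, the shapes are illustrative, and the second-order `step_correct` pass is omitted for brevity:

import jax
import jax.numpy as jnp

def model_fn(x, sigma):
    # Placeholder for a trained VE score/denoising network.
    return jnp.zeros_like(x)

scheduler = FlaxKarrasVeScheduler()
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)

key = jax.random.PRNGKey(0)
key, subkey = jax.random.split(key)
# VE models start from pure noise at the maximum sigma.
sample = jax.random.normal(subkey, (1, 3, 64, 64)) * scheduler.config.sigma_max

for t in state.timesteps:
    sigma = state.schedule[t]
    sigma_prev = state.schedule[t - 1] if t > 0 else 0.0
    key, subkey = jax.random.split(key)
    # "Churn" step: raise the noise level from sigma to sigma_hat.
    sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, subkey)
    model_output = model_fn(sample_hat, sigma_hat)
    output = scheduler.step(state, model_output, sigma_hat, sigma_prev, sample_hat)
    sample = output.prev_sample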
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py DELETED
@@ -1,90 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import random
17
- import unittest
18
-
19
- import torch
20
-
21
- from diffusers import IFInpaintingSuperResolutionPipeline
22
- from diffusers.utils import floats_tensor
23
- from diffusers.utils.import_utils import is_xformers_available
24
- from diffusers.utils.testing_utils import skip_mps, torch_device
25
-
26
- from ..pipeline_params import (
27
- TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
28
- TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
29
- )
30
- from ..test_pipelines_common import PipelineTesterMixin
31
- from . import IFPipelineTesterMixin
32
-
33
-
34
- @skip_mps
35
- class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
36
- pipeline_class = IFInpaintingSuperResolutionPipeline
37
- params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
38
- batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
39
- required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
40
-
41
- def get_dummy_components(self):
42
- return self._get_superresolution_dummy_components()
43
-
44
- def get_dummy_inputs(self, device, seed=0):
45
- if str(device).startswith("mps"):
46
- generator = torch.manual_seed(seed)
47
- else:
48
- generator = torch.Generator(device=device).manual_seed(seed)
49
-
50
- image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
51
- original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
52
- mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
53
-
54
- inputs = {
55
- "prompt": "A painting of a squirrel eating a burger",
56
- "image": image,
57
- "original_image": original_image,
58
- "mask_image": mask_image,
59
- "generator": generator,
60
- "num_inference_steps": 2,
61
- "output_type": "numpy",
62
- }
63
-
64
- return inputs
65
-
66
- @unittest.skipIf(
67
- torch_device != "cuda" or not is_xformers_available(),
68
- reason="XFormers attention is only available with CUDA and `xformers` installed",
69
- )
70
- def test_xformers_attention_forwardGenerator_pass(self):
71
- self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
72
-
73
- def test_save_load_optional_components(self):
74
- self._test_save_load_optional_components()
75
-
76
- @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
77
- def test_save_load_float16(self):
78
- # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
79
- super().test_save_load_float16(expected_max_diff=1e-1)
80
-
81
- def test_attention_slicing_forward_pass(self):
82
- self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
83
-
84
- def test_save_load_local(self):
85
- self._test_save_load_local()
86
-
87
- def test_inference_batch_single_identical(self):
88
- self._test_inference_batch_single_identical(
89
- expected_max_diff=1e-2,
90
- )
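As a usage note: these fast tests are normally collected by pytest from the diffusers repo root, e.g. `pytest tests/pipelines/deepfloyd_if/test_if_inpainting_superresolution.py`; the exact invocation is an assumption here, since the CI configuration is not part of this diff.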
spaces/ArchitSharma/Digital-Photo-Color-Restoration/src/deoldify/layers.py DELETED
@@ -1,48 +0,0 @@
1
- from fastai.layers import *
2
- from fastai.torch_core import *
3
- from torch.nn.parameter import Parameter
4
- from torch.autograd import Variable
5
-
6
-
7
- # The code below is meant to be merged into fastaiv1 ideally
8
-
9
-
10
- def custom_conv_layer(
11
- ni: int,
12
- nf: int,
13
- ks: int = 3,
14
- stride: int = 1,
15
- padding: int = None,
16
- bias: bool = None,
17
- is_1d: bool = False,
18
- norm_type: Optional[NormType] = NormType.Batch,
19
- use_activ: bool = True,
20
- leaky: float = None,
21
- transpose: bool = False,
22
- init: Callable = nn.init.kaiming_normal_,
23
- self_attention: bool = False,
24
- extra_bn: bool = False,
25
- ):
26
- "Create a sequence of convolutional (`ni` to `nf`), ReLU (if `use_activ`) and batchnorm (if `bn`) layers."
27
- if padding is None:
28
- padding = (ks - 1) // 2 if not transpose else 0
29
- bn = norm_type in (NormType.Batch, NormType.BatchZero) or extra_bn
30
- if bias is None:
31
- bias = not bn
32
- conv_func = nn.ConvTranspose2d if transpose else nn.Conv1d if is_1d else nn.Conv2d
33
- conv = init_default(
34
- conv_func(ni, nf, kernel_size=ks, bias=bias, stride=stride, padding=padding),
35
- init,
36
- )
37
- if norm_type == NormType.Weight:
38
- conv = weight_norm(conv)
39
- elif norm_type == NormType.Spectral:
40
- conv = spectral_norm(conv)
41
- layers = [conv]
42
- if use_activ:
43
- layers.append(relu(True, leaky=leaky))
44
- if bn:
45
- layers.append((nn.BatchNorm1d if is_1d else nn.BatchNorm2d)(nf))
46
- if self_attention:
47
- layers.append(SelfAttention(nf))
48
- return nn.Sequential(*layers)
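A small usage sketch of `custom_conv_layer`, assuming fastai v1 is importable as in the module above; the channel counts are arbitrary:

from fastai.layers import NormType

# Spectral-normalized 3x3 conv followed by ReLU, with a self-attention block appended.
# With NormType.Spectral no batchnorm is added, so `bias` defaults to True.
block = custom_conv_layer(
    ni=64, nf=128, ks=3, stride=1,
    norm_type=NormType.Spectral, self_attention=True,
)
print(block)  # nn.Sequential(Conv2d, ReLU, SelfAttention)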
spaces/Aristo/trafficsign/app.py DELETED
@@ -1,39 +0,0 @@
1
- import gradio as gr
2
- import PIL
3
- import numpy
4
- import matplotlib.pyplot as plt
5
- #load the trained model to classify sign
6
- from keras.models import load_model
7
- model = load_model('traffic_classifier.h5')
8
- #dictionary to label all traffic signs class.
9
- classes = { 1:'Speed limit (20km/h)',
10
- 2:'Speed limit (30km/h)',
11
- 3:'Speed limit (50km/h)',
12
- 4:'Speed limit (60km/h)',
13
- 5:'Speed limit (70km/h)',
14
- 6:'Speed limit (80km/h)',
15
- 7:'End of speed limit (80km/h)',
16
- 8:'Speed limit (100km/h)',
17
- 9:'Speed limit (120km/h)',
18
- 10:'Veh > 3.5 tons prohibited',
19
- 11:'Bumpy road',
20
- 12:'Slippery road',
21
- 13:'Road narrows on the right',
22
- 14:'Road work',
23
- 15:'Pedestrians',
24
- 16:'Turn right ahead',
25
- 17:'Turn left ahead',
26
- 18:'Ahead only',
27
- 19:'Go straight or right',
28
- 20:'Go straight or left',
29
- 21:'Keep right',
30
- 22:'Keep left',
31
- 23:'Roundabout mandatory'}
32
- #initialise GUI
33
- def predict(img):
34
- img = numpy.expand_dims(img, axis=0)
35
- predict_x = model.predict(img)
36
- pred = numpy.argmax(predict_x, axis = 1)
37
- sign = classes[pred[0]+1]
38
- return sign
39
- gr.Interface(fn=predict, inputs=gr.inputs.Image(shape=(30, 30)), outputs="textbox").launch(share=True, debug=True)
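A hypothetical smoke test of `predict` without launching the Gradio UI; `sign.png` is a placeholder path, `traffic_classifier.h5` must exist locally, and `numpy` is already imported by the app:

from PIL import Image

img = numpy.array(Image.open("sign.png").convert("RGB").resize((30, 30)))
print(predict(img))  # e.g. "Speed limit (30km/h)"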
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pkg_resources/_vendor/pyparsing/diagram/__init__.py DELETED
@@ -1,642 +0,0 @@
1
- import railroad
2
- import pyparsing
3
- import typing
4
- from typing import (
5
- List,
6
- NamedTuple,
7
- Generic,
8
- TypeVar,
9
- Dict,
10
- Callable,
11
- Set,
12
- Iterable,
13
- )
14
- from jinja2 import Template
15
- from io import StringIO
16
- import inspect
17
-
18
-
19
- jinja2_template_source = """\
20
- <!DOCTYPE html>
21
- <html>
22
- <head>
23
- {% if not head %}
24
- <style type="text/css">
25
- .railroad-heading {
26
- font-family: monospace;
27
- }
28
- </style>
29
- {% else %}
30
- {{ head | safe }}
31
- {% endif %}
32
- </head>
33
- <body>
34
- {{ body | safe }}
35
- {% for diagram in diagrams %}
36
- <div class="railroad-group">
37
- <h1 class="railroad-heading">{{ diagram.title }}</h1>
38
- <div class="railroad-description">{{ diagram.text }}</div>
39
- <div class="railroad-svg">
40
- {{ diagram.svg }}
41
- </div>
42
- </div>
43
- {% endfor %}
44
- </body>
45
- </html>
46
- """
47
-
48
- template = Template(jinja2_template_source)
49
-
50
- # Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet
51
- NamedDiagram = NamedTuple(
52
- "NamedDiagram",
53
- [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)],
54
- )
55
- """
56
- A simple structure for associating a name with a railroad diagram
57
- """
58
-
59
- T = TypeVar("T")
60
-
61
-
62
- class EachItem(railroad.Group):
63
- """
64
- Custom railroad item to compose a:
65
- - Group containing a
66
- - OneOrMore containing a
67
- - Choice of the elements in the Each
68
- with the group label indicating that all must be matched
69
- """
70
-
71
- all_label = "[ALL]"
72
-
73
- def __init__(self, *items):
74
- choice_item = railroad.Choice(len(items) - 1, *items)
75
- one_or_more_item = railroad.OneOrMore(item=choice_item)
76
- super().__init__(one_or_more_item, label=self.all_label)
77
-
78
-
79
- class AnnotatedItem(railroad.Group):
80
- """
81
- Simple subclass of Group that creates an annotation label
82
- """
83
-
84
- def __init__(self, label: str, item):
85
- super().__init__(item=item, label="[{}]".format(label) if label else label)
86
-
87
-
88
- class EditablePartial(Generic[T]):
89
- """
90
- Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been
91
- constructed.
92
- """
93
-
94
- # We need this here because the railroad constructors actually transform the data, so can't be called until the
95
- # entire tree is assembled
96
-
97
- def __init__(self, func: Callable[..., T], args: list, kwargs: dict):
98
- self.func = func
99
- self.args = args
100
- self.kwargs = kwargs
101
-
102
- @classmethod
103
- def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]":
104
- """
105
- If you call this function in the same way that you would call the constructor, it will store the arguments
106
- as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3)
107
- """
108
- return EditablePartial(func=func, args=list(args), kwargs=kwargs)
109
-
110
- @property
111
- def name(self):
112
- return self.kwargs["name"]
113
-
114
- def __call__(self) -> T:
115
- """
116
- Evaluate the partial and return the result
117
- """
118
- args = self.args.copy()
119
- kwargs = self.kwargs.copy()
120
-
121
- # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g.
122
- # args=['list', 'of', 'things'])
123
- arg_spec = inspect.getfullargspec(self.func)
124
- if arg_spec.varargs in self.kwargs:
125
- args += kwargs.pop(arg_spec.varargs)
126
-
127
- return self.func(*args, **kwargs)
128
-
129
-
130
- def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str:
131
- """
132
- Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams
133
- :param kwargs: kwargs to be passed in to the template
134
- """
135
- data = []
136
- for diagram in diagrams:
137
- if diagram.diagram is None:
138
- continue
139
- io = StringIO()
140
- diagram.diagram.writeSvg(io.write)
141
- title = diagram.name
142
- if diagram.index == 0:
143
- title += " (root)"
144
- data.append({"title": title, "text": "", "svg": io.getvalue()})
145
-
146
- return template.render(diagrams=data, **kwargs)
147
-
148
-
149
- def resolve_partial(partial: "EditablePartial[T]") -> T:
150
- """
151
- Recursively resolves a collection of Partials into whatever type they are
152
- """
153
- if isinstance(partial, EditablePartial):
154
- partial.args = resolve_partial(partial.args)
155
- partial.kwargs = resolve_partial(partial.kwargs)
156
- return partial()
157
- elif isinstance(partial, list):
158
- return [resolve_partial(x) for x in partial]
159
- elif isinstance(partial, dict):
160
- return {key: resolve_partial(x) for key, x in partial.items()}
161
- else:
162
- return partial
163
-
164
-
165
- def to_railroad(
166
- element: pyparsing.ParserElement,
167
- diagram_kwargs: typing.Optional[dict] = None,
168
- vertical: int = 3,
169
- show_results_names: bool = False,
170
- show_groups: bool = False,
171
- ) -> List[NamedDiagram]:
172
- """
173
- Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram
174
- creation if you want to access the Railroad tree before it is converted to HTML
175
- :param element: base element of the parser being diagrammed
176
- :param diagram_kwargs: kwargs to pass to the Diagram() constructor
177
- :param vertical: (optional) - int - limit at which number of alternatives should be
178
- shown vertically instead of horizontally
179
- :param show_results_names: bool to indicate whether results name annotations should be
180
- included in the diagram
181
- :param show_groups: bool to indicate whether groups should be highlighted with an unlabeled
182
- surrounding box
183
- """
184
- # Convert the whole tree underneath the root
185
- lookup = ConverterState(diagram_kwargs=diagram_kwargs or {})
186
- _to_diagram_element(
187
- element,
188
- lookup=lookup,
189
- parent=None,
190
- vertical=vertical,
191
- show_results_names=show_results_names,
192
- show_groups=show_groups,
193
- )
194
-
195
- root_id = id(element)
196
- # Convert the root if it hasn't been already
197
- if root_id in lookup:
198
- if not element.customName:
199
- lookup[root_id].name = ""
200
- lookup[root_id].mark_for_extraction(root_id, lookup, force=True)
201
-
202
- # Now that we're finished, we can convert from intermediate structures into Railroad elements
203
- diags = list(lookup.diagrams.values())
204
- if len(diags) > 1:
205
- # collapse out duplicate diags with the same name
206
- seen = set()
207
- deduped_diags = []
208
- for d in diags:
209
- # don't extract SkipTo elements, they are uninformative as subdiagrams
210
- if d.name == "...":
211
- continue
212
- if d.name is not None and d.name not in seen:
213
- seen.add(d.name)
214
- deduped_diags.append(d)
215
- resolved = [resolve_partial(partial) for partial in deduped_diags]
216
- else:
217
- # special case - if just one diagram, always display it, even if
218
- # it has no name
219
- resolved = [resolve_partial(partial) for partial in diags]
220
- return sorted(resolved, key=lambda diag: diag.index)
221
-
222
-
223
- def _should_vertical(
224
- specification: int, exprs: Iterable[pyparsing.ParserElement]
225
- ) -> bool:
226
- """
227
- Returns true if we should return a vertical list of elements
228
- """
229
- if specification is None:
230
- return False
231
- else:
232
- return len(_visible_exprs(exprs)) >= specification
233
-
234
-
235
- class ElementState:
236
- """
237
- State recorded for an individual pyparsing Element
238
- """
239
-
240
- # Note: this should be a dataclass, but we have to support Python 3.5
241
- def __init__(
242
- self,
243
- element: pyparsing.ParserElement,
244
- converted: EditablePartial,
245
- parent: EditablePartial,
246
- number: int,
247
- name: str = None,
248
- parent_index: typing.Optional[int] = None,
249
- ):
250
- #: The pyparsing element that this represents
251
- self.element: pyparsing.ParserElement = element
252
- #: The name of the element
253
- self.name: typing.Optional[str] = name
254
- #: The output Railroad element in an unconverted state
255
- self.converted: EditablePartial = converted
256
- #: The parent Railroad element, which we store so that we can extract this if it's duplicated
257
- self.parent: EditablePartial = parent
258
- #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram
259
- self.number: int = number
260
- #: The index of this inside its parent
261
- self.parent_index: typing.Optional[int] = parent_index
262
- #: If true, we should extract this out into a subdiagram
263
- self.extract: bool = False
264
- #: If true, all of this element's children have been filled out
265
- self.complete: bool = False
266
-
267
- def mark_for_extraction(
268
- self, el_id: int, state: "ConverterState", name: str = None, force: bool = False
269
- ):
270
- """
271
- Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram
272
- :param el_id: id of the element
273
- :param state: element/diagram state tracker
274
- :param name: name to use for this element's text
275
- :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the
276
- root element when we know we're finished
277
- """
278
- self.extract = True
279
-
280
- # Set the name
281
- if not self.name:
282
- if name:
283
- # Allow forcing a custom name
284
- self.name = name
285
- elif self.element.customName:
286
- self.name = self.element.customName
287
- else:
288
- self.name = ""
289
-
290
- # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children
291
- # to be added
292
- # Also, if this is just a string literal etc, don't bother extracting it
293
- if force or (self.complete and _worth_extracting(self.element)):
294
- state.extract_into_diagram(el_id)
295
-
296
-
297
- class ConverterState:
298
- """
299
- Stores some state that persists between recursions into the element tree
300
- """
301
-
302
- def __init__(self, diagram_kwargs: typing.Optional[dict] = None):
303
- #: A dictionary mapping ParserElements to state relating to them
304
- self._element_diagram_states: Dict[int, ElementState] = {}
305
- #: A dictionary mapping ParserElement IDs to subdiagrams generated from them
306
- self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {}
307
- #: The index of the next unnamed element
308
- self.unnamed_index: int = 1
309
- #: The index of the next element. This is used for sorting
310
- self.index: int = 0
311
- #: Shared kwargs that are used to customize the construction of diagrams
312
- self.diagram_kwargs: dict = diagram_kwargs or {}
313
- self.extracted_diagram_names: Set[str] = set()
314
-
315
- def __setitem__(self, key: int, value: ElementState):
316
- self._element_diagram_states[key] = value
317
-
318
- def __getitem__(self, key: int) -> ElementState:
319
- return self._element_diagram_states[key]
320
-
321
- def __delitem__(self, key: int):
322
- del self._element_diagram_states[key]
323
-
324
- def __contains__(self, key: int):
325
- return key in self._element_diagram_states
326
-
327
- def generate_unnamed(self) -> int:
328
- """
329
- Generate a number used in the name of an otherwise unnamed diagram
330
- """
331
- self.unnamed_index += 1
332
- return self.unnamed_index
333
-
334
- def generate_index(self) -> int:
335
- """
336
- Generate a number used to index a diagram
337
- """
338
- self.index += 1
339
- return self.index
340
-
341
- def extract_into_diagram(self, el_id: int):
342
- """
343
- Used when we encounter the same token twice in the same tree. When this
344
- happens, we replace all instances of that token with a terminal, and
345
- create a new subdiagram for the token
346
- """
347
- position = self[el_id]
348
-
349
- # Replace the original definition of this element with a regular block
350
- if position.parent:
351
- ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name)
352
- if "item" in position.parent.kwargs:
353
- position.parent.kwargs["item"] = ret
354
- elif "items" in position.parent.kwargs:
355
- position.parent.kwargs["items"][position.parent_index] = ret
356
-
357
- # If the element we're extracting is a group, skip to its content but keep the title
358
- if position.converted.func == railroad.Group:
359
- content = position.converted.kwargs["item"]
360
- else:
361
- content = position.converted
362
-
363
- self.diagrams[el_id] = EditablePartial.from_call(
364
- NamedDiagram,
365
- name=position.name,
366
- diagram=EditablePartial.from_call(
367
- railroad.Diagram, content, **self.diagram_kwargs
368
- ),
369
- index=position.number,
370
- )
371
-
372
- del self[el_id]
373
-
374
-
375
- def _worth_extracting(element: pyparsing.ParserElement) -> bool:
376
- """
377
- Returns true if this element is worth having its own sub-diagram. Simply, if any of its children
378
- themselves have children, then its complex enough to extract
379
- """
380
- children = element.recurse()
381
- return any(child.recurse() for child in children)
382
-
383
-
384
- def _apply_diagram_item_enhancements(fn):
385
- """
386
- decorator to ensure enhancements to a diagram item (such as results name annotations)
387
- get applied on return from _to_diagram_element (we do this since there are several
388
- returns in _to_diagram_element)
389
- """
390
-
391
- def _inner(
392
- element: pyparsing.ParserElement,
393
- parent: typing.Optional[EditablePartial],
394
- lookup: ConverterState = None,
395
- vertical: int = None,
396
- index: int = 0,
397
- name_hint: str = None,
398
- show_results_names: bool = False,
399
- show_groups: bool = False,
400
- ) -> typing.Optional[EditablePartial]:
401
-
402
- ret = fn(
403
- element,
404
- parent,
405
- lookup,
406
- vertical,
407
- index,
408
- name_hint,
409
- show_results_names,
410
- show_groups,
411
- )
412
-
413
- # apply annotation for results name, if present
414
- if show_results_names and ret is not None:
415
- element_results_name = element.resultsName
416
- if element_results_name:
417
- # add "*" to indicate if this is a "list all results" name
418
- element_results_name += "" if element.modalResults else "*"
419
- ret = EditablePartial.from_call(
420
- railroad.Group, item=ret, label=element_results_name
421
- )
422
-
423
- return ret
424
-
425
- return _inner
426
-
427
-
428
- def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]):
429
- non_diagramming_exprs = (
430
- pyparsing.ParseElementEnhance,
431
- pyparsing.PositionToken,
432
- pyparsing.And._ErrorStop,
433
- )
434
- return [
435
- e
436
- for e in exprs
437
- if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs))
438
- ]
439
-
440
-
441
- @_apply_diagram_item_enhancements
442
- def _to_diagram_element(
443
- element: pyparsing.ParserElement,
444
- parent: typing.Optional[EditablePartial],
445
- lookup: ConverterState = None,
446
- vertical: int = None,
447
- index: int = 0,
448
- name_hint: str = None,
449
- show_results_names: bool = False,
450
- show_groups: bool = False,
451
- ) -> typing.Optional[EditablePartial]:
452
- """
453
- Recursively converts a PyParsing Element to a railroad Element
454
- :param lookup: The shared converter state that keeps track of useful things
455
- :param index: The index of this element within the parent
456
- :param parent: The parent of this element in the output tree
457
- :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default),
458
- it sets the threshold of the number of items before we go vertical. If True, always go vertical, if False, never
459
- do so
460
- :param name_hint: If provided, this will override the generated name
461
- :param show_results_names: bool flag indicating whether to add annotations for results names
462
- :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed
463
- :param show_groups: bool flag indicating whether to show groups using bounding box
464
- """
465
- exprs = element.recurse()
466
- name = name_hint or element.customName or element.__class__.__name__
467
-
468
- # Python's id() is used to provide a unique identifier for elements
469
- el_id = id(element)
470
-
471
- element_results_name = element.resultsName
472
-
473
- # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram
474
- if not element.customName:
475
- if isinstance(
476
- element,
477
- (
478
- # pyparsing.TokenConverter,
479
- # pyparsing.Forward,
480
- pyparsing.Located,
481
- ),
482
- ):
483
- # However, if this element has a useful custom name, and its child does not, we can pass it on to the child
484
- if exprs:
485
- if not exprs[0].customName:
486
- propagated_name = name
487
- else:
488
- propagated_name = None
489
-
490
- return _to_diagram_element(
491
- element.expr,
492
- parent=parent,
493
- lookup=lookup,
494
- vertical=vertical,
495
- index=index,
496
- name_hint=propagated_name,
497
- show_results_names=show_results_names,
498
- show_groups=show_groups,
499
- )
500
-
501
- # If the element isn't worth extracting, we always treat it as the first time we say it
502
- if _worth_extracting(element):
503
- if el_id in lookup:
504
- # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate,
505
- # so we have to extract it into a new diagram.
506
- looked_up = lookup[el_id]
507
- looked_up.mark_for_extraction(el_id, lookup, name=name_hint)
508
- ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name)
509
- return ret
510
-
511
- elif el_id in lookup.diagrams:
512
- # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we
513
- # just put in a marker element that refers to the sub-diagram
514
- ret = EditablePartial.from_call(
515
- railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
516
- )
517
- return ret
518
-
519
- # Recursively convert child elements
520
- # Here we find the most relevant Railroad element for matching pyparsing Element
521
- # We use ``items=[]`` here to hold the place for where the child elements will go once created
522
- if isinstance(element, pyparsing.And):
523
- # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat
524
- # (all will have the same name, and resultsName)
525
- if not exprs:
526
- return None
527
- if len(set((e.name, e.resultsName) for e in exprs)) == 1:
528
- ret = EditablePartial.from_call(
529
- railroad.OneOrMore, item="", repeat=str(len(exprs))
530
- )
531
- elif _should_vertical(vertical, exprs):
532
- ret = EditablePartial.from_call(railroad.Stack, items=[])
533
- else:
534
- ret = EditablePartial.from_call(railroad.Sequence, items=[])
535
- elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)):
536
- if not exprs:
537
- return None
538
- if _should_vertical(vertical, exprs):
539
- ret = EditablePartial.from_call(railroad.Choice, 0, items=[])
540
- else:
541
- ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[])
542
- elif isinstance(element, pyparsing.Each):
543
- if not exprs:
544
- return None
545
- ret = EditablePartial.from_call(EachItem, items=[])
546
- elif isinstance(element, pyparsing.NotAny):
547
- ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="")
548
- elif isinstance(element, pyparsing.FollowedBy):
549
- ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="")
550
- elif isinstance(element, pyparsing.PrecededBy):
551
- ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="")
552
- elif isinstance(element, pyparsing.Group):
553
- if show_groups:
554
- ret = EditablePartial.from_call(AnnotatedItem, label="", item="")
555
- else:
556
- ret = EditablePartial.from_call(railroad.Group, label="", item="")
557
- elif isinstance(element, pyparsing.TokenConverter):
558
- ret = EditablePartial.from_call(
559
- AnnotatedItem, label=type(element).__name__.lower(), item=""
560
- )
561
- elif isinstance(element, pyparsing.Opt):
562
- ret = EditablePartial.from_call(railroad.Optional, item="")
563
- elif isinstance(element, pyparsing.OneOrMore):
564
- ret = EditablePartial.from_call(railroad.OneOrMore, item="")
565
- elif isinstance(element, pyparsing.ZeroOrMore):
566
- ret = EditablePartial.from_call(railroad.ZeroOrMore, item="")
567
- elif isinstance(element, pyparsing.Group):
568
- ret = EditablePartial.from_call(
569
- railroad.Group, item=None, label=element_results_name
570
- )
571
- elif isinstance(element, pyparsing.Empty) and not element.customName:
572
- # Skip unnamed "Empty" elements
573
- ret = None
574
- elif len(exprs) > 1:
575
- ret = EditablePartial.from_call(railroad.Sequence, items=[])
576
- elif len(exprs) > 0 and not element_results_name:
577
- ret = EditablePartial.from_call(railroad.Group, item="", label=name)
578
- else:
579
- terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName)
580
- ret = terminal
581
-
582
- if ret is None:
583
- return
584
-
585
- # Indicate this element's position in the tree so we can extract it if necessary
586
- lookup[el_id] = ElementState(
587
- element=element,
588
- converted=ret,
589
- parent=parent,
590
- parent_index=index,
591
- number=lookup.generate_index(),
592
- )
593
- if element.customName:
594
- lookup[el_id].mark_for_extraction(el_id, lookup, element.customName)
595
-
596
- i = 0
597
- for expr in exprs:
598
- # Add a placeholder index in case we have to extract the child before we even add it to the parent
599
- if "items" in ret.kwargs:
600
- ret.kwargs["items"].insert(i, None)
601
-
602
- item = _to_diagram_element(
603
- expr,
604
- parent=ret,
605
- lookup=lookup,
606
- vertical=vertical,
607
- index=i,
608
- show_results_names=show_results_names,
609
- show_groups=show_groups,
610
- )
611
-
612
- # Some elements don't need to be shown in the diagram
613
- if item is not None:
614
- if "item" in ret.kwargs:
615
- ret.kwargs["item"] = item
616
- elif "items" in ret.kwargs:
617
- # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal
618
- ret.kwargs["items"][i] = item
619
- i += 1
620
- elif "items" in ret.kwargs:
621
- # If we're supposed to skip this element, remove it from the parent
622
- del ret.kwargs["items"][i]
623
-
624
- # If all this items children are none, skip this item
625
- if ret and (
626
- ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0)
627
- or ("item" in ret.kwargs and ret.kwargs["item"] is None)
628
- ):
629
- ret = EditablePartial.from_call(railroad.Terminal, name)
630
-
631
- # Mark this element as "complete", ie it has all of its children
632
- if el_id in lookup:
633
- lookup[el_id].complete = True
634
-
635
- if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete:
636
- lookup.extract_into_diagram(el_id)
637
- if ret is not None:
638
- ret = EditablePartial.from_call(
639
- railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
640
- )
641
-
642
- return ret
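A short usage sketch of the public entry points above (`to_railroad` and `railroad_to_html`); the grammar itself is illustrative:

import pyparsing as pp

word = pp.Word(pp.alphas).set_name("word")
greeting = (word("salutation") + "," + word("name") + "!").set_name("greeting")

# Convert the parser into railroad diagrams, then render them to a single HTML page.
diagrams = to_railroad(greeting, show_results_names=True)
html = railroad_to_html(diagrams)
with open("greeting_diagram.html", "w", encoding="utf-8") as f:
    f.write(html)

In pyparsing 3 the same output is usually produced via `ParserElement.create_diagram(...)`, which wraps these helpers.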
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/utils/registry.py DELETED
@@ -1,60 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
-
3
- from typing import Any
4
- import pydoc
5
- from fvcore.common.registry import Registry # for backward compatibility.
6
-
7
- """
8
- ``Registry`` and `locate` provide ways to map a string (typically found
9
- in config files) to callable objects.
10
- """
11
-
12
- __all__ = ["Registry", "locate"]
13
-
14
-
15
- def _convert_target_to_string(t: Any) -> str:
16
- """
17
- Inverse of ``locate()``.
18
-
19
- Args:
20
- t: any object with ``__module__`` and ``__qualname__``
21
- """
22
- module, qualname = t.__module__, t.__qualname__
23
-
24
- # Compress the path to this object, e.g. ``module.submodule._impl.class``
25
- # may become ``module.submodule.class``, if the later also resolves to the same
26
- # object. This simplifies the string, and also is less affected by moving the
27
- # class implementation.
28
- module_parts = module.split(".")
29
- for k in range(1, len(module_parts)):
30
- prefix = ".".join(module_parts[:k])
31
- candidate = f"{prefix}.{qualname}"
32
- try:
33
- if locate(candidate) is t:
34
- return candidate
35
- except ImportError:
36
- pass
37
- return f"{module}.{qualname}"
38
-
39
-
40
- def locate(name: str) -> Any:
41
- """
42
- Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``,
43
- such as "module.submodule.class_name".
44
-
45
- Raise Exception if it cannot be found.
46
- """
47
- obj = pydoc.locate(name)
48
-
49
- # Some cases (e.g. torch.optim.sgd.SGD) not handled correctly
50
- # by pydoc.locate. Try a private function from hydra.
51
- if obj is None:
52
- try:
53
- # from hydra.utils import get_method - will print many errors
54
- from hydra.utils import _locate
55
- except ImportError as e:
56
- raise ImportError(f"Cannot dynamically locate object {name}!") from e
57
- else:
58
- obj = _locate(name) # it raises if fails
59
-
60
- return obj
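A small sketch of how these utilities are typically used in detectron2-style code; the registry name and class here are illustrative:

from detectron2.utils.registry import Registry, locate

BACKBONE_REGISTRY = Registry("BACKBONE")  # maps strings found in config files to objects

@BACKBONE_REGISTRY.register()
class ToyBackbone:
    pass

cls = BACKBONE_REGISTRY.get("ToyBackbone")  # -> ToyBackbone
fn = locate("collections.OrderedDict")      # dotted-path lookup, the inverse of _convert_target_to_string

Keeping the string-to-object mapping in one place is what lets config files stay declarative while the code stays importable.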
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tests/modeling/test_rpn.py DELETED
@@ -1,262 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import logging
3
- import unittest
4
- import torch
5
-
6
- from detectron2.config import get_cfg
7
- from detectron2.export import scripting_with_instances
8
- from detectron2.layers import ShapeSpec
9
- from detectron2.modeling.backbone import build_backbone
10
- from detectron2.modeling.proposal_generator import RPN, build_proposal_generator
11
- from detectron2.modeling.proposal_generator.proposal_utils import (
12
- add_ground_truth_to_proposals,
13
- find_top_rpn_proposals,
14
- )
15
- from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
16
- from detectron2.utils.events import EventStorage
17
-
18
- logger = logging.getLogger(__name__)
19
-
20
-
21
- class RPNTest(unittest.TestCase):
22
- def get_gt_and_features(self):
23
- num_images = 2
24
- images_tensor = torch.rand(num_images, 20, 30)
25
- image_sizes = [(10, 10), (20, 30)]
26
- images = ImageList(images_tensor, image_sizes)
27
- image_shape = (15, 15)
28
- num_channels = 1024
29
- features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
30
- gt_boxes = torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32)
31
- gt_instances = Instances(image_shape)
32
- gt_instances.gt_boxes = Boxes(gt_boxes)
33
- return (gt_instances, features, images, image_sizes)
34
-
35
- def test_rpn(self):
36
- torch.manual_seed(121)
37
- cfg = get_cfg()
38
- backbone = build_backbone(cfg)
39
- proposal_generator = RPN(cfg, backbone.output_shape())
40
- (gt_instances, features, images, image_sizes) = self.get_gt_and_features()
41
- with EventStorage(): # capture events in a new storage to discard them
42
- proposals, proposal_losses = proposal_generator(
43
- images, features, [gt_instances[0], gt_instances[1]]
44
- )
45
-
46
- expected_losses = {
47
- "loss_rpn_cls": torch.tensor(0.08011703193),
48
- "loss_rpn_loc": torch.tensor(0.101470276),
49
- }
50
- for name in expected_losses.keys():
51
- err_msg = "proposal_losses[{}] = {}, expected losses = {}".format(
52
- name, proposal_losses[name], expected_losses[name]
53
- )
54
- self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
55
-
56
- self.assertEqual(len(proposals), len(image_sizes))
57
- for proposal, im_size in zip(proposals, image_sizes):
58
- self.assertEqual(proposal.image_size, im_size)
59
-
60
- expected_proposal_box = torch.tensor([[0, 0, 10, 10], [7.2702, 0, 10, 10]])
61
- expected_objectness_logit = torch.tensor([0.1596, -0.0007])
62
- self.assertTrue(
63
- torch.allclose(proposals[0].proposal_boxes.tensor, expected_proposal_box, atol=1e-4)
64
- )
65
- self.assertTrue(
66
- torch.allclose(proposals[0].objectness_logits, expected_objectness_logit, atol=1e-4)
67
- )
68
-
69
- def verify_rpn(self, conv_dims, expected_conv_dims):
70
- torch.manual_seed(121)
71
- cfg = get_cfg()
72
- cfg.MODEL.RPN.CONV_DIMS = conv_dims
73
- backbone = build_backbone(cfg)
74
- proposal_generator = RPN(cfg, backbone.output_shape())
75
- for k, conv in enumerate(proposal_generator.rpn_head.conv):
76
- self.assertEqual(expected_conv_dims[k], conv.out_channels)
77
- return proposal_generator
78
-
79
- def test_rpn_larger_num_convs(self):
80
- conv_dims = [64, 64, 64, 64, 64]
81
- proposal_generator = self.verify_rpn(conv_dims, conv_dims)
82
- (gt_instances, features, images, image_sizes) = self.get_gt_and_features()
83
- with EventStorage(): # capture events in a new storage to discard them
84
- proposals, proposal_losses = proposal_generator(
85
- images, features, [gt_instances[0], gt_instances[1]]
86
- )
87
- expected_losses = {
88
- "loss_rpn_cls": torch.tensor(0.08122821152),
89
- "loss_rpn_loc": torch.tensor(0.10064548254),
90
- }
91
- for name in expected_losses.keys():
92
- err_msg = "proposal_losses[{}] = {}, expected losses = {}".format(
93
- name, proposal_losses[name], expected_losses[name]
94
- )
95
- self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
96
-
97
- def test_rpn_conv_dims_not_set(self):
98
- conv_dims = [-1, -1, -1]
99
- expected_conv_dims = [1024, 1024, 1024]
100
- self.verify_rpn(conv_dims, expected_conv_dims)
101
-
102
- def test_rpn_scriptability(self):
103
- cfg = get_cfg()
104
- proposal_generator = RPN(cfg, {"res4": ShapeSpec(channels=1024, stride=16)}).eval()
105
- num_images = 2
106
- images_tensor = torch.rand(num_images, 30, 40)
107
- image_sizes = [(32, 32), (30, 40)]
108
- images = ImageList(images_tensor, image_sizes)
109
- features = {"res4": torch.rand(num_images, 1024, 1, 2)}
110
-
111
- fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor}
112
- proposal_generator_ts = scripting_with_instances(proposal_generator, fields)
113
-
114
- proposals, _ = proposal_generator(images, features)
115
- proposals_ts, _ = proposal_generator_ts(images, features)
116
-
117
- for proposal, proposal_ts in zip(proposals, proposals_ts):
118
- self.assertEqual(proposal.image_size, proposal_ts.image_size)
119
- self.assertTrue(
120
- torch.equal(proposal.proposal_boxes.tensor, proposal_ts.proposal_boxes.tensor)
121
- )
122
- self.assertTrue(torch.equal(proposal.objectness_logits, proposal_ts.objectness_logits))
123
-
124
- def test_rrpn(self):
125
- torch.manual_seed(121)
126
- cfg = get_cfg()
127
- cfg.MODEL.PROPOSAL_GENERATOR.NAME = "RRPN"
128
- cfg.MODEL.ANCHOR_GENERATOR.NAME = "RotatedAnchorGenerator"
129
- cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64]]
130
- cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.25, 1]]
131
- cfg.MODEL.ANCHOR_GENERATOR.ANGLES = [[0, 60]]
132
- cfg.MODEL.RPN.BBOX_REG_WEIGHTS = (1, 1, 1, 1, 1)
133
- cfg.MODEL.RPN.HEAD_NAME = "StandardRPNHead"
134
- backbone = build_backbone(cfg)
135
- proposal_generator = build_proposal_generator(cfg, backbone.output_shape())
136
- num_images = 2
137
- images_tensor = torch.rand(num_images, 20, 30)
138
- image_sizes = [(10, 10), (20, 30)]
139
- images = ImageList(images_tensor, image_sizes)
140
- image_shape = (15, 15)
141
- num_channels = 1024
142
- features = {"res4": torch.rand(num_images, num_channels, 1, 2)}
143
- gt_boxes = torch.tensor([[2, 2, 2, 2, 0], [4, 4, 4, 4, 0]], dtype=torch.float32)
144
- gt_instances = Instances(image_shape)
145
- gt_instances.gt_boxes = RotatedBoxes(gt_boxes)
146
- with EventStorage(): # capture events in a new storage to discard them
147
- proposals, proposal_losses = proposal_generator(
148
- images, features, [gt_instances[0], gt_instances[1]]
149
- )
150
-
151
- expected_losses = {
152
- "loss_rpn_cls": torch.tensor(0.04291602224),
153
- "loss_rpn_loc": torch.tensor(0.145077362),
154
- }
155
- for name in expected_losses.keys():
156
- err_msg = "proposal_losses[{}] = {}, expected losses = {}".format(
157
- name, proposal_losses[name], expected_losses[name]
158
- )
159
- self.assertTrue(torch.allclose(proposal_losses[name], expected_losses[name]), err_msg)
160
-
161
- expected_proposal_box = torch.tensor(
162
- [
163
- [-1.77999556, 0.78155339, 68.04367828, 14.78156471, 60.59333801],
164
- [13.82740974, -1.50282836, 34.67269897, 29.19676590, -3.81942749],
165
- [8.10392570, -0.99071521, 145.39100647, 32.13126373, 3.67242432],
166
- [5.00000000, 4.57370186, 10.00000000, 9.14740372, 0.89196777],
167
- ]
168
- )
169
-
170
- expected_objectness_logit = torch.tensor([0.10924313, 0.09881870, 0.07649877, 0.05858029])
171
-
172
- torch.set_printoptions(precision=8, sci_mode=False)
173
-
174
- self.assertEqual(len(proposals), len(image_sizes))
175
-
176
- proposal = proposals[0]
177
- # There is some nondeterminism in the result across different machines:
179
- # this test can be run 100 times on one machine with exactly the same result,
180
- # but a different machine might produce slightly different results,
181
- # hence the atol here.
181
- err_msg = "computed proposal boxes = {}, expected {}".format(
182
- proposal.proposal_boxes.tensor, expected_proposal_box
183
- )
184
- self.assertTrue(
185
- torch.allclose(proposal.proposal_boxes.tensor[:4], expected_proposal_box, atol=1e-5),
186
- err_msg,
187
- )
188
-
189
- err_msg = "computed objectness logits = {}, expected {}".format(
190
- proposal.objectness_logits, expected_objectness_logit
191
- )
192
- self.assertTrue(
193
- torch.allclose(proposal.objectness_logits[:4], expected_objectness_logit, atol=1e-5),
194
- err_msg,
195
- )
196
-
197
- def test_find_rpn_proposals_inf(self):
198
- N, Hi, Wi, A = 3, 3, 3, 3
199
- proposals = [torch.rand(N, Hi * Wi * A, 4)]
200
- pred_logits = [torch.rand(N, Hi * Wi * A)]
201
- pred_logits[0][1][3:5].fill_(float("inf"))
202
- find_top_rpn_proposals(proposals, pred_logits, [(10, 10)], 0.5, 1000, 1000, 0, False)
203
-
204
- def test_find_rpn_proposals_tracing(self):
205
- N, Hi, Wi, A = 3, 50, 50, 9
206
- proposal = torch.rand(N, Hi * Wi * A, 4)
207
- pred_logit = torch.rand(N, Hi * Wi * A)
208
-
209
- def func(proposal, logit, image_size):
210
- r = find_top_rpn_proposals(
211
- [proposal], [logit], [image_size], 0.7, 1000, 1000, 0, False
212
- )[0]
213
- size = r.image_size
214
- if not isinstance(size, torch.Tensor):
215
- size = torch.tensor(size)
216
- return (size, r.proposal_boxes.tensor, r.objectness_logits)
217
-
218
- other_inputs = []
219
- # test that it generalizes to other shapes
220
- for Hi, Wi, shp in [(30, 30, 60), (10, 10, 800)]:
221
- other_inputs.append(
222
- (
223
- torch.rand(N, Hi * Wi * A, 4),
224
- torch.rand(N, Hi * Wi * A),
225
- torch.tensor([shp, shp]),
226
- )
227
- )
228
- torch.jit.trace(
229
- func, (proposal, pred_logit, torch.tensor([100, 100])), check_inputs=other_inputs
230
- )
231
-
232
- def test_append_gt_to_proposal(self):
233
- proposals = Instances(
234
- (10, 10),
235
- **{
236
- "proposal_boxes": Boxes(torch.empty((0, 4))),
237
- "objectness_logits": torch.tensor([]),
238
- "custom_attribute": torch.tensor([]),
239
- }
240
- )
241
- gt_boxes = Boxes(torch.tensor([[0, 0, 1, 1]]))
242
-
243
- self.assertRaises(AssertionError, add_ground_truth_to_proposals, [gt_boxes], [proposals])
244
-
245
- gt_instances = Instances((10, 10))
246
- gt_instances.gt_boxes = gt_boxes
247
-
248
- self.assertRaises(
249
- AssertionError, add_ground_truth_to_proposals, [gt_instances], [proposals]
250
- )
251
-
252
- gt_instances.custom_attribute = torch.tensor([1])
253
- gt_instances.custom_attribute2 = torch.tensor([1])
254
- new_proposals = add_ground_truth_to_proposals([gt_instances], [proposals])[0]
255
-
256
- self.assertEqual(new_proposals.custom_attribute[0], 1)
257
- # new proposals should only include the attributes in proposals
258
- self.assertRaises(AttributeError, lambda: new_proposals.custom_attribute2)
259
-
260
-
261
- if __name__ == "__main__":
262
- unittest.main()
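For reference, the RPN exercised by these tests can be driven directly; the minimal sketch below mirrors get_gt_and_features() and test_rpn() above. The import paths are assumptions based on detectron2's layout (the file's import block is truncated in this diff), and the printed loss values will differ from the hard-coded expectations above on other seeds or machines.

import torch
from detectron2.config import get_cfg
from detectron2.modeling import build_backbone
from detectron2.modeling.proposal_generator.rpn import RPN
from detectron2.structures import Boxes, ImageList, Instances
from detectron2.utils.events import EventStorage

cfg = get_cfg()
backbone = build_backbone(cfg)
rpn = RPN(cfg, backbone.output_shape())  # same construction as in test_rpn()

images = ImageList(torch.rand(2, 20, 30), [(10, 10), (20, 30)])
features = {"res4": torch.rand(2, 1024, 1, 2)}  # stand-in backbone features
gt = Instances((15, 15))
gt.gt_boxes = Boxes(torch.tensor([[1, 1, 3, 3], [2, 2, 6, 6]], dtype=torch.float32))

with EventStorage():  # the RPN logs metrics to an event storage while training
    proposals, losses = rpn(images, features, [gt[0], gt[1]])
print(losses)  # e.g. {'loss_rpn_cls': tensor(...), 'loss_rpn_loc': tensor(...)}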
spaces/AzumaSeren100/XuanShen-Bert-VITS2/bert_gen.py DELETED
@@ -1,54 +0,0 @@
- import torch
- from torch.utils.data import DataLoader
- from multiprocessing import Pool
- import commons
- import utils
- from data_utils import TextAudioSpeakerLoader, TextAudioSpeakerCollate
- from tqdm import tqdm
- import warnings
-
- from text import cleaned_text_to_sequence, get_bert
-
- config_path = 'configs/config.json'
- hps = utils.get_hparams_from_file(config_path)
-
- def process_line(line):
-     _id, spk, language_str, text, phones, tone, word2ph = line.strip().split("|")
-     phone = phones.split(" ")
-     tone = [int(i) for i in tone.split(" ")]
-     word2ph = [int(i) for i in word2ph.split(" ")]
-     phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)
-
-     if hps.data.add_blank:
-         phone = commons.intersperse(phone, 0)
-         tone = commons.intersperse(tone, 0)
-         language = commons.intersperse(language, 0)
-         for i in range(len(word2ph)):
-             word2ph[i] = word2ph[i] * 2
-         word2ph[0] += 1
-     wav_path = f'{_id}'
-
-     bert_path = wav_path.replace(".wav", ".bert.pt")
-
-     try:
-         # Reuse a previously generated BERT feature file if it matches the phoneme length.
-         bert = torch.load(bert_path)
-         assert bert.shape[-1] == len(phone)
-     except Exception:
-         # Otherwise (missing file or shape mismatch), regenerate and cache it.
-         bert = get_bert(text, word2ph, language_str)
-         assert bert.shape[-1] == len(phone)
-         torch.save(bert, bert_path)
-
-
- if __name__ == '__main__':
-     lines = []
-     with open(hps.data.training_files, encoding='utf-8') as f:
-         lines.extend(f.readlines())
-
-     with open(hps.data.validation_files, encoding='utf-8') as f:
-         lines.extend(f.readlines())
-
-     with Pool(processes=6) as pool:  # suitable for a P40 with 24 GB; if you run out of memory, decrease the number of processes
-         for _ in tqdm(pool.imap_unordered(process_line, lines)):
-             pass
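The filelist format this script expects is only implied by the split("|") in process_line, so the field layout below is an inference and the concrete paths and values are hypothetical:

# One line of hps.data.training_files / hps.data.validation_files:
# wav_path|speaker|language|raw_text|phones|tones|word2ph
line = "dataset/0001.wav|XuanShen|ZH|你好|n i h ao|0 2 0 3|2 2"
_id, spk, language_str, text, phones, tone, word2ph = line.strip().split("|")
print(_id.replace(".wav", ".bert.pt"))  # where the BERT features get cached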
spaces/Bajr/softly/README.md DELETED
@@ -1,11 +0,0 @@
1
- ---
2
- title: Yummy Research
3
- emoji: 🍦
4
- colorFrom: red
5
- colorTo: blue
6
- sdk: docker
7
- pinned: false
8
- duplicated_from: Bajr/soft
9
- ---
10
-
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Benson/text-generation/Examples/Caramelo Crush Soda Saga Juego Gratis Para Pc.md DELETED
@@ -1,103 +0,0 @@
-
- <h1>Candy Crush Soda Saga: How to Download and Play This Fun Puzzle Game on Your PC</h1>
- <p>If you like matching candies and solving puzzles, you may have heard of Candy Crush Soda Saga, one of the most popular games in the world. This game is a sequel to the legendary Candy Crush Saga, and it offers more fun and challenges with new candies, modes, and features. In this article, we will show you how to download and play this game on your PC for free using the Epic Games Store. We will also tell you why playing puzzle games on your PC is good for your brain and your mood, and give you some tips and tricks to get the most out of your gaming experience.</p>
- <h2>What Is Candy Crush Soda Saga?</h2>
- <p>Candy Crush Soda Saga is a match-3 puzzle game developed by King, a leading casual-games company. The game was released in 2014 as a spin-off of Candy Crush Saga, which has more than a billion downloads worldwide. The game follows the adventures of Kimmy, who is searching for her sister Tiffi in a world full of sweets. Along the way, she meets new characters and faces new challenges.</p>
- <h2>candy crush soda saga free game for pc</h2><br /><p><b><b>Download File</b> &#9733;&#9733;&#9733; <a href="https://bltlly.com/2v6J0M">https://bltlly.com/2v6J0M</a></b></p><br /><br />
- <p>The game has more than 10,000 levels, each with a different objective and layout. You have to match three or more candies of the same color to clear them from the board and create special candies with extra effects. You also have to deal with various obstacles, such as ice, honeycomb, jam, chocolate, and soda bottles. The game has different modes, such as Soda, Frosting, Honeycomb, Jam, Bubblegum, and more. Each mode has its own rules and strategies.</p>
- <p>The game also has many features that make it more fun and engaging. You can play with your friends online and compete for high scores. You can also join teams and cooperate with other players in events and challenges. You can earn rewards and boosters that help you through difficult levels. The game also receives monthly updates that bring new content and surprises.</p>
- <h2>Why Play Candy Crush Soda Saga on Your PC?</h2>
-
- <ul>
- <li>You can enjoy better graphics and sound quality on a bigger screen.</li>
- <li>You can use the mouse and keyboard to control the game more easily.</li>
- <li>You can save battery life and data usage on your phone or tablet.</li>
- <li>You can avoid distractions from notifications and calls while you play.</li>
- </ul>
- <p>One of the best ways to play Candy Crush Soda Saga on your PC is to use the Epic Games Store, a digital distribution platform that lets you download games to your PC through the Epic Games Launcher. The Epic Games Store has many advantages, such as:</p>
- <ul>
- <li>You can access hundreds of games across many genres and categories.</li>
- <li>You can get free games every week.</li>
- <li>You can enjoy exclusive deals and discounts.</li>
- <li>You can support developers by giving them a larger share of the revenue.</li>
- <li>You can connect with your friends and other players through chat and social features.</li>
- </ul>
- <h2>How to Download and Play Candy Crush Soda Saga on Your PC</h2>
- <p>Downloading and playing Candy Crush Soda Saga on your PC is very easy. Just follow these steps:</p>
- <ol>
- <li>Install the Epic Games Launcher. You can download it from [5](https:// epicgames.com/en-US/download) and run the installer. You will need to create an account or sign in with an existing one.</li>
- <li>Search for Candy Crush Soda Saga in the Epic Games Store. You can find it in the Free Games section or use the search bar.</li>
- <li>Click the Get button and confirm your order. The game will be added to your library.</li>
- <li>Go to your library and click the Install button next to the game. Choose a location for the game files and wait for the download and installation to finish.</li>
- <li>Launch the game from your library or from the desktop shortcut. You can also adjust the game's settings and preferences from the launcher.</li>
- </ol>
-
- <table>
- <tr>
- <th>System requirements</th>
- <th>Minimum</th>
- <th>Recommended</th>
- </tr>
- <tr>
- <td>Operating system</td>
- <td>Windows 7 or higher</td>
- <td>Windows 10</td>
- </tr>
- <tr>
- <td>Processor</td>
- <td>Intel Core i3 or equivalent</td>
- <td>Intel Core i5 or equivalent</td>
- </tr>
- <tr>
- <td>Memory</td>
- <td>4 GB of RAM</td>
- <td>8 GB of RAM</td>
- </tr>
- <tr>
- <td>Graphics</td>
- <td>Intel HD Graphics 4000 or higher</td>
- <td>NVIDIA GeForce GTX 660 or higher</td>
- </tr>
- <tr>
- <td>Storage</td>
- <td>500 MB of available space</td>
- <td>1 GB of available space</td>
- </tr>
- <tr>
- <td>Internet connection</td>
- <td>Broadband internet connection</td>
- <td>Broadband internet connection</td>
- </tr>
- </table>
- <h2>Tips and Tricks to Enjoy Candy Crush Soda Saga More</h2>
- <p>Candy Crush Soda Saga is a fun and addictive game, but it can also be challenging and frustrating at times. Here are some tips and tricks to help you enjoy the game more:</p>
- <p></p>
- <ul>
- <li>Plan your moves. Look for matches that can create special candies, such as striped, wrapped, fish, or coloring candies. These can help you clear more candies and obstacles in a single move.</li>
- <li>Use boosters wisely. Boosters are items that give you an advantage in the game, such as extra moves, lollipop hammers, free switches, and more. You can earn them by completing levels, events, or challenges, or buy them with real money. However, do not rely on them too much, as they are limited and run out quickly.</li>
- <li>Play with friends. Playing with friends can make the game more fun and social. You can invite your friends to join your team, send and receive lives, chat with them, and compete for high scores. You can also ask them for help when you are stuck on a level.</li>
-
- <li>Have fun. Do not let the game stress or frustrate you. Remember that it is just a game, and the main purpose is to have fun. Enjoy the colorful graphics, the catchy music, and the cute characters. Do not be afraid to experiment with different strategies and see what works for you.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Candy Crush Soda Saga is a great game to play on your PC, especially if you like puzzle games and sweets. You can download it for free from the Epic Games Store and enjoy its features and benefits. You can also follow our tips and tricks to get the most out of your gaming experience. What are you waiting for? Download Candy Crush Soda Saga today and join Kimmy on her sweet adventure!</p>
- <h2>FAQ</h2>
- <p>Here are some of the most frequently asked questions about Candy Crush Soda Saga:</p>
- <h3>Q: How many levels are there in Candy Crush Soda Saga?</h3>
- <p>A: There are more than 10,000 levels in Candy Crush Soda Saga as of June 2023, and more are added every month.</p>
- <h3>Q: How can I sync my progress across devices?</h3>
- <p>A: You can sync your progress across devices by connecting your game to Facebook or King.com. This will also let you access your saved boosters and lives.</p>
- <h3>Q: How do I get more lives?</h3>
- <p>A: You have five lives in Candy Crush Soda Saga, and you lose one every time you fail a level. You can get more lives by waiting for them to refill (one life every 30 minutes), asking your friends to send you some, buying them with gold bars, or playing the daily quests.</p>
- <h3>Q: What are gold bars and how do I get them?</h3>
- <p>A: Gold bars are the premium currency in Candy Crush Soda Saga. You can use them to buy boosters, lives, moves, and other items. You can get gold bars by completing levels, events, or challenges, or by buying them with real money.</p>
- <h3>Q: What are the different types of special candies and how do I make them?</h3>
-
- <ul>
- <li>Striped candy: Match four candies of the same color in a row or column. This creates a striped candy that clears an entire row or column when matched.</li>
- <li>Wrapped candy: Match five candies of the same color in an L or T shape. This creates a wrapped candy that explodes twice when matched, clearing a 3x3 area each time.</li>
- <li>Fish candy: Match four candies of the same color in a square. This creates a fish candy that swims to a random candy or obstacle and clears it when matched.</li>
- <li>Coloring candy: Match six or more candies of the same color. This creates a coloring candy that, when matched, changes the color of all candies sharing its color.</li>
- <li>Color bomb: Match five candies of the same color in a row or column. This creates a color bomb that clears all candies of the color it is swapped with.</li>
- <li>Swedish fish: This is a special candy that can only be obtained by using boosters or by playing certain levels. It acts like a fish candy, but it can target the specific candies or obstacles needed to complete the level.</li>
- </ul>
spaces/Benson/text-generation/Examples/Cmo Descargar Blockman Ir En El PC Gratis.md DELETED
@@ -1,57 +0,0 @@
-
- <h1>How to Download Blockman Go on PC for Free</h1>
- <p>Blockman Go is a popular game that combines elements of sandbox, adventure, action, and social games. You can play various block-style minigames, chat and make friends with other players, and customize your avatar and home with different decorations. But did you know that you can also play Blockman Go on your PC for free? In this article, we will show you how to download and install Blockman Go on your PC with BlueStacks, a powerful Android emulator that lets you run Android apps and games on your computer or laptop.</p>
- <h2>how to download blockman go on pc for free</h2><br /><p><b><b>Download</b> &#9658;&#9658;&#9658; <a href="https://bltlly.com/2v6INR">https://bltlly.com/2v6INR</a></b></p><br /><br />
- <h2>What Is Blockman Go?</h2>
- <p>Blockman Go is a free app developed by Blockman GO Studio. It is a sandbox game that lets you play, create, and share fun experiences with your friends. You can choose from a wide catalog of minigames, which are continuously updated to keep things fresh and fun. Some of the popular minigames are Bed Wars, Egg War, Sky Block, Free City RP, Anime Fighting Simulator, and more. You can join any game with one click and earn rewards for playing.</p>
- <p>Blockman Go is also a social platform where you can chat and make friends with other players. You can join or create parties, send messages, voice chat, and interact with others in various ways. You can also join the growing developer community and share your creations with the world.</p>
- <p>Blockman Go is also a creative tool that lets you customize your avatar and home with different accessories, outfits, and decorations. You can express your unique style and personality with the hundreds of options available. You can also use the Blockman Editor to create your own sandbox experiences and minigames.</p>
- <h2>Why Play Blockman Go on PC?</h2>
- <p>While Blockman Go is designed for mobile devices, you can also play it on your PC for free with BlueStacks. There are many advantages to playing Blockman Go on PC, such as:</p>
- <ul>
-
- <li>You can use the keyboard and mouse for more precise controls, which will give you an advantage in competitive minigames.</li>
- <li>You can access thousands of apps and productivity tools with BlueStacks, which will help you work more efficiently and conveniently on your PC.</li>
- </ul>
- <h2>How to Download and Install Blockman Go on PC with BlueStacks</h2>
- <p>To play Blockman Go on PC for free, you need to download and install BlueStacks on your PC first. BlueStacks is an Android emulator that lets you run Android apps and games on your computer or laptop. Here are the steps to download and install Blockman Go on PC with BlueStacks:</p>
- <p></p>
- <ol>
- <li>Download and install BlueStacks on your PC from <a href="( 5 )">this link</a>.</li>
- <li>Complete the Google sign-in to access the Play Store, or do it later.</li>
- <li>Search for Blockman Go in the app center or the search bar in the top-right corner.</li>
- <li>Click to install Blockman Go from the search results.</li>
- <li>Click the Blockman Go icon on the home screen to start playing.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>Blockman Go is a fun and versatile game that offers plenty of entertainment and creativity. You can play various minigames, chat and make friends, and customize your avatar and home. You can also play Blockman Go on your PC for free with BlueStacks, an Android emulator that lets you run Android apps and games on your computer or laptop. By playing Blockman Go on PC, you can enjoy a bigger screen, better graphics, keyboard and mouse controls, and access to thousands of apps and productivity tools. To download and install Blockman Go on PC with BlueStacks, you just have to follow a few simple steps. We hope this article has helped you learn how to download Blockman Go on PC for free.</p>
- <h2>FAQ</h2>
- <p>Here are some frequently asked questions about Blockman Go and BlueStacks:</p>
- <table>
- <tr>
- <th>Question</th>
- <th>Answer</th>
- </tr>
- <tr>
- <td>Is Blockman Go free to play?</td>
-
- </tr>
- <tr>
- <td>Is Blockman Go safe to play?</td>
- <td>Yes, Blockman Go is safe to play. It has a 4.3 rating on the Google Play Store and a 4.6 rating on the App Store. It also has parental controls and anti-cheat systems to ensure a fair and safe gaming environment.</td>
- </tr>
- <tr>
- <td>Is BlueStacks free to use?</td>
- <td>Yes, BlueStacks is free to use. You can download it from <a href="">this link</a>. You can also upgrade to BlueStacks Premium for more features and benefits.</td>
- </tr>
- <tr>
- <td>Is BlueStacks safe to use?</td>
- <td>Yes, BlueStacks is safe to use. It is the most trusted and popular Android emulator in the world, with more than 500 million users. It also has advanced security features and antivirus protection to keep you safe and protect your privacy.</td>
- </tr>
- <tr>
- <td>How can I contact Blockman Go or BlueStacks support?</td>
- <td>If you have any problems or questions about Blockman Go or BlueStacks, you can contact their support teams through their official websites or social media channels. You can also check their FAQs and forums for more information and solutions.</td>
- </tr>
- </table>
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/docs/bcdoc/docstringparser.py DELETED
@@ -1,315 +0,0 @@
1
- # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # http://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
- from html.parser import HTMLParser
14
- from itertools import zip_longest
15
-
16
- PRIORITY_PARENT_TAGS = ('code', 'a')
17
- OMIT_NESTED_TAGS = ('span', 'i', 'code', 'a')
18
- OMIT_SELF_TAGS = ('i', 'b')
19
- HTML_BLOCK_DISPLAY_TAGS = ('p', 'note', 'ul', 'li')
20
-
21
-
22
- class DocStringParser(HTMLParser):
23
- """
24
- A simple HTML parser. Focused on converting the subset of HTML
25
- that appears in the documentation strings of the JSON models into
26
- simple ReST format.
27
- """
28
-
29
- def __init__(self, doc):
30
- self.tree = None
31
- self.doc = doc
32
- super().__init__()
33
-
34
- def reset(self):
35
- HTMLParser.reset(self)
36
- self.tree = HTMLTree(self.doc)
37
-
38
- def feed(self, data):
39
- super().feed(data)
40
- self.tree.write()
41
- self.tree = HTMLTree(self.doc)
42
-
43
- def close(self):
44
- super().close()
45
- # Write if there is anything remaining.
46
- self.tree.write()
47
- self.tree = HTMLTree(self.doc)
48
-
49
- def handle_starttag(self, tag, attrs):
50
- self.tree.add_tag(tag, attrs=attrs)
51
-
52
- def handle_endtag(self, tag):
53
- self.tree.add_tag(tag, is_start=False)
54
-
55
- def handle_data(self, data):
56
- self.tree.add_data(data)
57
-
58
-
59
- class HTMLTree:
60
- """
61
- A tree which handles HTML nodes. Designed to work with a python HTML parser,
62
- meaning that the current_node will be the most recently opened tag. When
63
- a tag is closed, the current_node moves up to the parent node.
64
- """
65
-
66
- def __init__(self, doc):
67
- self.doc = doc
68
- self.head = StemNode()
69
- self.current_node = self.head
70
- self.unhandled_tags = []
71
-
72
- def add_tag(self, tag, attrs=None, is_start=True):
73
- if not self._doc_has_handler(tag, is_start):
74
- self.unhandled_tags.append(tag)
75
- return
76
-
77
- if is_start:
78
- node = TagNode(tag, attrs)
79
- self.current_node.add_child(node)
80
- self.current_node = node
81
- else:
82
- self.current_node = self.current_node.parent
83
-
84
- def _doc_has_handler(self, tag, is_start):
85
- if is_start:
86
- handler_name = 'start_%s' % tag
87
- else:
88
- handler_name = 'end_%s' % tag
89
-
90
- return hasattr(self.doc.style, handler_name)
91
-
92
- def add_data(self, data):
93
- self.current_node.add_child(DataNode(data))
94
-
95
- def write(self):
96
- self.head.write(self.doc)
97
-
98
-
99
- class Node:
100
- def __init__(self, parent=None):
101
- self.parent = parent
102
-
103
- def write(self, doc):
104
- raise NotImplementedError
105
-
106
-
107
- class StemNode(Node):
108
- def __init__(self, parent=None):
109
- super().__init__(parent)
110
- self.children = []
111
-
112
- def add_child(self, child):
113
- child.parent = self
114
- self.children.append(child)
115
-
116
- def write(self, doc):
117
- self.collapse_whitespace()
118
- self._write_children(doc)
119
-
120
- def _write_children(self, doc):
121
- for child, next_child in zip_longest(self.children, self.children[1:]):
122
- if isinstance(child, TagNode) and next_child is not None:
123
- child.write(doc, next_child)
124
- else:
125
- child.write(doc)
126
-
127
- def is_whitespace(self):
128
- return all(child.is_whitespace() for child in self.children)
129
-
130
- def startswith_whitespace(self):
131
- return self.children and self.children[0].startswith_whitespace()
132
-
133
- def endswith_whitespace(self):
134
- return self.children and self.children[-1].endswith_whitespace()
135
-
136
- def lstrip(self):
137
- while self.children and self.children[0].is_whitespace():
138
- self.children = self.children[1:]
139
- if self.children:
140
- self.children[0].lstrip()
141
-
142
- def rstrip(self):
143
- while self.children and self.children[-1].is_whitespace():
144
- self.children = self.children[:-1]
145
- if self.children:
146
- self.children[-1].rstrip()
147
-
148
- def collapse_whitespace(self):
149
- """Remove collapsible white-space from HTML.
150
-
151
- HTML in docstrings often contains extraneous white-space around tags,
152
- for readability. Browsers would collapse this white-space before
153
- rendering. If not removed before conversion to RST where white-space is
154
- part of the syntax, for example for indentation, it can result in
155
- incorrect output.
156
- """
157
- self.lstrip()
158
- self.rstrip()
159
- for child in self.children:
160
- child.collapse_whitespace()
161
-
162
-
163
- class TagNode(StemNode):
164
- """
165
- A generic Tag node. It will verify that handlers exist before writing.
166
- """
167
-
168
- def __init__(self, tag, attrs=None, parent=None):
169
- super().__init__(parent)
170
- self.attrs = attrs
171
- self.tag = tag
172
-
173
- def _has_nested_tags(self):
174
- # Returns True if any children are TagNodes and False otherwise.
175
- return any(isinstance(child, TagNode) for child in self.children)
176
-
177
- def write(self, doc, next_child=None):
178
- prioritize_nested_tags = (
179
- self.tag in OMIT_SELF_TAGS and self._has_nested_tags()
180
- )
181
- prioritize_parent_tag = (
182
- isinstance(self.parent, TagNode)
183
- and self.parent.tag in PRIORITY_PARENT_TAGS
184
- and self.tag in OMIT_NESTED_TAGS
185
- )
186
- if prioritize_nested_tags or prioritize_parent_tag:
187
- self._write_children(doc)
188
- return
189
-
190
- self._write_start(doc)
191
- self._write_children(doc)
192
- self._write_end(doc, next_child)
193
-
194
- def collapse_whitespace(self):
195
- """Remove collapsible white-space.
196
-
197
- All tags collapse internal whitespace. Block-display HTML tags also
198
- strip all leading and trailing whitespace.
199
-
200
- Approximately follows the specification used in browsers:
201
- https://www.w3.org/TR/css-text-3/#white-space-rules
202
- https://developer.mozilla.org/en-US/docs/Web/API/Document_Object_Model/Whitespace
203
- """
204
- if self.tag in HTML_BLOCK_DISPLAY_TAGS:
205
- self.lstrip()
206
- self.rstrip()
207
- # Collapse whitespace in situations like ``</b> <i> foo</i>`` into
208
- # ``</b><i> foo</i>``.
209
- for prev, cur in zip(self.children[:-1], self.children[1:]):
210
- if (
211
- isinstance(prev, DataNode)
212
- and prev.endswith_whitespace()
213
- and cur.startswith_whitespace()
214
- ):
215
- cur.lstrip()
216
- # Same logic, but for situations like ``<b>bar </b> <i>``:
217
- for cur, nxt in zip(self.children[:-1], self.children[1:]):
218
- if (
219
- isinstance(nxt, DataNode)
220
- and cur.endswith_whitespace()
221
- and nxt.startswith_whitespace()
222
- ):
223
- cur.rstrip()
224
- # Recurse into children
225
- for child in self.children:
226
- child.collapse_whitespace()
227
-
228
- def _write_start(self, doc):
229
- handler_name = 'start_%s' % self.tag
230
- if hasattr(doc.style, handler_name):
231
- getattr(doc.style, handler_name)(self.attrs)
232
-
233
- def _write_end(self, doc, next_child):
234
- handler_name = 'end_%s' % self.tag
235
- if hasattr(doc.style, handler_name):
236
- if handler_name == 'end_a':
237
- # We use lookahead to determine if a space is needed after a link node
238
- getattr(doc.style, handler_name)(next_child)
239
- else:
240
- getattr(doc.style, handler_name)()
241
-
242
-
243
- class DataNode(Node):
244
- """
245
- A Node that contains only string data.
246
- """
247
-
248
- def __init__(self, data, parent=None):
249
- super().__init__(parent)
250
- if not isinstance(data, str):
251
- raise ValueError("Expecting string type, %s given." % type(data))
252
- self._leading_whitespace = ''
253
- self._trailing_whitespace = ''
254
- self._stripped_data = ''
255
- if data == '':
256
- return
257
- if data.isspace():
258
- self._trailing_whitespace = data
259
- return
260
- first_non_space = next(
261
- idx for idx, ch in enumerate(data) if not ch.isspace()
262
- )
263
- last_non_space = len(data) - next(
264
- idx for idx, ch in enumerate(reversed(data)) if not ch.isspace()
265
- )
266
- self._leading_whitespace = data[:first_non_space]
267
- self._trailing_whitespace = data[last_non_space:]
268
- self._stripped_data = data[first_non_space:last_non_space]
269
-
270
- @property
271
- def data(self):
272
- return (
273
- f'{self._leading_whitespace}{self._stripped_data}'
274
- f'{self._trailing_whitespace}'
275
- )
276
-
277
- def is_whitespace(self):
278
- return self._stripped_data == '' and (
279
- self._leading_whitespace != '' or self._trailing_whitespace != ''
280
- )
281
-
282
- def startswith_whitespace(self):
283
- return self._leading_whitespace != '' or (
284
- self._stripped_data == '' and self._trailing_whitespace != ''
285
- )
286
-
287
- def endswith_whitespace(self):
288
- return self._trailing_whitespace != '' or (
289
- self._stripped_data == '' and self._leading_whitespace != ''
290
- )
291
-
292
- def lstrip(self):
293
- if self._leading_whitespace != '':
294
- self._leading_whitespace = ''
295
- elif self._stripped_data == '':
296
- self.rstrip()
297
-
298
- def rstrip(self):
299
- if self._trailing_whitespace != '':
300
- self._trailing_whitespace = ''
301
- elif self._stripped_data == '':
302
- self.lstrip()
303
-
304
- def collapse_whitespace(self):
305
- """Noop, ``DataNode.write`` always collapses whitespace"""
306
- return
307
-
308
- def write(self, doc):
309
- words = doc.translate_words(self._stripped_data.split())
310
- str_data = (
311
- f'{self._leading_whitespace}{" ".join(words)}'
312
- f'{self._trailing_whitespace}'
313
- )
314
- if str_data != '':
315
- doc.handle_data(str_data)
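A minimal sketch of the parser above in use: botocore's ReSTDocument (from botocore.docs.bcdoc.restdoc) wires DocStringParser to its style handlers via include_doc_string(). The rendered output in the comment is indicative only; exact spacing depends on the botocore version.

from botocore.docs.bcdoc.restdoc import ReSTDocument

doc = ReSTDocument()
# Runs the HTML through DocStringParser (and the collapse_whitespace logic above).
doc.include_doc_string("<p>Creates a <code>Bucket</code> in the region.</p>")
print(doc.getvalue().decode("utf-8"))  # roughly: Creates a ``Bucket`` in the region.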
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/rich/themes.py DELETED
@@ -1,5 +0,0 @@
1
- from .default_styles import DEFAULT_STYLES
2
- from .theme import Theme
3
-
4
-
5
- DEFAULT = Theme(DEFAULT_STYLES)
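DEFAULT simply packages Rich's built-in styles as a Theme; passing it to a Console should behave the same as passing no theme at all. A small sketch using the vendored import paths from this file:

from pip._vendor.rich.console import Console
from pip._vendor.rich.themes import DEFAULT

console = Console(theme=DEFAULT)  # equivalent to Console()
console.print("42", style="repr.number")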
spaces/Billyosoro/ESRGAN/tests/test_discriminator_arch.py DELETED
@@ -1,19 +0,0 @@
1
- import torch
2
-
3
- from realesrgan.archs.discriminator_arch import UNetDiscriminatorSN
4
-
5
-
6
- def test_unetdiscriminatorsn():
7
- """Test arch: UNetDiscriminatorSN."""
8
-
9
- # model init and forward (cpu)
10
- net = UNetDiscriminatorSN(num_in_ch=3, num_feat=4, skip_connection=True)
11
- img = torch.rand((1, 3, 32, 32), dtype=torch.float32)
12
- output = net(img)
13
- assert output.shape == (1, 1, 32, 32)
14
-
15
- # model init and forward (gpu)
16
- if torch.cuda.is_available():
17
- net.cuda()
18
- output = net(img.cuda())
19
- assert output.shape == (1, 1, 32, 32)
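The (1, 1, 32, 32) expectation holds because UNetDiscriminatorSN (whose definition appears later in this diff) is fully convolutional: it returns one realism logit per pixel at the input resolution rather than a single scalar, so the asserted shape tracks the input's spatial size.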
spaces/CVPR/LIVE/pybind11/include/pybind11/pybind11.h DELETED
The diff for this file is too large to render. See raw diff
spaces/CVPR/lama-example/saicinpainting/evaluation/masks/__init__.py DELETED
File without changes
spaces/Callimethee/Imagine-CR/app.py DELETED
@@ -1,29 +0,0 @@
1
- from transformers import GPT2LMHeadModel, GPT2Tokenizer
2
- import torch
3
- import gradio as gr
4
-
5
- tokenizer = GPT2Tokenizer.from_pretrained("./")
6
- model = GPT2LMHeadModel.from_pretrained("./")
7
-
8
-
9
- def generator(input_string):
10
- input_string = "<|startoftext|>" + " " * (input_string != "") + input_string
11
- prompt = torch.tensor(tokenizer.encode(input_string)).unsqueeze(0)
12
-
13
- generated = model.generate(
14
- prompt,
15
- do_sample=True,
16
- top_k=50,
17
- max_length=1024,
18
- top_p=0.95,
19
- num_return_sequences=5,
20
- )
21
- out = ""
22
- for tirade in generated:
23
- out += tokenizer.decode(tirade, skip_special_tokens=True) + "\n\n"
24
- return out
25
-
26
- desc = "> Artificial Intelligence, Eh? Sounds fancy - but it'll never replace geniuses such as myself.\n\n - *Huron Stahlmast, Exiled Hupperdook Engineer*\n\n\nThis generator allows you to generate your own transcripts from an imaginary episode of Critical Role! Input the start of a tirade (or nothing!), and let the magic of machine learning do the rest!\n\nFor the curious among you, this uses a fine-tuned version of GPT2."
27
-
28
- demo = gr.Interface(fn=generator, inputs="textbox", outputs="textbox", title="Critical Role Text Generator", examples=[["MATT:"], ["LAURA: I cast"]], description=desc)
29
- demo.launch()
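One caveat about the generate() call above, noted here rather than in the original file: GPT-2 has no padding token, so transformers warns on every call unless one is supplied. The usual fix is a one-line addition to the existing call, sketched here:

generated = model.generate(
    prompt,
    do_sample=True,
    top_k=50,
    max_length=1024,
    top_p=0.95,
    num_return_sequences=5,
    pad_token_id=tokenizer.eos_token_id,  # silences the missing-pad-token warning
)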
spaces/Caoyunkang/Segment-Any-Anomaly/SAM/CODE_OF_CONDUCT.md DELETED
@@ -1,80 +0,0 @@
1
- # Code of Conduct
2
-
3
- ## Our Pledge
4
-
5
- In the interest of fostering an open and welcoming environment, we as
6
- contributors and maintainers pledge to make participation in our project and
7
- our community a harassment-free experience for everyone, regardless of age, body
8
- size, disability, ethnicity, sex characteristics, gender identity and expression,
9
- level of experience, education, socio-economic status, nationality, personal
10
- appearance, race, religion, or sexual identity and orientation.
11
-
12
- ## Our Standards
13
-
14
- Examples of behavior that contributes to creating a positive environment
15
- include:
16
-
17
- * Using welcoming and inclusive language
18
- * Being respectful of differing viewpoints and experiences
19
- * Gracefully accepting constructive criticism
20
- * Focusing on what is best for the community
21
- * Showing empathy towards other community members
22
-
23
- Examples of unacceptable behavior by participants include:
24
-
25
- * The use of sexualized language or imagery and unwelcome sexual attention or
26
- advances
27
- * Trolling, insulting/derogatory comments, and personal or political attacks
28
- * Public or private harassment
29
- * Publishing others' private information, such as a physical or electronic
30
- address, without explicit permission
31
- * Other conduct which could reasonably be considered inappropriate in a
32
- professional setting
33
-
34
- ## Our Responsibilities
35
-
36
- Project maintainers are responsible for clarifying the standards of acceptable
37
- behavior and are expected to take appropriate and fair corrective action in
38
- response to any instances of unacceptable behavior.
39
-
40
- Project maintainers have the right and responsibility to remove, edit, or
41
- reject comments, commits, code, wiki edits, issues, and other contributions
42
- that are not aligned to this Code of Conduct, or to ban temporarily or
43
- permanently any contributor for other behaviors that they deem inappropriate,
44
- threatening, offensive, or harmful.
45
-
46
- ## Scope
47
-
48
- This Code of Conduct applies within all project spaces, and it also applies when
49
- an individual is representing the project or its community in public spaces.
50
- Examples of representing a project or community include using an official
51
- project e-mail address, posting via an official social media account, or acting
52
- as an appointed representative at an online or offline event. Representation of
53
- a project may be further defined and clarified by project maintainers.
54
-
55
- This Code of Conduct also applies outside the project spaces when there is a
56
- reasonable belief that an individual's behavior may have a negative impact on
57
- the project or its community.
58
-
59
- ## Enforcement
60
-
61
- Instances of abusive, harassing, or otherwise unacceptable behavior may be
62
- reported by contacting the project team at <[email protected]>. All
63
- complaints will be reviewed and investigated and will result in a response that
64
- is deemed necessary and appropriate to the circumstances. The project team is
65
- obligated to maintain confidentiality with regard to the reporter of an incident.
66
- Further details of specific enforcement policies may be posted separately.
67
-
68
- Project maintainers who do not follow or enforce the Code of Conduct in good
69
- faith may face temporary or permanent repercussions as determined by other
70
- members of the project's leadership.
71
-
72
- ## Attribution
73
-
74
- This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
75
- available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
76
-
77
- [homepage]: https://www.contributor-covenant.org
78
-
79
- For answers to common questions about this code of conduct, see
80
- https://www.contributor-covenant.org/faq
spaces/Cloudyy/bark-voice-cloning/hubert/__init__.py DELETED
File without changes
spaces/CognitiveLabs/Research-Assistant/config/config.py DELETED
@@ -1,82 +0,0 @@
1
- """Configuration class to store the state of bools for different scripts access."""
2
- import os
3
-
4
- import openai
5
- from colorama import Fore
6
- from dotenv import load_dotenv
7
-
8
- from config.singleton import Singleton
9
-
10
- load_dotenv(verbose=True)
11
-
12
-
13
- class Config(metaclass=Singleton):
14
- """
15
- Configuration class that stores shared configuration state for access by the other scripts.
16
- """
17
-
18
- def __init__(self) -> None:
19
- """Initialize the Config class"""
20
- self.debug_mode = False
21
- self.allow_downloads = False
22
-
23
- self.selenium_web_browser = os.getenv("USE_WEB_BROWSER", "chrome")
24
- self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
25
- self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
26
- self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 8000))
27
- self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
28
- self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192))
29
-
30
- self.openai_api_key = os.getenv("OPENAI_API_KEY")
31
- self.openai_api_base = os.getenv("OPENAI_API_BASE", openai.api_base)
32
- self.temperature = float(os.getenv("TEMPERATURE", "1"))
33
-
34
- self.user_agent = os.getenv(
35
- "USER_AGENT",
36
- "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36"
37
- " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
38
- )
39
-
40
- self.memory_backend = os.getenv("MEMORY_BACKEND", "local")
41
- # Initialize the OpenAI API client
42
- openai.api_key = self.openai_api_key
43
-
44
- def set_fast_llm_model(self, value: str) -> None:
45
- """Set the fast LLM model value."""
46
- self.fast_llm_model = value
47
-
48
- def set_smart_llm_model(self, value: str) -> None:
49
- """Set the smart LLM model value."""
50
- self.smart_llm_model = value
51
-
52
- def set_fast_token_limit(self, value: int) -> None:
53
- """Set the fast token limit value."""
54
- self.fast_token_limit = value
55
-
56
- def set_smart_token_limit(self, value: int) -> None:
57
- """Set the smart token limit value."""
58
- self.smart_token_limit = value
59
-
60
- def set_browse_chunk_max_length(self, value: int) -> None:
61
- """Set the browse_website command chunk max length value."""
62
- self.browse_chunk_max_length = value
63
-
64
- def set_openai_api_key(self, value: str) -> None:
65
- """Set the OpenAI API key value."""
66
- self.openai_api_key = value
67
-
68
- def set_debug_mode(self, value: bool) -> None:
69
- """Set the debug mode value."""
70
- self.debug_mode = value
71
-
72
-
73
- def check_openai_api_key() -> None:
74
- """Check if the OpenAI API key is set in config.py or as an environment variable."""
75
- cfg = Config()
76
- if not cfg.openai_api_key:
77
- print(
78
- Fore.RED
79
- + "Please set your OpenAI API key in .env or as an environment variable."
80
- )
81
- print("You can get your key from https://platform.openai.com/account/api-keys")
82
- exit(1)
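The practical effect of the Singleton metaclass (imported from config.singleton, which is not part of this diff) is that every Config() call returns the same cached instance, so the setters mutate globally visible state. A small sketch under that assumption:

from config.config import Config

a = Config()
b = Config()
assert a is b  # the metaclass hands back the cached instance

a.set_smart_llm_model("gpt-4")
print(b.smart_llm_model)  # "gpt-4" - the change is visible through every reference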
spaces/CoreyMorris/MMLU-by-task-Leaderboard/test_paths.py DELETED
@@ -1,19 +0,0 @@
- import unittest
- import os
-
- class TestPaths(unittest.TestCase):
-     def test_path_exists(self):
-         # test that the 'results' path exists
-         self.assertTrue(os.path.exists('results'))
-
-     def test_results_directory_is_not_empty(self):
-         # test that the results directory is not empty
-         self.assertGreater(len(os.listdir('results')), 0)
-
-     def test_results_contain_json_files(self):
-         # test that the results directory contains JSON files in its subdirectories
-         # get a list of all the subdirectories
-         subdirectories = [x[0] for x in os.walk('results')]
-         # check that a subdirectory is non-empty; only check one subdirectory
-         subdirectory = subdirectories[1]
-         self.assertGreater(len(os.listdir(subdirectory)), 0)
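The subdirectories[1] index in the last test works because os.walk yields the root directory itself as its first entry; index 1 is therefore the first subdirectory. A quick illustration (the directory name in the comment is hypothetical):

import os

subdirectories = [x[0] for x in os.walk('results')]
print(subdirectories[:2])  # ['results', 'results/<first-model-dir>']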
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/ldm/data/util.py DELETED
@@ -1,24 +0,0 @@
1
- import torch
2
-
3
- from ldm.modules.midas.api import load_midas_transform
4
-
5
-
6
- class AddMiDaS(object):
7
- def __init__(self, model_type):
8
- super().__init__()
9
- self.transform = load_midas_transform(model_type)
10
-
11
- def pt2np(self, x):
12
- x = ((x + 1.0) * .5).detach().cpu().numpy()
13
- return x
14
-
15
- def np2pt(self, x):
16
- x = torch.from_numpy(x) * 2 - 1.
17
- return x
18
-
19
- def __call__(self, sample):
20
- # sample['jpg'] is tensor hwc in [-1, 1] at this point
21
- x = self.pt2np(sample['jpg'])
22
- x = self.transform({"image": x})["image"]
23
- sample['midas_in'] = x
24
- return sample
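A minimal usage sketch for AddMiDaS; the "dpt_hybrid" model_type is an assumption about which MiDaS variant is configured in ldm.modules.midas.api, and loading it requires the corresponding checkpoint:

import torch

transform = AddMiDaS(model_type="dpt_hybrid")
sample = {"jpg": torch.rand(384, 384, 3) * 2 - 1}  # HWC tensor in [-1, 1]
sample = transform(sample)
print(sample["midas_in"].shape)  # resized/normalized input array for MiDaS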
spaces/Cvandi/remake/realesrgan/archs/discriminator_arch.py DELETED
@@ -1,67 +0,0 @@
1
- from basicsr.utils.registry import ARCH_REGISTRY
2
- from torch import nn as nn
3
- from torch.nn import functional as F
4
- from torch.nn.utils import spectral_norm
5
-
6
-
7
- @ARCH_REGISTRY.register()
8
- class UNetDiscriminatorSN(nn.Module):
9
- """Defines a U-Net discriminator with spectral normalization (SN)
10
-
11
- It is used in Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.
12
-
13
- Arg:
14
- num_in_ch (int): Channel number of inputs. Default: 3.
15
- num_feat (int): Channel number of base intermediate features. Default: 64.
16
- skip_connection (bool): Whether to use skip connections between U-Net. Default: True.
17
- """
18
-
19
- def __init__(self, num_in_ch, num_feat=64, skip_connection=True):
20
- super(UNetDiscriminatorSN, self).__init__()
21
- self.skip_connection = skip_connection
22
- norm = spectral_norm
23
- # the first convolution
24
- self.conv0 = nn.Conv2d(num_in_ch, num_feat, kernel_size=3, stride=1, padding=1)
25
- # downsample
26
- self.conv1 = norm(nn.Conv2d(num_feat, num_feat * 2, 4, 2, 1, bias=False))
27
- self.conv2 = norm(nn.Conv2d(num_feat * 2, num_feat * 4, 4, 2, 1, bias=False))
28
- self.conv3 = norm(nn.Conv2d(num_feat * 4, num_feat * 8, 4, 2, 1, bias=False))
29
- # upsample
30
- self.conv4 = norm(nn.Conv2d(num_feat * 8, num_feat * 4, 3, 1, 1, bias=False))
31
- self.conv5 = norm(nn.Conv2d(num_feat * 4, num_feat * 2, 3, 1, 1, bias=False))
32
- self.conv6 = norm(nn.Conv2d(num_feat * 2, num_feat, 3, 1, 1, bias=False))
33
- # extra convolutions
34
- self.conv7 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
35
- self.conv8 = norm(nn.Conv2d(num_feat, num_feat, 3, 1, 1, bias=False))
36
- self.conv9 = nn.Conv2d(num_feat, 1, 3, 1, 1)
37
-
38
- def forward(self, x):
39
- # downsample
40
- x0 = F.leaky_relu(self.conv0(x), negative_slope=0.2, inplace=True)
41
- x1 = F.leaky_relu(self.conv1(x0), negative_slope=0.2, inplace=True)
42
- x2 = F.leaky_relu(self.conv2(x1), negative_slope=0.2, inplace=True)
43
- x3 = F.leaky_relu(self.conv3(x2), negative_slope=0.2, inplace=True)
44
-
45
- # upsample
46
- x3 = F.interpolate(x3, scale_factor=2, mode='bilinear', align_corners=False)
47
- x4 = F.leaky_relu(self.conv4(x3), negative_slope=0.2, inplace=True)
48
-
49
- if self.skip_connection:
50
- x4 = x4 + x2
51
- x4 = F.interpolate(x4, scale_factor=2, mode='bilinear', align_corners=False)
52
- x5 = F.leaky_relu(self.conv5(x4), negative_slope=0.2, inplace=True)
53
-
54
- if self.skip_connection:
55
- x5 = x5 + x1
56
- x5 = F.interpolate(x5, scale_factor=2, mode='bilinear', align_corners=False)
57
- x6 = F.leaky_relu(self.conv6(x5), negative_slope=0.2, inplace=True)
58
-
59
- if self.skip_connection:
60
- x6 = x6 + x0
61
-
62
- # extra convolutions
63
- out = F.leaky_relu(self.conv7(x6), negative_slope=0.2, inplace=True)
64
- out = F.leaky_relu(self.conv8(out), negative_slope=0.2, inplace=True)
65
- out = self.conv9(out)
66
-
67
- return out
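Two practical notes on this architecture, editorial rather than from the original file: the three stride-2 downsampling convolutions mean input height and width should be divisible by 8 so the bilinear upsampling lands back on the encoder resolutions for the skip connections, and the output is a per-pixel logit map rather than a single scalar. A minimal sketch:

import torch

net = UNetDiscriminatorSN(num_in_ch=3, num_feat=64, skip_connection=True)
x = torch.rand(1, 3, 64, 64)  # H and W divisible by 8
out = net(x)
print(out.shape)  # torch.Size([1, 1, 64, 64]) - one realism logit per pixel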
spaces/DAMO-NLP-SG/Video-LLaMA/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: Video LLaMA
3
- emoji: 🚀
4
- colorFrom: purple
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 3.29.0
8
- app_file: app.py
9
- pinned: false
10
- license: other
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
14
-
spaces/DHEIVER/timeseries-anomaly-detection-autoencoders/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Timeseries Anomaly Detection
3
- emoji: 🌍
4
- colorFrom: blue
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.0.1
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: keras-io/timeseries-anomaly-detection-autoencoders
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/charset_normalizer/utils.py DELETED
@@ -1,414 +0,0 @@
1
- import importlib
2
- import logging
3
- import unicodedata
4
- from codecs import IncrementalDecoder
5
- from encodings.aliases import aliases
6
- from functools import lru_cache
7
- from re import findall
8
- from typing import Generator, List, Optional, Set, Tuple, Union
9
-
10
- from _multibytecodec import MultibyteIncrementalDecoder
11
-
12
- from .constant import (
13
- ENCODING_MARKS,
14
- IANA_SUPPORTED_SIMILAR,
15
- RE_POSSIBLE_ENCODING_INDICATION,
16
- UNICODE_RANGES_COMBINED,
17
- UNICODE_SECONDARY_RANGE_KEYWORD,
18
- UTF8_MAXIMAL_ALLOCATION,
19
- )
20
-
21
-
22
- @lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
23
- def is_accentuated(character: str) -> bool:
24
- try:
25
-        description: str = unicodedata.name(character)
-    except ValueError:
-        return False
-    return (
-        "WITH GRAVE" in description
-        or "WITH ACUTE" in description
-        or "WITH CEDILLA" in description
-        or "WITH DIAERESIS" in description
-        or "WITH CIRCUMFLEX" in description
-        or "WITH TILDE" in description
-    )
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def remove_accent(character: str) -> str:
-    decomposed: str = unicodedata.decomposition(character)
-    if not decomposed:
-        return character
-
-    codes: List[str] = decomposed.split(" ")
-
-    return chr(int(codes[0], 16))
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def unicode_range(character: str) -> Optional[str]:
-    """
-    Retrieve the official Unicode range name for a single character.
-    """
-    character_ord: int = ord(character)
-
-    for range_name, ord_range in UNICODE_RANGES_COMBINED.items():
-        if character_ord in ord_range:
-            return range_name
-
-    return None
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_latin(character: str) -> bool:
-    try:
-        description: str = unicodedata.name(character)
-    except ValueError:
-        return False
-    return "LATIN" in description
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_ascii(character: str) -> bool:
-    try:
-        character.encode("ascii")
-    except UnicodeEncodeError:
-        return False
-    return True
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_punctuation(character: str) -> bool:
-    character_category: str = unicodedata.category(character)
-
-    if "P" in character_category:
-        return True
-
-    character_range: Optional[str] = unicode_range(character)
-
-    if character_range is None:
-        return False
-
-    return "Punctuation" in character_range
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_symbol(character: str) -> bool:
-    character_category: str = unicodedata.category(character)
-
-    if "S" in character_category or "N" in character_category:
-        return True
-
-    character_range: Optional[str] = unicode_range(character)
-
-    if character_range is None:
-        return False
-
-    return "Forms" in character_range
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_emoticon(character: str) -> bool:
-    character_range: Optional[str] = unicode_range(character)
-
-    if character_range is None:
-        return False
-
-    return "Emoticons" in character_range
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_separator(character: str) -> bool:
-    if character.isspace() or character in {"|", "+", "<", ">"}:
-        return True
-
-    character_category: str = unicodedata.category(character)
-
-    return "Z" in character_category or character_category in {"Po", "Pd", "Pc"}
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_case_variable(character: str) -> bool:
-    return character.islower() != character.isupper()
-
-
-def is_private_use_only(character: str) -> bool:
-    character_category: str = unicodedata.category(character)
-
-    return character_category == "Co"
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_cjk(character: str) -> bool:
-    try:
-        character_name = unicodedata.name(character)
-    except ValueError:
-        return False
-
-    return "CJK" in character_name
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_hiragana(character: str) -> bool:
-    try:
-        character_name = unicodedata.name(character)
-    except ValueError:
-        return False
-
-    return "HIRAGANA" in character_name
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_katakana(character: str) -> bool:
-    try:
-        character_name = unicodedata.name(character)
-    except ValueError:
-        return False
-
-    return "KATAKANA" in character_name
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_hangul(character: str) -> bool:
-    try:
-        character_name = unicodedata.name(character)
-    except ValueError:
-        return False
-
-    return "HANGUL" in character_name
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_thai(character: str) -> bool:
-    try:
-        character_name = unicodedata.name(character)
-    except ValueError:
-        return False
-
-    return "THAI" in character_name
-
-
-@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
-def is_unicode_range_secondary(range_name: str) -> bool:
-    return any(keyword in range_name for keyword in UNICODE_SECONDARY_RANGE_KEYWORD)
-
-
-@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
-def is_unprintable(character: str) -> bool:
-    return (
-        character.isspace() is False  # includes \n \t \r \v
-        and character.isprintable() is False
-        and character != "\x1A"  # Why? It's the ASCII substitute character.
-        and character != "\ufeff"  # bug discovered in Python: the Zero Width No-Break Space
-        # (Arabic Presentation Forms-B, Unicode 1.1) is not acknowledged as a space.
-    )
-
-
-def any_specified_encoding(sequence: bytes, search_zone: int = 4096) -> Optional[str]:
-    """
-    Extract any declared encoding from the first n bytes, using an ASCII-only decoder.
-    """
-    if not isinstance(sequence, bytes):
-        raise TypeError
-
-    seq_len: int = len(sequence)
-
-    results: List[str] = findall(
-        RE_POSSIBLE_ENCODING_INDICATION,
-        sequence[: min(seq_len, search_zone)].decode("ascii", errors="ignore"),
-    )
-
-    if len(results) == 0:
-        return None
-
-    for specified_encoding in results:
-        specified_encoding = specified_encoding.lower().replace("-", "_")
-
-        encoding_alias: str
-        encoding_iana: str
-
-        for encoding_alias, encoding_iana in aliases.items():
-            if encoding_alias == specified_encoding:
-                return encoding_iana
-            if encoding_iana == specified_encoding:
-                return encoding_iana
-
-    return None
-
-
-@lru_cache(maxsize=128)
-def is_multi_byte_encoding(name: str) -> bool:
-    """
-    Verify whether a specific encoding is multi-byte, based on its IANA name.
-    """
-    return name in {
-        "utf_8",
-        "utf_8_sig",
-        "utf_16",
-        "utf_16_be",
-        "utf_16_le",
-        "utf_32",
-        "utf_32_le",
-        "utf_32_be",
-        "utf_7",
-    } or issubclass(
-        importlib.import_module("encodings.{}".format(name)).IncrementalDecoder,
-        MultibyteIncrementalDecoder,
-    )
-
-
-def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]:
-    """
-    Identify and extract a SIG/BOM from the given sequence.
-    """
-
-    for iana_encoding in ENCODING_MARKS:
-        marks: Union[bytes, List[bytes]] = ENCODING_MARKS[iana_encoding]
-
-        if isinstance(marks, bytes):
-            marks = [marks]
-
-        for mark in marks:
-            if sequence.startswith(mark):
-                return iana_encoding, mark
-
-    return None, b""
-
-
-def should_strip_sig_or_bom(iana_encoding: str) -> bool:
-    return iana_encoding not in {"utf_16", "utf_32"}
-
-
-def iana_name(cp_name: str, strict: bool = True) -> str:
-    cp_name = cp_name.lower().replace("-", "_")
-
-    encoding_alias: str
-    encoding_iana: str
-
-    for encoding_alias, encoding_iana in aliases.items():
-        if cp_name in [encoding_alias, encoding_iana]:
-            return encoding_iana
-
-    if strict:
-        raise ValueError("Unable to retrieve IANA for '{}'".format(cp_name))
-
-    return cp_name
-
-
-def range_scan(decoded_sequence: str) -> List[str]:
-    ranges: Set[str] = set()
-
-    for character in decoded_sequence:
-        character_range: Optional[str] = unicode_range(character)
-
-        if character_range is None:
-            continue
-
-        ranges.add(character_range)
-
-    return list(ranges)
-
-
-def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
-    if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
-        return 0.0
-
-    decoder_a = importlib.import_module(
-        "encodings.{}".format(iana_name_a)
-    ).IncrementalDecoder
-    decoder_b = importlib.import_module(
-        "encodings.{}".format(iana_name_b)
-    ).IncrementalDecoder
-
-    id_a: IncrementalDecoder = decoder_a(errors="ignore")
-    id_b: IncrementalDecoder = decoder_b(errors="ignore")
-
-    character_match_count: int = 0
-
-    for i in range(255):
-        to_be_decoded: bytes = bytes([i])
-        if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
-            character_match_count += 1
-
-    return character_match_count / 254
-
-
-def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
-    """
-    Determine if two code pages are at least 80% similar. The IANA_SUPPORTED_SIMILAR
-    dict was generated using the cp_similarity function.
-    """
-    return (
-        iana_name_a in IANA_SUPPORTED_SIMILAR
-        and iana_name_b in IANA_SUPPORTED_SIMILAR[iana_name_a]
-    )
-
-
-def set_logging_handler(
-    name: str = "charset_normalizer",
-    level: int = logging.INFO,
-    format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
-) -> None:
-    logger = logging.getLogger(name)
-    logger.setLevel(level)
-
-    handler = logging.StreamHandler()
-    handler.setFormatter(logging.Formatter(format_string))
-    logger.addHandler(handler)
-
-
-def cut_sequence_chunks(
-    sequences: bytes,
-    encoding_iana: str,
-    offsets: range,
-    chunk_size: int,
-    bom_or_sig_available: bool,
-    strip_sig_or_bom: bool,
-    sig_payload: bytes,
-    is_multi_byte_decoder: bool,
-    decoded_payload: Optional[str] = None,
-) -> Generator[str, None, None]:
-    if decoded_payload and is_multi_byte_decoder is False:
-        for i in offsets:
-            chunk = decoded_payload[i : i + chunk_size]
-            if not chunk:
-                break
-            yield chunk
-    else:
-        for i in offsets:
-            chunk_end = i + chunk_size
-            if chunk_end > len(sequences) + 8:
-                continue
-
-            cut_sequence = sequences[i : i + chunk_size]
-
-            if bom_or_sig_available and strip_sig_or_bom is False:
-                cut_sequence = sig_payload + cut_sequence
-
-            chunk = cut_sequence.decode(
-                encoding_iana,
-                errors="ignore" if is_multi_byte_decoder else "strict",
-            )
-
-            # multi-byte bad cutting detector and adjustment
-            # not the cleanest way to perform that fix but clever enough for now.
-            if is_multi_byte_decoder and i > 0:
-                chunk_partial_size_chk: int = min(chunk_size, 16)
-
-                if (
-                    decoded_payload
-                    and chunk[:chunk_partial_size_chk] not in decoded_payload
-                ):
-                    for j in range(i, i - 4, -1):
-                        cut_sequence = sequences[j:chunk_end]
-
-                        if bom_or_sig_available and strip_sig_or_bom is False:
-                            cut_sequence = sig_payload + cut_sequence
-
-                        chunk = cut_sequence.decode(encoding_iana, errors="ignore")
-
-                        if chunk[:chunk_partial_size_chk] in decoded_payload:
-                            break
-
-            yield chunk
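
Taken together, the helpers above classify single characters by Unicode name, category, and range. A minimal usage sketch, assuming the deleted module is importable as charset_normalizer.utils:

from charset_normalizer.utils import is_punctuation, remove_accent, unicode_range

# unicode_range() maps a character to its official Unicode block name.
assert unicode_range("a") == "Basic Latin"

# remove_accent() keeps only the base code point of the canonical decomposition.
assert remove_accent("é") == "e"

# is_punctuation() accepts both the "P*" categories and punctuation-named ranges.
assert is_punctuation("!") is True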
 
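The encoding-oriented helpers are just as self-contained. A short sketch of the BOM and declared-encoding paths, under the same import assumption; note that Python's codec alias table canonicalizes ISO-8859-1 to latin_1:

from charset_normalizer.utils import (
    any_specified_encoding,
    iana_name,
    identify_sig_or_bom,
)

# identify_sig_or_bom() returns the encoding whose BOM prefixes the payload, plus the mark.
encoding, mark = identify_sig_or_bom(b"\xef\xbb\xbfhello")
assert encoding == "utf_8" and mark == b"\xef\xbb\xbf"

# any_specified_encoding() scans the head of the payload for an in-band declaration.
assert any_specified_encoding(b'<?xml version="1.0" encoding="ISO-8859-1"?>') == "latin_1"

# iana_name() normalizes aliases to the canonical Python codec name.
assert iana_name("UTF-8") == "utf_8"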
 
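cut_sequence_chunks() is the slicing workhorse: it decodes fixed-size windows of the payload and, for multi-byte codecs, walks a window start back by up to four bytes when a cut lands mid-code-point. A hedged sketch of the call shape, with illustrative parameter values:

from charset_normalizer.utils import cut_sequence_chunks

payload = "naïve text, repeated. ".encode("utf_8") * 64

chunks = list(
    cut_sequence_chunks(
        payload,
        encoding_iana="utf_8",
        offsets=range(0, len(payload), 128),
        chunk_size=128,
        bom_or_sig_available=False,
        strip_sig_or_bom=False,
        sig_payload=b"",
        is_multi_byte_decoder=True,
        decoded_payload=payload.decode("utf_8"),
    )
)
assert all(isinstance(chunk, str) for chunk in chunks)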
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fastapi/utils.py DELETED
@@ -1,228 +0,0 @@
-import re
-import warnings
-from dataclasses import is_dataclass
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Dict,
-    MutableMapping,
-    Optional,
-    Set,
-    Type,
-    Union,
-    cast,
-)
-from weakref import WeakKeyDictionary
-
-import fastapi
-from fastapi._compat import (
-    PYDANTIC_V2,
-    BaseConfig,
-    ModelField,
-    PydanticSchemaGenerationError,
-    Undefined,
-    UndefinedType,
-    Validator,
-    lenient_issubclass,
-)
-from fastapi.datastructures import DefaultPlaceholder, DefaultType
-from pydantic import BaseModel, create_model
-from pydantic.fields import FieldInfo
-from typing_extensions import Literal
-
-if TYPE_CHECKING:  # pragma: nocover
-    from .routing import APIRoute
-
-# Cache for `create_cloned_field`
-_CLONED_TYPES_CACHE: MutableMapping[
-    Type[BaseModel], Type[BaseModel]
-] = WeakKeyDictionary()
-
-
-def is_body_allowed_for_status_code(status_code: Union[int, str, None]) -> bool:
-    if status_code is None:
-        return True
-    # Ref: https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.0.md#patterned-fields-1
-    if status_code in {
-        "default",
-        "1XX",
-        "2XX",
-        "3XX",
-        "4XX",
-        "5XX",
-    }:
-        return True
-    current_status_code = int(status_code)
-    return not (current_status_code < 200 or current_status_code in {204, 304})
-
-
-def get_path_param_names(path: str) -> Set[str]:
-    return set(re.findall("{(.*?)}", path))
-
-
-def create_response_field(
-    name: str,
-    type_: Type[Any],
-    class_validators: Optional[Dict[str, Validator]] = None,
-    default: Optional[Any] = Undefined,
-    required: Union[bool, UndefinedType] = Undefined,
-    model_config: Type[BaseConfig] = BaseConfig,
-    field_info: Optional[FieldInfo] = None,
-    alias: Optional[str] = None,
-    mode: Literal["validation", "serialization"] = "validation",
-) -> ModelField:
-    """
-    Create a new response field. Raises if type_ is invalid.
-    """
-    class_validators = class_validators or {}
-    if PYDANTIC_V2:
-        field_info = field_info or FieldInfo(
-            annotation=type_, default=default, alias=alias
-        )
-    else:
-        field_info = field_info or FieldInfo()
-    kwargs = {"name": name, "field_info": field_info}
-    if PYDANTIC_V2:
-        kwargs.update({"mode": mode})
-    else:
-        kwargs.update(
-            {
-                "type_": type_,
-                "class_validators": class_validators,
-                "default": default,
-                "required": required,
-                "model_config": model_config,
-                "alias": alias,
-            }
-        )
-    try:
-        return ModelField(**kwargs)  # type: ignore[arg-type]
-    except (RuntimeError, PydanticSchemaGenerationError):
-        raise fastapi.exceptions.FastAPIError(
-            "Invalid args for response field! Hint: "
-            f"check that {type_} is a valid Pydantic field type. "
-            "If you are using a return type annotation that is not a valid Pydantic "
-            "field (e.g. Union[Response, dict, None]) you can disable generating the "
-            "response model from the type annotation with the path operation decorator "
-            "parameter response_model=None. Read more: "
-            "https://fastapi.tiangolo.com/tutorial/response-model/"
-        ) from None
-
-
-def create_cloned_field(
-    field: ModelField,
-    *,
-    cloned_types: Optional[MutableMapping[Type[BaseModel], Type[BaseModel]]] = None,
-) -> ModelField:
-    if PYDANTIC_V2:
-        return field
-    # cloned_types caches already cloned types to support recursive models and improve
-    # performance by avoiding unnecessary cloning
-    if cloned_types is None:
-        cloned_types = _CLONED_TYPES_CACHE
-
-    original_type = field.type_
-    if is_dataclass(original_type) and hasattr(original_type, "__pydantic_model__"):
-        original_type = original_type.__pydantic_model__
-    use_type = original_type
-    if lenient_issubclass(original_type, BaseModel):
-        original_type = cast(Type[BaseModel], original_type)
-        use_type = cloned_types.get(original_type)
-        if use_type is None:
-            use_type = create_model(original_type.__name__, __base__=original_type)
-            cloned_types[original_type] = use_type
-            for f in original_type.__fields__.values():
-                use_type.__fields__[f.name] = create_cloned_field(
-                    f, cloned_types=cloned_types
-                )
-    new_field = create_response_field(name=field.name, type_=use_type)
-    new_field.has_alias = field.has_alias  # type: ignore[attr-defined]
-    new_field.alias = field.alias  # type: ignore[misc]
-    new_field.class_validators = field.class_validators  # type: ignore[attr-defined]
-    new_field.default = field.default  # type: ignore[misc]
-    new_field.required = field.required  # type: ignore[misc]
-    new_field.model_config = field.model_config  # type: ignore[attr-defined]
-    new_field.field_info = field.field_info
-    new_field.allow_none = field.allow_none  # type: ignore[attr-defined]
-    new_field.validate_always = field.validate_always  # type: ignore[attr-defined]
-    if field.sub_fields:  # type: ignore[attr-defined]
-        new_field.sub_fields = [  # type: ignore[attr-defined]
-            create_cloned_field(sub_field, cloned_types=cloned_types)
-            for sub_field in field.sub_fields  # type: ignore[attr-defined]
-        ]
-    if field.key_field:  # type: ignore[attr-defined]
-        new_field.key_field = create_cloned_field(  # type: ignore[attr-defined]
-            field.key_field, cloned_types=cloned_types  # type: ignore[attr-defined]
-        )
-    new_field.validators = field.validators  # type: ignore[attr-defined]
-    new_field.pre_validators = field.pre_validators  # type: ignore[attr-defined]
-    new_field.post_validators = field.post_validators  # type: ignore[attr-defined]
-    new_field.parse_json = field.parse_json  # type: ignore[attr-defined]
-    new_field.shape = field.shape  # type: ignore[attr-defined]
-    new_field.populate_validators()  # type: ignore[attr-defined]
-    return new_field
-
-
-def generate_operation_id_for_path(
-    *, name: str, path: str, method: str
-) -> str:  # pragma: nocover
-    warnings.warn(
-        "fastapi.utils.generate_operation_id_for_path() was deprecated, "
-        "it is not used internally, and will be removed soon",
-        DeprecationWarning,
-        stacklevel=2,
-    )
-    operation_id = name + path
-    operation_id = re.sub(r"\W", "_", operation_id)
-    operation_id = operation_id + "_" + method.lower()
-    return operation_id
-
-
-def generate_unique_id(route: "APIRoute") -> str:
-    operation_id = route.name + route.path_format
-    operation_id = re.sub(r"\W", "_", operation_id)
-    assert route.methods
-    operation_id = operation_id + "_" + list(route.methods)[0].lower()
-    return operation_id
-
-
-def deep_dict_update(main_dict: Dict[Any, Any], update_dict: Dict[Any, Any]) -> None:
-    for key, value in update_dict.items():
-        if (
-            key in main_dict
-            and isinstance(main_dict[key], dict)
-            and isinstance(value, dict)
-        ):
-            deep_dict_update(main_dict[key], value)
-        elif (
-            key in main_dict
-            and isinstance(main_dict[key], list)
-            and isinstance(update_dict[key], list)
-        ):
-            main_dict[key] = main_dict[key] + update_dict[key]
-        else:
-            main_dict[key] = value
-
-
-def get_value_or_default(
-    first_item: Union[DefaultPlaceholder, DefaultType],
-    *extra_items: Union[DefaultPlaceholder, DefaultType],
-) -> Union[DefaultPlaceholder, DefaultType]:
-    """
-    Pass items or `DefaultPlaceholder`s by descending priority.
-
-    The first one to _not_ be a `DefaultPlaceholder` will be returned.
-
-    Otherwise, the first item (a `DefaultPlaceholder`) will be returned.
-    """
-    items = (first_item,) + extra_items
-    for item in items:
-        if not isinstance(item, DefaultPlaceholder):
-            return item
-    return first_item
-
-
-def match_pydantic_error_url(error_type: str) -> Any:
-    from dirty_equals import IsStr
-
-    return IsStr(regex=rf"^https://errors\.pydantic\.dev/.*/v/{error_type}")
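
Two of these helpers are easy to exercise in isolation. A small sketch, assuming the module is still installed as fastapi.utils:

from fastapi.datastructures import DefaultPlaceholder
from fastapi.utils import deep_dict_update, get_value_or_default

# deep_dict_update() merges nested dicts in place and concatenates lists.
base = {"a": {"x": 1}, "tags": ["one"]}
deep_dict_update(base, {"a": {"y": 2}, "tags": ["two"]})
assert base == {"a": {"x": 1, "y": 2}, "tags": ["one", "two"]}

# get_value_or_default() returns the first argument that is not a DefaultPlaceholder.
assert get_value_or_default(DefaultPlaceholder(None), 42) == 42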
 
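is_body_allowed_for_status_code() encodes the OpenAPI rule that informational (1xx), 204, and 304 responses carry no body; a quick check under the same assumption:

from fastapi.utils import is_body_allowed_for_status_code

assert is_body_allowed_for_status_code(200) is True
assert is_body_allowed_for_status_code(204) is False  # No Content
assert is_body_allowed_for_status_code(304) is False  # Not Modified
assert is_body_allowed_for_status_code("2XX") is True  # OpenAPI range patterns pass through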
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_util.py DELETED
@@ -1,135 +0,0 @@
-from typing import Any, Dict, NoReturn, Pattern, Tuple, Type, TypeVar, Union
-
-__all__ = [
-    "ProtocolError",
-    "LocalProtocolError",
-    "RemoteProtocolError",
-    "validate",
-    "bytesify",
-]
-
-
-class ProtocolError(Exception):
-    """Exception indicating a violation of the HTTP/1.1 protocol.
-
-    This is an abstract base class, with two concrete subclasses:
-    :exc:`LocalProtocolError`, which indicates that you tried to do something
-    that HTTP/1.1 says is illegal, and :exc:`RemoteProtocolError`, which
-    indicates that the remote peer tried to do something that HTTP/1.1 says is
-    illegal. See :ref:`error-handling` for details.
-
-    In addition to the normal :exc:`Exception` features, it has one attribute:
-
-    .. attribute:: error_status_hint
-
-       This gives a suggestion as to what status code a server might use if
-       this error occurred as part of a request.
-
-       For a :exc:`RemoteProtocolError`, this is useful as a suggestion for
-       how you might want to respond to a misbehaving peer, if you're
-       implementing a server.
-
-       For a :exc:`LocalProtocolError`, this can be taken as a suggestion for
-       how your peer might have responded to *you* if h11 had allowed you to
-       continue.
-
-    The default is 400 Bad Request, a generic catch-all for protocol
-    violations.
-
-    """
-
-    def __init__(self, msg: str, error_status_hint: int = 400) -> None:
-        if type(self) is ProtocolError:
-            raise TypeError("tried to directly instantiate ProtocolError")
-        Exception.__init__(self, msg)
-        self.error_status_hint = error_status_hint
-
-
-# Strategy: there are a number of public APIs where a LocalProtocolError can
-# be raised (send(), all the different event constructors, ...), and only one
-# public API where RemoteProtocolError can be raised
-# (receive_data()). Therefore we always raise LocalProtocolError internally,
-# and then receive_data will translate this into a RemoteProtocolError.
-#
-# Internally:
-#   LocalProtocolError is the generic "ProtocolError".
-# Externally:
-#   LocalProtocolError is for local errors and RemoteProtocolError is for
-#   remote errors.
-class LocalProtocolError(ProtocolError):
-    def _reraise_as_remote_protocol_error(self) -> NoReturn:
-        # After catching a LocalProtocolError, use this method to re-raise it
-        # as a RemoteProtocolError. This method must be called from inside an
-        # except: block.
-        #
-        # An easy way to get an equivalent RemoteProtocolError is just to
-        # modify 'self' in place.
-        self.__class__ = RemoteProtocolError  # type: ignore
-        # But the re-raising is somewhat non-trivial -- you might think that,
-        # now that we've modified the in-flight exception object, just
-        # doing 'raise' to re-raise it would be enough. But it turns out that
-        # this doesn't work, because Python tracks the exception type
-        # (exc_info[0]) separately from the exception object (exc_info[1]),
-        # and we only modified the latter. So we really do need to re-raise
-        # the new type explicitly.
-        # On py3, the traceback is part of the exception object, so our
-        # in-place modification preserved it and we can just re-raise:
-        raise self
-
-
-class RemoteProtocolError(ProtocolError):
-    pass
-
-
-def validate(
-    regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any
-) -> Dict[str, bytes]:
-    match = regex.fullmatch(data)
-    if not match:
-        if format_args:
-            msg = msg.format(*format_args)
-        raise LocalProtocolError(msg)
-    return match.groupdict()
-
-
-# Sentinel values
-#
-# - Inherit identity-based comparison and hashing from object
-# - Have a nice repr
-# - Have a *bonus property*: type(sentinel) is sentinel
-#
-# The bonus property is useful if you want to take the return value from
-# next_event() and do some sort of dispatch based on type(event).
-
-_T_Sentinel = TypeVar("_T_Sentinel", bound="Sentinel")
-
-
-class Sentinel(type):
-    def __new__(
-        cls: Type[_T_Sentinel],
-        name: str,
-        bases: Tuple[type, ...],
-        namespace: Dict[str, Any],
-        **kwds: Any
-    ) -> _T_Sentinel:
-        assert bases == (Sentinel,)
-        v = super().__new__(cls, name, bases, namespace, **kwds)
-        v.__class__ = v  # type: ignore
-        return v
-
-    def __repr__(self) -> str:
-        return self.__name__
-
-
-# Used for methods, request targets, HTTP versions, header names, and header
-# values. Accepts ASCII strings, or bytes/bytearray/memoryview/..., and always
-# returns bytes.
-def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes:
-    # Fast-path:
-    if type(s) is bytes:
-        return s
-    if isinstance(s, str):
-        s = s.encode("ascii")
-    if isinstance(s, int):
-        raise TypeError("expected bytes-like object, not int")
-    return bytes(s)
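
The Sentinel metaclass gives each sentinel the unusual property type(sentinel) is sentinel, which lets callers dispatch on type(event) uniformly. A minimal sketch with a hypothetical sentinel name, assuming h11 is installed:

from h11._util import Sentinel

class MY_SENTINEL(Sentinel, metaclass=Sentinel):
    pass

# The "bonus property": the class is its own type.
assert type(MY_SENTINEL) is MY_SENTINEL
assert repr(MY_SENTINEL) == "MY_SENTINEL"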
 
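validate() and bytesify() are similarly small. A usage sketch with a hypothetical token regex (not h11's real header grammar):

import re

from h11._util import LocalProtocolError, bytesify, validate

# bytesify() normalizes ASCII str input to bytes.
assert bytesify("Content-Length") == b"Content-Length"

# validate() returns the named groups on a full match and raises otherwise.
token_re = re.compile(rb"(?P<name>[-0-9a-zA-Z]+)")
assert validate(token_re, b"Host")["name"] == b"Host"
try:
    validate(token_re, b"bad header", "not a token: {}", "bad header")
except LocalProtocolError:
    pass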
 
spaces/Datasculptor/MusicGen/tests/data/test_audio_utils.py DELETED
@@ -1,110 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the
-# LICENSE file in the root directory of this source tree.
-
-import julius
-import torch
-import pytest
-
-from audiocraft.data.audio_utils import (
-    _clip_wav,
-    convert_audio_channels,
-    convert_audio,
-    normalize_audio
-)
-from ..common_utils import get_batch_white_noise
-
-
-class TestConvertAudioChannels:
-
-    def test_convert_audio_channels_downmix(self):
-        b, c, t = 2, 3, 100
-        audio = get_batch_white_noise(b, c, t)
-        mixed = convert_audio_channels(audio, channels=2)
-        assert list(mixed.shape) == [b, 2, t]
-
-    def test_convert_audio_channels_nochange(self):
-        b, c, t = 2, 3, 100
-        audio = get_batch_white_noise(b, c, t)
-        mixed = convert_audio_channels(audio, channels=c)
-        assert list(mixed.shape) == list(audio.shape)
-
-    def test_convert_audio_channels_upmix(self):
-        b, c, t = 2, 1, 100
-        audio = get_batch_white_noise(b, c, t)
-        mixed = convert_audio_channels(audio, channels=3)
-        assert list(mixed.shape) == [b, 3, t]
-
-    def test_convert_audio_channels_upmix_error(self):
-        b, c, t = 2, 2, 100
-        audio = get_batch_white_noise(b, c, t)
-        with pytest.raises(ValueError):
-            convert_audio_channels(audio, channels=3)
-
-
-class TestConvertAudio:
-
-    def test_convert_audio_channels_downmix(self):
-        b, c, dur = 2, 3, 4.
-        sr = 128
-        audio = get_batch_white_noise(b, c, int(sr * dur))
-        out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=2)
-        assert list(out.shape) == [audio.shape[0], 2, audio.shape[-1]]
-
-    def test_convert_audio_channels_upmix(self):
-        b, c, dur = 2, 1, 4.
-        sr = 128
-        audio = get_batch_white_noise(b, c, int(sr * dur))
-        out = convert_audio(audio, from_rate=sr, to_rate=sr, to_channels=3)
-        assert list(out.shape) == [audio.shape[0], 3, audio.shape[-1]]
-
-    def test_convert_audio_upsample(self):
-        b, c, dur = 2, 1, 4.
-        sr = 2
-        new_sr = 3
-        audio = get_batch_white_noise(b, c, int(sr * dur))
-        out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c)
-        out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr)
-        assert torch.allclose(out, out_j)
-
-    def test_convert_audio_resample(self):
-        b, c, dur = 2, 1, 4.
-        sr = 3
-        new_sr = 2
-        audio = get_batch_white_noise(b, c, int(sr * dur))
-        out = convert_audio(audio, from_rate=sr, to_rate=new_sr, to_channels=c)
-        out_j = julius.resample.resample_frac(audio, old_sr=sr, new_sr=new_sr)
-        assert torch.allclose(out, out_j)
-
-
-class TestNormalizeAudio:
-
-    def test_clip_wav(self):
-        b, c, dur = 2, 1, 4.
-        sr = 3
-        audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
-        _clip_wav(audio)
-        assert audio.abs().max() <= 1
-
-    def test_normalize_audio_clip(self):
-        b, c, dur = 2, 1, 4.
-        sr = 3
-        audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
-        norm_audio = normalize_audio(audio, strategy='clip')
-        assert norm_audio.abs().max() <= 1
-
-    def test_normalize_audio_rms(self):
-        b, c, dur = 2, 1, 4.
-        sr = 3
-        audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
-        norm_audio = normalize_audio(audio, strategy='rms')
-        assert norm_audio.abs().max() <= 1
-
-    def test_normalize_audio_peak(self):
-        b, c, dur = 2, 1, 4.
-        sr = 3
-        audio = 10.0 * get_batch_white_noise(b, c, int(sr * dur))
-        norm_audio = normalize_audio(audio, strategy='peak')
-        assert norm_audio.abs().max() <= 1
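
These tests depend on a get_batch_white_noise(b, c, t) helper imported from the suite's common_utils, which is not part of this file. A plausible sketch of its contract, inferred from the assertions above and labeled as an assumption:

import torch

def get_batch_white_noise(b: int, c: int, t: int) -> torch.Tensor:
    # Assumed contract: a float tensor of shape [batch, channels, time]
    # filled with gaussian noise.
    return torch.randn(b, c, t)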
 
 
spaces/Datasculptor/StyleGAN-NADA/op/fused_act_cpu.py DELETED
@@ -1,41 +0,0 @@
-import os
-
-import torch
-from torch import nn
-from torch.autograd import Function
-from torch.nn import functional as F
-
-
-module_path = os.path.dirname(__file__)
-
-
-class FusedLeakyReLU(nn.Module):
-    def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
-        super().__init__()
-
-        self.bias = nn.Parameter(torch.zeros(channel))
-        self.negative_slope = negative_slope
-        self.scale = scale
-
-    def forward(self, input):
-        return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
-
-
-def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
-    if input.device.type == "cpu":
-        if bias is not None:
-            # Broadcast the per-channel bias across the remaining dimensions,
-            # and honor the negative_slope argument instead of hardcoding 0.2.
-            rest_dim = [1] * (input.ndim - bias.ndim - 1)
-            return (
-                F.leaky_relu(
-                    input + bias.view(1, bias.shape[0], *rest_dim),
-                    negative_slope=negative_slope,
-                )
-                * scale
-            )
-        else:
-            return F.leaky_relu(input, negative_slope=negative_slope) * scale
-    else:
-        # FusedLeakyReLUFunction is provided by the compiled CUDA extension in
-        # the non-CPU variant of this op; it is not defined in this CPU-only module.
-        return FusedLeakyReLUFunction.apply(
-            input.contiguous(), bias, negative_slope, scale
-        )
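
On CPU this op reduces to a bias add followed by a scaled leaky ReLU. A short sketch, assuming the repo layout keeps the module importable as op.fused_act_cpu:

import torch

from op.fused_act_cpu import FusedLeakyReLU, fused_leaky_relu

activation = FusedLeakyReLU(channel=8)
x = torch.randn(2, 8, 4, 4)

# The module path and the functional path agree on CPU tensors.
assert torch.allclose(activation(x), fused_leaky_relu(x, activation.bias))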