parquet-converter committed
Commit eb780e7 · 1 Parent(s): 272e524

Update parquet files (step 14 of 249)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Codebreaker 10.1 Patched Elf How to Install and Use It on Your PS2.md +0 -119
  2. spaces/1gistliPinn/ChatGPT4/Examples/Artificial Intelligence Full Movie Download !LINK! In Hindi.md +0 -6
  3. spaces/1gistliPinn/ChatGPT4/Examples/Ashtapathi Lyrics In Tamil Pdf [PORTABLE] Download.md +0 -6
  4. spaces/1gistliPinn/ChatGPT4/Examples/Dungeon Of The Endless 1.1.5 Crack [EXCLUSIVE] Mac Osx.md +0 -124
  5. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Buble Shooter Join the Bubble Popping Adventure.md +0 -123
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Countries.csv The Ultimate Resource for Country Information.md +0 -112
  7. spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/README.md +0 -164
  8. spaces/4Taps/SadTalker/src/utils/audio.py +0 -136
  9. spaces/7eu7d7/anime-ai-detect-fucker/attacker/__init__.py +0 -3
  10. spaces/AIFILMS/StyleGANEX/models/stylegan2/lpips/base_model.py +0 -58
  11. spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/scheduler.py +0 -24
  12. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/vocoder/bigvgan/models.py +0 -414
  13. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/work_dirs/mobilevit-small_4xb32_2000e_3c_noF/__init__.py +0 -0
  14. spaces/AUBADA-ALARABI/poetry202/README.md +0 -13
  15. spaces/Abhi5ingh/fashionsd/sdfile.py +0 -89
  16. spaces/Abubakari/Sepsis-fastapi-prediction-app/Dockerfile +0 -14
  17. spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/r/[id]/message/[messageId]/prompt/$types.d.ts +0 -9
  18. spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/settings/$types.d.ts +0 -28
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateToast.js +0 -8
  20. spaces/Amrrs/DragGan-Inversion/stylegan_human/dnnlib/tflib/ops/fused_bias_act.py +0 -214
  21. spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/training/projectors/w_plus_projector.py +0 -163
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/colossalai/inference.py +0 -12
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_k_diffusion.py +0 -136
  24. spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes.py +0 -4
  25. spaces/Anew5128/Anew51/constants.py +0 -50
  26. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/DOCS.md +0 -85
  27. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superboogav2/notebook_handler.py +0 -40
  28. spaces/Aphrodite/AIChatBot-SL-Chatbot-Blenderbot/app.py +0 -28
  29. spaces/AzumaSeren100/XuanShen-Bert-VITS2/text/japanese.py +0 -104
  30. spaces/Bart92/RVC_HF/infer/modules/uvr5/mdxnet.py +0 -246
  31. spaces/Benson/text-generation/Examples/Caramelo Crush Amigos Saga Apkpure.md +0 -51
  32. spaces/Benson/text-generation/Examples/Ciudad Congelada Mod Apk Diamantes Ilimitados.md +0 -55
  33. spaces/Benson/text-generation/Examples/Descargar Arthdal Crnicas Episodio 16.md +0 -51
  34. spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/helpers.py +0 -1088
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/install_data.py +0 -84
  36. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/README.md +0 -7
  37. spaces/CVPR/v-doc_abstractive_mac/interface.py +0 -23
  38. spaces/Caoyunkang/Segment-Any-Anomaly/install.sh +0 -45
  39. spaces/ChandraMohanNayal/AutoGPT/tests/test_token_counter.py +0 -63
  40. spaces/CikeyQI/meme-api/meme_generator/memes/bite/__init__.py +0 -33
  41. spaces/CofAI/chat.b4/g4f/Provider/Providers/Ezcht.py +0 -35
  42. spaces/CofAI/netlist/style.css +0 -28
  43. spaces/CognitiveLabs/Research-Assistant/app.py +0 -103
  44. spaces/Cpp4App/Cpp4App/SEM/region_pp_processing.py +0 -40
  45. spaces/Cran-May/SEA-Streamlit/app.py +0 -73
  46. spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/mediapipe_face_common.py +0 -155
  47. spaces/Curranj/chatbot/README.md +0 -12
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/__init__.py +0 -1
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/__init__.py +0 -1
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/recordingPen.py +0 -179
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Codebreaker 10.1 Patched Elf How to Install and Use It on Your PS2.md DELETED
@@ -1,119 +0,0 @@
1
- <br />
2
- <h1>Codebreaker 10.1 Patched Elf: What Is It and How to Use It</h1>
3
- <p>If you are a fan of playing PS2 games, you might have heard of <strong>Codebreaker</strong>, a cheat device that allows you to access various cheats and hacks for your favorite games. However, if you have a soft-modded PS2, you might have encountered some problems when trying to use Codebreaker with your backup games or burned discs. That's where <strong>Codebreaker 10.1 Patched Elf</strong> comes in handy.</p>
4
- <h2>Codebreaker 10.1 Patched Elf</h2><br /><p><b><b>DOWNLOAD</b> &#9734; <a href="https://byltly.com/2uKvbo">https://byltly.com/2uKvbo</a></b></p><br /><br />
5
- <p>In this article, we will explain what Codebreaker 10.1 Patched Elf is, how to install it on your PS2, how to use it to play burned games, and what are its benefits and limitations. By the end of this article, you will be able to enjoy your PS2 games with more fun and convenience.</p>
6
- <h2>How to Install Codebreaker 10.1 Patched Elf on Your PS2</h2>
7
- <p>Before we get into the details of how to use Codebreaker 10.1 Patched Elf, let's first see how to install it on your PS2. To do this, you will need the following:</p>
8
- <ul>
9
- <li>A soft-modded PS2 with Free McBoot (FMCB) installed on a memory card.</li>
10
- <li>A USB drive formatted as FAT32.</li>
11
- <li>A copy of Codebreaker 10 or 10.1 in ISO format.</li>
12
- <li>A patcher program that can modify the Codebreaker elf file.</li>
13
- <li>A file manager program that can copy files from USB to memory card.</li>
14
- </ul>
15
- <p>Once you have these ready, follow these steps:</p>
16
- <ol>
17
- <li>Download the patcher program from <a href="https://psx-scene.com/forums/showthread.php?t=57901">this link</a>. It is a zip file that contains two files: CB_launch.zip and CB_patch.zip.</li>
18
- <li>Extract the CB_patch.zip file and run the CB_patch.exe file on your computer.</li>
19
- <li>Select your Codebreaker ISO file as the input file and choose a destination folder for the output file.</li>
20
- <li>Click on "Patch" and wait for the process to finish.</li>
21
- <li>You should now have a patched elf file named "CODEBREAKER Vxx PATCHED BY ZALZZAR" in your destination folder.</li>
22
- <li>Rename this file as "CB_launch.elf" and copy it to your USB drive.</li>
23
- <li>Extract the CB_launch.zip file and copy the "CB_launch" folder to your USB drive as well.</li>
24
- <li>Plug your USB drive into your PS2 and turn it on.</li>
25
- <li>Launch FMCB from your memory card and select uLaunchELF from the menu.</li>
26
- <li>Browse to your USB drive using uLaunchELF and copy the "CB_launch" folder and the "CB_launch.elf" file to your memory card's "BOOT" folder.</li>
27
- <li>Go back to FMCB menu and select "Configure OSDSYS options".</li>
28
- <li>Select "Configure Item" and choose an empty slot.</li>
29
- <li>Select "Path1" and browse to your memory card's "BOOT" folder.</li>
30
- <li>Select "CB_launch.elf" as the path and press circle.</li>
31
- <li>Select "Name" and enter "Codebreaker" as the name and press circle.</li>
32
- <li>Select "Save CNF To MC0" and press circle.</li>
33
- <li>Exit FMCB menu and restart your PS2.</li>
34
- </ol>
35
- <p>You should now see "Codebreaker" as an option in your FMCB menu. Congratulations, you have successfully installed Codebreaker 10.1 Patched Elf on your PS2!</p>
85
- <h2>How to Use Codebreaker 10.1 Patched Elf to Play Burned Games on Your PS2</h2>
86
- <p>Now that you have installed Codebreaker 10.1 Patched Elf on your PS2, you might be wondering how to use it to play burned games or backup discs on your console. To do this, you will need the following:</p>
87
- <ul>
88
- <li>A burned game or backup disc that has been patched for ESR (a homebrew patch that disguises a DVD-R backup as a DVD-Video disc so the console will boot it).</li>
89
- <li>A patcher program that can patch ISO images for ESR.</li>
90
- </ul>
91
- <p>If you don't know how to patch ISO images for ESR, follow these steps:</p>
92
- <ol>
93
- <li>Download the ESR Disc Patcher from <a href="http://psx-scene.com/forums/showthread.php?t=58441">this link</a>. It is a zip file that contains a single exe file.</li>
94
- <li>Extract the zip file and run the ESR Disc Patcher.exe file on your computer.</li>
95
- <li>Select your ISO image as the input file and choose a destination folder for the output file.</li>
96
- <li>Click on "Patch" and wait for the process to finish.</li>
97
- <li>You should now have a patched ISO image in your destination folder with "_ESR" added at the end of its name.</li>
98
- <li>Burn this image onto a DVD-R disc using any burning software of your choice.</li>
99
- </ol>
100
- <p>Once you have a patched disc ready, follow these steps:</p>
101
- <ol>
102
- <li>Insert your disc into your PS2's disc tray but don't close it yet.</li>
103
- <li>Select "Codebreaker" from your FMCB menu and press X.</li>
104
- <li>You should see a loading screen followed by a disclaimer screen. Press X to continue.</li>
105
- <li>You should now see the main menu of Codebreaker with various options such as Start Game, Select Cheats, Options, etc.</li>
106
- <li>Select "Options" and press X.</li>
107
- <li>Select "Disc Tray Status" and press X until it says "Off". This will prevent Codebreaker from ejecting your disc when you start the game.</li>
108
- <li>Select "Save Options" and press X.</li>
109
- <li>Select "Select Cheats" and press X.</li>
110
- <li>You should see a list of games that are compatible with Codebreaker. You can scroll through them using up/down buttons or search for them using left/right buttons.</li>
111
- <li>Select the game that matches your disc and press X.</li>
112
- <li>You should see a list of cheats available for that game. You can toggle them on/off using X button or select them all using square button.</li>
113
- <li>Select "Start Game With Selected Cheats" and press X.</li>
114
- <li>You should see a loading screen followed by another disclaimer screen. Press X to continue.</li>
115
- <li>You should now be taken back to FMCB menu automatically.</li>
116
- <li>Select ESR from FMCB menu and press X.</li>
117
- <li>You should see a loading screen, after which your game will boot with the selected cheats enabled.</li>
118
- <br />
119
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Artificial Intelligence Full Movie Download !LINK! In Hindi.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Artificial Intelligence Full Movie Download In Hindi</h2><br /><p><b><b>DOWNLOAD</b> &#10084; <a href="https://imgfil.com/2uxZcJ">https://imgfil.com/2uxZcJ</a></b></p><br /><br />
2
- <br />
3
- Film Kyss mig (2011) Online HD,Film Online,Filme Online. ... The Last Kids on Earth (Season 3) [Hindi + English] Dual Audio WEB-DL 720p [NF Animated Series]. ... The automatic subtitle generators powered by artificial intelligence offer a ...<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Ashtapathi Lyrics In Tamil Pdf [PORTABLE] Download.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>ashtapathi lyrics in tamil pdf download</h2><br /><p><b><b>Download</b> &#10084;&#10084;&#10084; <a href="https://imgfil.com/2uxZp8">https://imgfil.com/2uxZp8</a></b></p><br /><br />
2
-
3
- Pdf - eBook and . ... PDF ebooks (user's guide, manuals, sheets) about Ashtapadi lyrics tamil pdf ready for download.... DownloadPDF, TXT or ...<br />
4
- <br />
5
- <br />
6
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Dungeon Of The Endless 1.1.5 Crack [EXCLUSIVE] Mac Osx.md DELETED
@@ -1,124 +0,0 @@
1
- <br />
2
- <h1>Dungeon of the Endless 1.1.5 Crack Mac Osx: How to Download and Play the Ultimate Dungeon Crawler</h1>
3
- <p>Dungeon of the Endless 1.1.5 Crack Mac Osx is a game that combines roguelike, tower defense, and RPG elements in a unique and challenging way. You play as a survivor of a prison ship that crashed on a mysterious planet, and you have to explore the endless dungeon below, fighting enemies, collecting resources, and building defenses along the way.</p>
4
- <h2>Dungeon of the Endless 1.1.5 Crack Mac Osx</h2><br /><p><b><b>Download</b> &mdash;&mdash;&mdash;>>> <a href="https://imgfil.com/2uxYai">https://imgfil.com/2uxYai</a></b></p><br /><br />
5
- <p>If you are looking for a game that will test your skills and strategy, Dungeon of the Endless 1.1.5 Crack Mac Osx is a perfect choice. In this article, we will show you how to download and play this game on your Mac computer.</p>
6
- <h2>How to Download Dungeon of the Endless 1.1.5 Crack Mac Osx</h2>
7
- <p>Dungeon of the Endless 1.1.5 Crack Mac Osx is a cracked version of the game that allows you to play it for free without any limitations or restrictions. You can download it from various websites that offer cracked games for Mac users, such as kidzshare.com or trailduro.com.</p>
8
- <p>Here are the steps to download Dungeon of the Endless 1.1.5 Crack Mac Osx:</p>
9
- <p></p>
10
- <ol>
11
- <li>Visit one of the websites that offer Dungeon of the Endless 1.1.5 Crack Mac Osx, such as kidzshare.com or trailduro.com.</li>
12
- <li>Find the download link for Dungeon of the Endless 1.1.5 Crack Mac Osx and click on it.</li>
13
- <li>Wait for the download to finish and extract the zip file to your desired location.</li>
14
- <li>Open the extracted folder and run the DungeonoftheEndless.app file to launch the game.</li>
15
- </ol>
16
- <h2>How to Play Dungeon of the Endless 1.1.5 Crack Mac Osx</h2>
17
- <p>Dungeon of the Endless 1.1.5 Crack Mac Osx is a game that requires strategy, skill, and luck to survive. You can play it solo or with up to three other players online or locally.</p>
18
- <p>Here are some tips and tricks to play Dungeon of the Endless 1.1.5 Crack Mac Osx:</p>
19
- <ul>
20
- <li>Choose your characters wisely: Each character has different stats, skills, and abilities that can affect your gameplay. You can also unlock more characters by completing certain achievements or using mods.</li>
21
- <li>Manage your resources carefully: You need to collect dust, food, industry, and science to power your rooms, heal your characters, build modules, and research new technologies.</li>
22
- <li>Build your defenses strategically: You can build various modules in your rooms to help you fight enemies, such as turrets, traps, generators, etc. You can also upgrade your modules with science or use special items to boost their effects.</li>
23
- <li>Explore cautiously: You can open doors to explore new rooms and floors, but be careful as enemies will spawn randomly and attack you. You can also find items, events, merchants, or allies in some rooms.</li>
24
- <li>Escape safely: Your goal is to find the exit on each floor and reach it with at least one character carrying the crystal that powers your ship. You need to protect your crystal from enemies while moving it from room to room.</li>
25
- </ul>
26
- <h2>Conclusion</h2>
27
- <p>Dungeon of the Endless 1.1.5 Crack Mac Osx is a game that will challenge you with its unique blend of roguelike, tower defense, and RPG elements. You can download it for free from various websites that offer cracked games for Mac users, such as kidzshare.com or trailduro.com.</p>
28
- <p>If you are looking for a game that will test your skills and strategy, Dungeon of the Endless 1.1.5 Crack Mac Osx is a perfect choice.</p>
29
- <h2>How to Unlock Secret Characters with Dungeon of the Endless 1.1.5 Crack Mac Osx</h2>
30
- <p>Dungeon of the Endless 1.1.5 Crack Mac Osx has a lot of characters to choose from, each with their own stats, skills, and abilities. However, some of them are hidden and can only be unlocked by certain methods or conditions.</p>
31
- <p>If you want to unlock all the secret characters in Dungeon of the Endless 1.1.5 Crack Mac Osx, you can use a mod called Secret Unlocker (DotE-Secrets) v.1.1.5, which is a patch that adds them to the character selection screen. You can download it from gamepressure.com or other websites that offer mods for Dungeon of the Endless.</p>
32
- <p>Here are the steps to install and use Secret Unlocker (DotE-Secrets) v.1.1.5:</p>
33
- <ol>
34
- <li>Download the mod file from gamepressure.com or other websites that offer mods for Dungeon of the Endless.</li>
35
- <li>Copy the mod file to DungeonoftheEndless_Data\\Managed inside your game folder.</li>
36
- <li>Run the installer and it will rename your original Assembly-CSharp.dll file to Assembly-CSharp.dll.backup.</li>
37
- <li>Launch the game and you will see all the secret characters available on the character selection screen.</li>
38
- </ol>
39
- <p>Here are the secret characters that you can unlock with Secret Unlocker (DotE-Secrets) v.1.1.5:</p>
40
- <ul>
41
- <li>Ayairi Whairydd (War Pug): A cute but fierce dog that can bite enemies and heal allies.</li>
42
- <li>Esseb Tarosh (Archivist): A mysterious alien that can manipulate time and space.</li>
43
- </ul>
44
- <h2>How to Install Mods for Dungeon of the Endless 1.1.5 Crack Mac Osx</h2>
45
- <p>Dungeon of the Endless 1.1.5 Crack Mac Osx is a game that can be enhanced and customized with various mods that add new features, functions, or content to the game. You can find many mods for Dungeon of the Endless on websites such as ali213.net or lastgame.ru.</p>
46
- <p>Here are the steps to install mods for Dungeon of the Endless 1.1.5 Crack Mac Osx:</p>
47
- <ol>
48
- <li>Download the mod file from ali213.net or lastgame.ru or other websites that offer mods for Dungeon of the Endless.</li>
49
- <li>Extract the zip file to your desired location.</li>
50
- <li>Open the extracted folder and copy the files or folders to your game folder, depending on the instructions of each mod.</li>
51
- <li>Launch the game and enjoy the modded features or content.</li>
52
- </ol>
53
- <p>Here are some examples of mods that you can install for Dungeon of the Endless 1.1.5 Crack Mac Osx:</p>
54
- <ul>
55
- <li>Dungeon of The Endless - More Heroes: A mod that adds more than 20 new heroes to the game, each with their own stats, skills, and abilities.</li>
56
- <li>Dungeon of The Endless - More Modules: A mod that adds more than 30 new modules to the game, each with their own effects and functions.</li>
57
- <li>Dungeon of The Endless - More Floors: A mod that adds more floors to the game, each with their own themes and challenges.</li>
58
- </ul>
59
- <h2>How to Update Dungeon of the Endless 1.1.5 Crack Mac Osx</h2>
60
- <p>Dungeon of the Endless 1.1.5 Crack Mac Osx is a cracked version of the game that allows you to play it for free without any limitations or restrictions. However, it may not be compatible with the latest updates or patches that are released by the developers.</p>
61
- <p>If you want to update Dungeon of the Endless 1.1.5 Crack Mac Osx to the latest version, you can use a tool called PatchMyPC, which is free software that can automatically update your cracked games and apps on your Mac computer.</p>
62
- <p>Here are the steps to update Dungeon of the Endless 1.1.5 Crack Mac Osx with PatchMyPC:</p>
63
- <ol>
64
- <li>Download PatchMyPC from patchmypc.com or other websites that offer tools for cracked games and apps.</li>
65
- <li>Install PatchMyPC on your Mac computer and run it.</li>
66
- <li>Select Dungeon of the Endless 1.1.5 Crack Mac Osx from the list of games and apps that can be updated by PatchMyPC.</li>
67
- <li>Click Update button and wait for PatchMyPC to download and install the latest update or patch for Dungeon of the Endless 1.1.5 Crack Mac Osx.</li>
68
- <li>Launch Dungeon of the Endless 1.1.5 Crack Mac Osx and enjoy the updated features or content.</li>
69
- </ol>
70
- <h2>How to Fix Common Problems with Dungeon of the Endless 1.1.5 Crack Mac Osx</h2>
71
- <p>Dungeon of the Endless 1.1.5 Crack Mac Osx is a game that can run smoothly and flawlessly on most Mac computers, but it may also encounter some problems or errors that can affect your gameplay experience.</p>
72
- <p>If you face any common problems with Dungeon of the Endless 1.1.5 Crack Mac Osx, such as crashes, freezes, lag, black screen, sound issues, etc., you can try some solutions that can help you fix them.</p>
73
- <p>Here are some solutions that can help you fix common problems with Dungeon of the Endless 1.1.5 Crack Mac Osx:</p>
74
- <ul>
75
- <li>Make sure your Mac computer meets the minimum system requirements for Dungeon of the Endless 1.1.5 Crack Mac Osx, such as operating system, processor, memory, graphics card, etc.</li>
76
- <li>Make sure your Mac computer has enough free disk space and RAM to run Dungeon of the Endless 1.1.5 Crack Mac Osx smoothly and efficiently.</li>
77
- <li>Make sure your Mac computer has the latest drivers and software updates installed, especially for your graphics card and sound card.</li>
78
- <li>Make sure your Mac computer has no viruses or malware that can interfere with Dungeon of the Endless 1.1.5 Crack Mac Osx or cause performance issues.</li>
79
- <li>Make sure your internet connection is stable and fast enough to play Dungeon of the Endless 1.1.5 Crack Mac Osx online or multiplayer mode without lag or disconnects.</li>
80
- <li>Make sure you run Dungeon of the Endless 1.1.5 Crack Mac Osx as administrator and in compatibility mode if necessary.</li>
81
- <li>Make sure you disable any background programs or applications that can consume your CPU or GPU resources or conflict with Dungeon of the Endless 1.1.5 Crack Mac Osx.</li>
82
- <li>Make sure you adjust your game settings according to your preferences and hardware capabilities, such as resolution, graphics quality, sound volume, etc.</li>
83
- </ul>
84
- <h2>How to Customize Your Characters with Dungeon of the Endless 1.1.5 Crack Mac Osx</h2>
85
- <p>Dungeon of the Endless 1.1.5 Crack Mac Osx has a lot of characters to choose from, each with their own stats, skills, and abilities. However, you can also customize your characters with various items, equipment, and mods that can enhance their performance and appearance.</p>
86
- <p>If you want to customize your characters with Dungeon of the Endless 1.1.5 Crack Mac Osx, you can use a mod called More Heroes, which is a mod that adds more than 20 new heroes to the game, each with their own stats, skills, and abilities. You can download it from ali213.net or other websites that offer mods for Dungeon of the Endless.</p>
87
- <p>Here are the steps to install and use More Heroes mod for Dungeon of the Endless 1.1.5 Crack Mac Osx:</p>
88
- <ol>
89
- <li>Download the mod file from ali213.net or other websites that offer mods for Dungeon of the Endless.</li>
90
- <li>Extract the zip file to your desired location.</li>
91
- <li>Open the extracted folder and copy the files or folders to your game folder.</li>
92
- <li>Launch the game and you will see all the new heroes available on the character selection screen.</li>
93
- </ol>
94
- <p>Here are some examples of items, equipment, and mods that you can use to customize your characters with Dungeon of the Endless 1.1.5 Crack Mac Osx:</p>
95
- <ul>
96
- <li>Items: You can find various items in the dungeon that can give you temporary or permanent bonuses or effects, such as health, damage, speed, etc.</li>
97
- <li>Equipment: You can equip your characters with different weapons and armor that can improve their combat abilities and defense.</li>
98
- <li>Mods: You can install different mods on your characters that can modify their stats, skills, or abilities.</li>
99
- </ul>
100
- <h2>How to Enjoy Dungeon of the Endless 1.1.5 Crack Mac Osx with Your Friends</h2>
101
- <p>Dungeon of the Endless 1.1.5 Crack Mac Osx is a game that can be played solo or with up to three other players online or locally. Playing with your friends can make the game more fun and challenging, as you can cooperate and communicate with each other to survive the endless dungeon.</p>
102
- <p>If you want to enjoy Dungeon of the Endless 1.1.5 Crack Mac Osx with your friends, you can use a tool called Hamachi, which is free software that creates a virtual private network (VPN) between your computers and allows you to play online games as if you were on the same local network.</p>
103
- <p>Here are the steps to enjoy Dungeon of the Endless 1.1.5 Crack Mac Osx with your friends using Hamachi:</p>
104
- <ol>
105
- <li>Download Hamachi from hamachi.com or other websites that offer tools for online gaming.</li>
106
- <li>Install Hamachi on your Mac computer and run it.</li>
107
- <li>Create a new network or join an existing one with your friends.</li>
108
- <li>Launch Dungeon of the Endless 1.1.5 Crack Mac Osx and select Multiplayer mode.</li>
109
- <li>Create a new game or join an existing one with your friends.</li>
110
- </ol>
111
- <p>Here are some tips and tricks to enjoy Dungeon of the Endless 1.1.5 Crack Mac Osx with your friends:</p>
112
- <ul>
113
- <li>Communicate with your friends using voice chat or text chat to coordinate your actions and strategies.</li>
114
- <li>Distribute your resources and roles among your friends according to your characters' strengths and weaknesses.</li>
115
- <li>Help each other out when in trouble or danger by healing, defending, or rescuing each other.</li>
116
- <li>Have fun and enjoy the game!</li>
117
- </ul>
118
- <h2>Conclusion</h2>
119
- <p>Dungeon of the Endless 1.1.5 Crack Mac Osx is a game that combines roguelike, tower defense, and RPG elements in a unique and challenging way. You play as a survivor of a prison ship that crashed on a mysterious planet, and you have to explore the endless dungeon below, fighting enemies, collecting resources, and building defenses along the way.</p>
120
- <p>If you are looking for a game that will test your skills and strategy, Dungeon of the Endless 1.1.5 Crack Mac Osx is a perfect choice. You can download it for free from various websites that offer cracked games for Mac users, such as kidzshare.com or trailduro.com.</p>
121
- <p>In this article, we have shown you how to download and play Dungeon of the Endless 1.1.5 Crack Mac Osx on your Mac computer. We have also given you some tips and tricks to survive the endless dungeon, unlock secret characters, install mods, update the game, fix common problems, and enjoy the game with your friends.</p>
122
- <p>We hope you have found this article helpful and informative. If you have any questions or comments, feel free to leave them below. Thank you for reading and have fun playing Dungeon of the Endless 1.1.5 Crack Mac Osx!</p><br />
123
- <br />
124
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Buble Shooter Join the Bubble Popping Adventure.md DELETED
@@ -1,123 +0,0 @@
1
- <br />
2
- <h1>Bubble Shooter: A Fun and Addictive Game for Everyone</h1>
3
- <p>If you are looking for a simple yet entertaining game to pass the time, you might want to try Bubble Shooter. Bubble Shooter is a popular online game that involves shooting bubbles to match three or more of the same color and make them pop. It is easy to learn, fun to play, and suitable for all ages. In this article, we will tell you everything you need to know about Bubble Shooter, including its history, rules, benefits, tips, and best versions.</p>
4
- <h2>What is Bubble Shooter?</h2>
5
- <p>Bubble Shooter is a type of puzzle game that belongs to the genre of "match three" games. The main objective of the game is to clear the screen of bubbles by shooting them with a bubble cannon. The bubbles are arranged in a grid or a cluster, and they come in different colors. To pop the bubbles, you need to aim and shoot a bubble of the same color at them. When three or more bubbles of the same color touch, they burst and disappear. The game ends when you clear all the bubbles or when one of them reaches the bottom of the screen.</p>
6
- <h2>bubble shooter</h2><br /><p><b><b>DOWNLOAD</b> &#10038; <a href="https://urlin.us/2uSStg">https://urlin.us/2uSStg</a></b></p><br /><br />
7
- <h3>The history of Bubble Shooter</h3>
8
- <p>Bubble Shooter was originally developed by Taito Corporation in 1994 as an arcade game called Puzzle Bobble. It was later ported to various platforms such as PC, mobile, and web browsers. The game became very popular and spawned many sequels and spin-offs. One of the most successful versions of the game was Bubble Shooter, which was released in 2002 by Absolutist Games. This version introduced some new features such as power-ups, levels, and modes. Since then, Bubble Shooter has been played by millions of people around the world and has inspired many other similar games.</p>
9
- <h3>The rules of Bubble Shooter</h3>
10
- <p>The rules of Bubble Shooter are simple and intuitive. Here are the basic steps to play the game:</p>
11
- <ul>
12
- <li>At the bottom of the screen, you will see a bubble cannon that can be moved left and right with your mouse or finger.</li>
13
- <li>At the center of the cannon, you will see a bubble that is ready to be launched. You can also see the next bubble that will come after it.</li>
14
- <li>Aim the cannon at the cluster of bubbles at the top of the screen. You can see where the bubble will go by following the dotted line.</li>
15
- <li>Click or tap to shoot the bubble. Try to hit bubbles of the same color as your bubble.</li>
16
- <li>If you manage to connect three or more bubbles of the same color, they will pop and disappear. You will also earn points for each bubble you pop.</li>
17
- <li>If you miss or hit a different color, the bubble will stick to the cluster and make it bigger.</li>
18
- <li>If you pop all the bubbles on the screen, you will complete the level and move on to the next one.</li>
19
- <li>If one of the bubbles touches the bottom of the screen, you will lose a life and have to start over.</li>
20
- </ul>
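The "three or more" rule above is essentially a connected-component check around the cell where the shot lands. Below is a minimal sketch in Python; it is illustrative only: real Bubble Shooter boards use a staggered hexagonal layout, while this sketch uses a square grid with 4-neighbour adjacency, and names such as `connected_same_color` are hypothetical.

```python
from collections import deque

def connected_same_color(grid, row, col):
    """Collect all bubbles connected to (row, col) that share its color."""
    color = grid[row][col]
    seen = {(row, col)}
    queue = deque([(row, col)])
    while queue:
        r, c = queue.popleft()
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):  # 4-neighbour adjacency
            nr, nc = r + dr, c + dc
            if (0 <= nr < len(grid) and 0 <= nc < len(grid[0])
                    and (nr, nc) not in seen and grid[nr][nc] == color):
                seen.add((nr, nc))
                queue.append((nr, nc))
    return seen

grid = [["R", "R", "B"],
        ["R", "B", "B"],
        ["G", "G", "B"]]
group = connected_same_color(grid, 0, 0)  # the shot landed at (0, 0)
if len(group) >= 3:                       # "three or more of the same color"
    for r, c in group:
        grid[r][c] = None                 # pop: remove the matched bubbles
```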
21
- <h3>The benefits of playing Bubble Shooter</h3>
22
- <p>Bubble Shooter is not only a fun game but also a beneficial one. Here are some of the advantages of playing Bubble Shooter:</p>
23
- <ul>
24
- <li>It improves your concentration and focus. You have to pay attention to the colors, angles, and trajectories of the bubbles.</li>
25
- <li>It enhances your hand-eye coordination and reaction time. You have to move quickly and accurately to shoot the bubbles.</li>
26
- <li>It stimulates your brain and memory. You have to plan ahead and remember where the bubbles are located and how to pop them.</li>
27
- <li>It relaxes your mind and mood. You can enjoy the colorful graphics, the soothing sounds, and the satisfying feeling of popping bubbles.</li>
28
- <li>It challenges your skills and creativity. You can try different strategies and techniques to beat the levels and score higher.</li>
29
- </ul>
30
- <h2>How to play Bubble Shooter?</h2>
31
- <p>Now that you know what Bubble Shooter is and why you should play it, let's see how you can actually play it. Here are some tips and tricks to help you master the game:</p>
32
- <h3>Choose your device and platform</h3>
33
- <p>Bubble Shooter is available on various devices and platforms, such as PC, mobile, tablet, and web browser. You can choose the one that suits you best, depending on your preferences and convenience. For example, if you want to play on a bigger screen and use a mouse, you can play on your PC. If you want to play on the go and use touch controls, you can play on your mobile or tablet. If you want to play online and access different versions of the game, you can play on your web browser.</p>
34
- <h3>Aim and shoot the bubbles</h3>
35
- <p>The most important skill in Bubble Shooter is aiming and shooting the bubbles. You need to be precise and accurate to hit the right bubbles and avoid wasting shots. Here are some tips to improve your aiming and shooting:</p>
36
- <ul>
37
- <li>Use the dotted line as a guide. The dotted line shows you where the bubble will go when you shoot it. You can adjust the angle of the cannon by moving your mouse or finger.</li>
38
- <li>Use the walls as a bounce. The walls can help you reach bubbles that are hard to hit directly. You can bounce the bubble off the wall and make it ricochet to the target.</li>
39
- <li>Use the color of the next bubble as a hint. The color of the next bubble shows you what color will come after the current one. You can use this information to plan ahead and prepare for your next shot.</li>
40
- </ul>
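The wall-bounce tip is plain reflection geometry: when the bubble's path crosses a side wall, the horizontal velocity flips sign while the vertical component is unchanged. A minimal sketch, assuming flat vertical walls at x = 0 and x = width (the function name is illustrative):

```python
def advance(x, y, vx, vy, width, dt=1.0):
    """Move a bubble one time step, reflecting it off the side walls."""
    x, y = x + vx * dt, y + vy * dt
    if x < 0:                        # crossed the left wall
        x, vx = -x, -vx              # mirror the overshoot, flip direction
    elif x > width:                  # crossed the right wall
        x, vx = 2 * width - x, -vx
    return x, y, vx, vy
```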
41
- <h3>Use strategies and tips to improve your score</h3>
42
- <p>Besides aiming and shooting, there are also some strategies and tips that can help you improve your score and beat the levels. Here are some of them:</p>
43
- <ul>
44
- <li>Pop more bubbles with one shot. The more bubbles you pop with one shot, the more points you get. You can also trigger chain reactions and combos by popping bubbles that are connected to other bubbles of the same color.</li>
45
- <li>Clear the top rows first. The top rows are more dangerous because they are closer to the bottom of the screen. If you clear them first, you will have more space and time to shoot the lower rows.</li>
46
- <li>Avoid creating isolated bubbles. Isolated bubbles are bubbles that are not connected to any other bubbles of the same color. They are harder to pop and waste your shots. Try to avoid creating them by shooting at groups of bubbles instead of single ones.</li>
47
- <li>Use power-ups wisely. Power-ups are special bubbles that have different effects when popped. For example, some power-ups can clear a whole row or column of bubbles, change the color of nearby bubbles, or give you extra lives or shots. Use them wisely when you need them, but don't rely on them too much.</li>
48
- </ul>
49
- <h2>What are the best Bubble Shooter games?</h2>
50
- <p>Bubble Shooter is a very popular game that has many versions and variations. Some of them are more classic and simple, while others are more modern and complex. Here are some of the best Bubble Shooter games that you can try:</p>
100
- <h3>Bubble Shooter Classic</h3>
101
- <p>Bubble Shooter Classic is one of the most original and iconic versions of the game. It has a simple design, a retro style, and a relaxing soundtrack. It is perfect for those who want to enjoy a nostalgic and timeless game experience.</p>
102
- <h3>Bubble Shooter Extreme</h3>
103
- <p>Bubble Shooter Extreme is one of the most challenging and exciting versions of the game. It has a fast-paced gameplay, a futuristic design, and a dynamic soundtrack. It is perfect for those who want to test their skills and reflexes in a thrilling game experience.</p>
104
- <h3>Bubble Shooter Candy</h3>
105
- <p>Bubble Shooter Candy is one of the most sweet and colorful versions of the game. It has a cute design, a candy theme, and a cheerful soundtrack. It is perfect for those who want to enjoy a fun and delightful game experience.</p>
106
- <h2>Conclusion</h2>
107
- <p>Bubble Shooter is a fun and addictive game that everyone can enjoy. It is easy to learn, fun to play, and suitable for all ages. It also has many benefits for your mind and mood, such as improving your concentration, coordination, memory, relaxation, and creativity. You can play Bubble Shooter on various devices and platforms, such as PC, mobile, tablet, and web browser. You can also choose from different versions and variations of the game, such as Bubble Shooter Classic, Bubble Shooter Extreme, and Bubble Shooter Candy. Whether you want a nostalgic, thrilling, or delightful game experience, Bubble Shooter has something for you. So what are you waiting for? Grab your bubble cannon and start popping bubbles today!</p>
108
- <h2>FAQs</h2>
109
- <p>Here are some of the frequently asked questions about Bubble Shooter:</p>
110
- <ul>
111
- <li><b>Q: How many levels are there in Bubble Shooter?</b></li>
112
- <li>A: The number of levels in Bubble Shooter depends on the version and platform you are playing on. Some versions have a fixed number of levels, while others have an infinite number of levels that are randomly generated. You can check the level number on the screen or the menu.</li>
113
- <li><b>Q: How do I get more lives or shots in Bubble Shooter?</b></li>
114
- <li>A: The number of lives or shots in Bubble Shooter also depends on the version and platform you are playing on. Some versions have a limited number of lives or shots that you can replenish by watching ads, buying coins, or waiting for a certain time. Other versions have an unlimited number of lives or shots that you can use freely.</li>
115
- <li><b>Q: How do I pause or resume the game in Bubble Shooter?</b></li>
116
- <li>A: To pause or resume the game in Bubble Shooter, you can click or tap on the pause button on the screen or the menu. This will stop the game and allow you to access other options such as settings, sound, music, help, or exit.</li>
117
- <li><b>Q: How do I save or load my progress in Bubble Shooter?</b></li>
118
- <li>A: To save or load your progress in Bubble Shooter, you need to have an account or a profile on the platform you are playing on. This will allow you to sync your data across different devices and resume your game from where you left off.</li>
119
- <li><b>Q: How do I change the difficulty or mode in Bubble Shooter?</b></li>
120
- <li>A: To change the difficulty or mode in Bubble Shooter, you can select the option on the screen or the menu before starting a new game. Some versions have different difficulty levels such as easy, medium, hard, or expert. Other versions have different modes such as arcade, puzzle, time trial, or survival.</li>
121
- </ul><br />
122
- <br />
123
- <br />
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Countries.csv The Ultimate Resource for Country Information.md DELETED
@@ -1,112 +0,0 @@
1
- <br />
2
- <h1>How to Download Countries.csv</h1>
3
- <p>A CSV file, or a comma-separated values file, is a plain text file that stores data in a tabular format. Each line of the file is a data record, and each record consists of one or more fields separated by commas. CSV files are often used to exchange data between different applications that use incompatible formats. For example, you can use a CSV file to transfer data from a database to a spreadsheet, or vice versa.</p>
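As a quick illustration, the first records of a countries CSV might look like this (a hypothetical sample in the column layout of the Google Developers file discussed below; other sources use different columns):

```
country,latitude,longitude,name
AD,42.546245,1.601554,Andorra
AE,23.424076,53.847818,United Arab Emirates
AF,33.93911,67.709953,Afghanistan
```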
4
- <p>One example of a CSV file that you might want to download is countries.csv. This file contains information about countries around the world, such as their names, ISO codes, coordinates, capitals, currencies, regions, and more. You can use this file for various purposes, such as creating maps, charts, reports, or quizzes. In this article, we will show you how to download countries.csv and open it in a program of your choice.</p>
5
- <h2>download countries.csv</h2><br /><p><b><b>DOWNLOAD</b> &#128504;&#128504;&#128504; <a href="https://urlin.us/2uSZLa">https://urlin.us/2uSZLa</a></b></p><br /><br />
6
- <h2>Step 1: Find a Source of Countries.csv Data</h2>
7
- <p>The first step is to find a reliable source of countries.csv data. There are many websites that offer this kind of data for free or for a fee. Some examples are:</p>
8
- <ul>
9
- <li><a href="https://developers.google.com/public-data/docs/canonical/countries_csv">Google Developers</a>: This website provides a canonical version of countries.csv that follows the ISO 3166 standard for country codes. It also includes some additional fields, such as time zones, latitude, longitude, emoji, and native name.</li>
10
- <li><strong>GitHub</strong>: This website hosts a repository of countries, states, and cities data in JSON, SQL, XML, YAML, and CSV formats. The CSV files include an index column and are updated regularly.</li>
11
- <li><strong>Kaggle</strong>: This website offers a dataset of countries of the world that links country names to region, population, area size, GDP, mortality, and more. The dataset is available in CSV format and can be downloaded or accessed through an API.</li>
12
- </ul>
13
- <p>You can choose any source that suits your needs and preferences. For this article, we will use the Google Developers version of countries.csv.</p>
14
- <h2>Step 2: Choose a Program to Open the CSV File</h2>
15
- <p>The next step is to choose a program that can open and display the CSV file. There are many options available, depending on your operating system and your goals. Some common programs are:</p>
16
- <ul>
17
- <li><strong>Text editors</strong>: These are programs that allow you to view and edit plain text files. Examples are Notepad, Notepad++, Sublime Text, Atom, or Visual Studio Code. Text editors are useful for quickly viewing the contents of a CSV file or making minor changes.</li>
18
- <li><strong>Spreadsheet programs</strong>: These are programs that allow you to organize and analyze data in tabular form. Examples are Microsoft Excel, Google Sheets, LibreOffice Calc, or Numbers. Spreadsheet programs are useful for performing calculations, creating charts, filtering data, or applying formulas.</li>
19
- <li><strong>Specialized applications</strong>: These are programs that are designed for specific purposes or tasks related to CSV files. Examples are CSV Editor Pro, CSVed, Ron's Editor, or Easy Data Transform. Specialized applications are useful for editing large or complex CSV files, converting formats, validating data, or transforming data.</li>
20
- </ul>
21
- <p>You can choose any program that meets your requirements and expectations. For this article, we will use Microsoft Excel as an example of a spreadsheet program.</p>
22
- <h2>Step 3: Download the CSV File from the Source</h2>
23
- <p>The third step is to download the CSV file from the source website. To do this:</p>
75
- <ol>
76
- <li>Go to the website where the CSV file is hosted. In our case, it is <a href="(^1^)">https://developers.google.com/public-data/docs/canonical/countries_csv</a>.</li>
77
- <li>Right-click on the link to the CSV file and select "Save link as" or "Save target as". In our case, it is <a href="https://developers.google.com/public-data/docs/canonical/countries_csv.csv">https://developers.google.com/public-data/docs/canonical/countries_csv.csv</a>.</li>
78
- <li>Choose a location on your computer where you want to save the CSV file and click "Save".</li>
79
- </ol>
80
- <p>You have now downloaded the CSV file to your computer. You can find it in the location you specified.</p>
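If you prefer scripting to clicking, the same download takes a few lines of standard-library Python. The exact URL below is an assumption carried over from the link text above; substitute whatever direct link your chosen source actually exposes:

```python
import urllib.request

# Hypothetical direct CSV link; replace with the real URL from your source.
url = "https://developers.google.com/public-data/docs/canonical/countries_csv.csv"

with urllib.request.urlopen(url) as response, open("countries.csv", "wb") as out:
    out.write(response.read())  # save the raw bytes as countries.csv
```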
81
- <h2>Step 4: Open the CSV File in the Chosen Program</h2>
82
- <p>The fourth step is to open the CSV file in the program you selected. To do this:</p>
83
- <ol>
84
- <li>Launch the program on your computer. In our case, it is Microsoft Excel.</li>
85
- <li>Click on "File" and then "Open". Alternatively, you can use the keyboard shortcut Ctrl+O.</li>
86
- <li>Navigate to the location where you saved the CSV file and select it. Click "Open".</li>
87
- </ol>
88
- <p>You should now see the CSV file opened in the program. Depending on the program, you may need to adjust some settings, such as the delimiter, the encoding, or the format of the data. For example, in Excel, you may see a dialog box that asks you to choose how to import the data. You can select "Delimited" and then "Comma" as the delimiter. You can also choose the column data format as "General" or "Text". Click "Finish" to complete the import.</p>
89
- <h2>Step 5: Explore and Manipulate the Data as Needed</h2>
90
- <p>The final step is to explore and manipulate the data in the CSV file as needed. You can use the features and functions of the program to perform various tasks, such as:</p>
91
- <ul>
92
- <li><strong>Sort and filter</strong>: You can sort and filter the data by any column or criteria. For example, you can sort the countries by name, population, or region. You can also filter out countries that meet certain conditions, such as having a specific currency or language.</li>
93
- <li><strong>Calculate and analyze</strong>: You can calculate and analyze the data using formulas, functions, or tools. For example, you can calculate the average population, area, or GDP of countries in a region. You can also use tools such as pivot tables or charts to summarize and visualize the data.</li>
94
- <li><strong>Edit and format</strong>: You can edit and format the data to suit your needs and preferences. For example, you can add, delete, or modify rows or columns of data. You can also change the font, color, or alignment of the cells.</li>
95
- </ul>
96
- <p>You can explore and manipulate the data in any way you want. You can also save your changes or export the data to another format if needed.</p>
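As a concrete example of the sort/filter/analyze tasks above done outside a spreadsheet, here is a short pandas sketch. The column names `name` and `latitude` are assumptions about the file's header; adjust them to match your source:

```python
import pandas as pd

df = pd.read_csv("countries.csv")

# Sort: countries alphabetically by name.
by_name = df.sort_values("name")

# Filter: countries north of the equator (assumes a 'latitude' column).
northern = df[df["latitude"] > 0]

# Analyze: count countries by the first letter of their name.
by_initial = df["name"].str[0].value_counts()

print(by_name.head())
print(len(northern), "countries lie north of the equator")
print(by_initial.head())
```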
97
- <h1>Conclusion</h1>
98
- <p>In this article, we have shown you how to download countries.csv and open it in a program of your choice. We have also given you some examples of how to explore and manipulate the data in the CSV file. By following these steps, you can access a wealth of information about countries around the world and use it for various purposes.</p>
99
- <p>We hope you found this article helpful and informative. If you have any questions or feedback, please let us know in the comments below.</p>
100
- <h1>FAQs</h1>
101
- <h3>What is a CSV file?</h3>
102
- <p>A CSV file is a plain text file that stores data in a tabular format. Each line of the file is a data record, and each record consists of one or more fields separated by commas.</p>
103
- <h3>Why should I download countries.csv?</h3>
104
- <p>You should download countries.csv if you want to access information about countries around the world, such as their names, ISO codes, coordinates, capitals, currencies, regions, and more. You can use this information for various purposes, such as creating maps, charts, reports, or quizzes.</p>
105
- <h3>How do I open a CSV file?</h3>
106
- <p>You can open a CSV file using any program that can read and display plain text files. Some common programs are text editors, spreadsheet programs, or specialized applications.</p>
107
- <h3>Where can I find other sources of countries.csv data?</h3>
108
- <p>You can find other sources of countries.csv data by searching online for websites that offer this kind of data for free or for a fee. Some examples are GitHub, Kaggle, DataHub.io, World Bank Data Catalogue, or CIA World Factbook.</p>
109
- <h3>How do I convert a CSV file to another format?</h3>
110
- <p>You can convert a CSV file to another format using any program that can read and write different formats. Some common formats are JSON, SQL, XML, YAML, or HTML. You can also use online tools or converters that can perform this task for you.</p><br />
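For the common cases, pandas handles the conversion in one line per target format (a sketch; `to_xml` requires pandas 1.3 or newer):

```python
import pandas as pd

df = pd.read_csv("countries.csv")
df.to_json("countries.json", orient="records")  # CSV -> JSON
df.to_html("countries.html", index=False)       # CSV -> HTML table
df.to_xml("countries.xml", index=False)         # CSV -> XML (pandas >= 1.3)
```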
111
- <br />
112
- <br />
spaces/4Taps/SadTalker/src/face3d/models/arcface_torch/README.md DELETED
@@ -1,164 +0,0 @@
1
- # Distributed Arcface Training in Pytorch
2
-
3
- This is a deep learning library that makes face recognition efficient and effective; it can train tens of millions of
4
- identities on a single server.
5
-
6
- ## Requirements
7
-
8
- - Install [pytorch](http://pytorch.org) (torch>=1.6.0), our doc for [install.md](docs/install.md).
9
- - `pip install -r requirements.txt`.
10
- - Download the dataset
11
- from [https://github.com/deepinsight/insightface/tree/master/recognition/_datasets_](https://github.com/deepinsight/insightface/tree/master/recognition/_datasets_)
12
- .
13
-
14
- ## How to Train
15
-
16
- To train a model, run `train.py` with the path to the configs:
17
-
18
- ### 1. Single node, 8 GPUs:
19
-
20
- ```shell
21
- python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/ms1mv3_r50
22
- ```
23
-
24
- ### 2. Multiple nodes, each node 8 GPUs:
25
-
26
- Node 0:
27
-
28
- ```shell
29
- python -m torch.distributed.launch --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr="ip1" --master_port=1234 train.py configs/ms1mv3_r50
30
- ```
31
-
32
- Node 1:
33
-
34
- ```shell
35
- python -m torch.distributed.launch --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr="ip1" --master_port=1234 train.py configs/ms1mv3_r50
36
- ```
37
-
38
- ### 3. Training resnet2060 with 8 GPUs:
39
-
40
- ```shell
41
- python -m torch.distributed.launch --nproc_per_node=8 --nnodes=1 --node_rank=0 --master_addr="127.0.0.1" --master_port=1234 train.py configs/ms1mv3_r2060.py
42
- ```
43
-
44
- ## Model Zoo
45
-
46
- - The models are available for non-commercial research purposes only.
47
- - All models can be found here:
48
- - [Baidu Yun Pan](https://pan.baidu.com/s/1CL-l4zWqsI1oDuEEYVhj-g): e8pw
49
- - [onedrive](https://1drv.ms/u/s!AswpsDO2toNKq0lWY69vN58GR6mw?e=p9Ov5d)
50
-
51
- ### Performance on [**ICCV2021-MFR**](http://iccv21-mfr.com/)
52
-
53
- The ICCV2021-MFR test set consists of non-celebrities, so we can ensure that it has very little overlap with publicly available face
54
- recognition training sets such as MS1M and CASIA, which were mostly collected from online celebrities.
55
- As a result, we can fairly evaluate the performance of different algorithms.
56
-
57
- For the **ICCV2021-MFR-ALL** set, TAR is measured under the all-to-all 1:1 protocol, with FAR less than 0.000001 (1e-6). The
58
- globalised multi-racial test set contains 242,143 identities and 1,624,305 images.
59
-
60
- For the **ICCV2021-MFR-MASK** set, TAR is measured under the mask-to-nonmask 1:1 protocol, with FAR less than 0.0001 (1e-4).
61
- The mask test set contains 6,964 identities, 6,964 masked images, and 13,928 non-masked images.
62
- There are 13,928 positive pairs and 96,983,824 negative pairs in total.
63
-
64
- | Datasets | backbone | Training throughput | Size / MB | **ICCV2021-MFR-MASK** | **ICCV2021-MFR-ALL** |
65
- | :---: | :--- | :--- | :--- |:--- |:--- |
66
- | MS1MV3 | r18 | - | 91 | **47.85** | **68.33** |
67
- | Glint360k | r18 | 8536 | 91 | **53.32** | **72.07** |
68
- | MS1MV3 | r34 | - | 130 | **58.72** | **77.36** |
69
- | Glint360k | r34 | 6344 | 130 | **65.10** | **83.02** |
70
- | MS1MV3 | r50 | 5500 | 166 | **63.85** | **80.53** |
71
- | Glint360k | r50 | 5136 | 166 | **70.23** | **87.08** |
72
- | MS1MV3 | r100 | - | 248 | **69.09** | **84.31** |
73
- | Glint360k | r100 | 3332 | 248 | **75.57** | **90.66** |
74
- | MS1MV3 | mobilefacenet | 12185 | 7.8 | **41.52** | **65.26** |
75
- | Glint360k | mobilefacenet | 11197 | 7.8 | **44.52** | **66.48** |
76
-
77
- ### Performance on IJB-C and Verification Datasets
78
-
79
- | Datasets | backbone | IJBC(1e-05) | IJBC(1e-04) | agedb30 | cfp_fp | lfw | log |
80
- | :---: | :--- | :--- | :--- | :--- |:--- |:--- |:--- |
81
- | MS1MV3 | r18 | 92.07 | 94.66 | 97.77 | 97.73 | 99.77 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r18_fp16/training.log)|
82
- | MS1MV3 | r34 | 94.10 | 95.90 | 98.10 | 98.67 | 99.80 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r34_fp16/training.log)|
83
- | MS1MV3 | r50 | 94.79 | 96.46 | 98.35 | 98.96 | 99.83 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r50_fp16/training.log)|
84
- | MS1MV3 | r100 | 95.31 | 96.81 | 98.48 | 99.06 | 99.85 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r100_fp16/training.log)|
85
- | MS1MV3 | **r2060**| 95.34 | 97.11 | 98.67 | 99.24 | 99.87 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/ms1mv3_arcface_r2060_fp16/training.log)|
86
- | Glint360k |r18-0.1 | 93.16 | 95.33 | 97.72 | 97.73 | 99.77 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r18_fp16_0.1/training.log)|
87
- | Glint360k |r34-0.1 | 95.16 | 96.56 | 98.33 | 98.78 | 99.82 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r34_fp16_0.1/training.log)|
88
- | Glint360k |r50-0.1 | 95.61 | 96.97 | 98.38 | 99.20 | 99.83 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r50_fp16_0.1/training.log)|
89
- | Glint360k |r100-0.1 | 95.88 | 97.32 | 98.48 | 99.29 | 99.82 |[log](https://raw.githubusercontent.com/anxiangsir/insightface_arcface_log/master/glint360k_cosface_r100_fp16_0.1/training.log)|
90
-
91
- [comment]: <> (More details see [model.md]&#40;docs/modelzoo.md&#41; in docs.)
92
-
93
-
94
- ## [Speed Benchmark](docs/speed_benchmark.md)
95
-
96
- **Arcface Torch** can train large-scale face recognition training sets efficiently and quickly. When the number of
97
- classes in the training set is greater than 300K and training is sufficient, the partial fc sampling strategy reaches the same
98
- accuracy with several times faster training and a smaller GPU memory footprint, as sketched below.
99
- Partial FC is a sparse variant of the model parallel architecture for large-scale face recognition. Partial FC uses a
100
- sparse softmax, where each batch dynamically samples a subset of class centers for training. In each iteration, only a
101
- sparse part of the parameters is updated, which saves a great deal of GPU memory and computation. With Partial FC,
102
- we can scale to a training set of 29 million identities, the largest to date. Partial FC also supports multi-machine distributed
103
- training and mixed precision training.
104
-
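- As a rough sketch only (not this repository's actual implementation), the sampling idea can be written in a few lines of PyTorch; the 10% rate and the function below are illustrative assumptions:
-
- ```python
- import torch
-
- def sample_partial_centers(weight, labels, sample_rate=0.1):
-     """Keep the positive class centers, fill the rest of the budget with random negatives."""
-     num_classes = weight.shape[0]
-     positive = torch.unique(labels)                      # centers that must be kept
-     num_sample = max(int(num_classes * sample_rate), positive.numel())
-     mask = torch.ones(num_classes, dtype=torch.bool)
-     mask[positive] = False
-     negatives = torch.nonzero(mask).squeeze(1)
-     perm = negatives[torch.randperm(negatives.numel())[:num_sample - positive.numel()]]
-     index = torch.cat([positive, perm])
-     return weight[index], index                          # softmax runs over this subset only
- ```
-
- In the real Partial FC, the sampled centers are additionally sharded across GPUs, so each process holds only a slice of the full classification weight matrix.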
105
- ![Image text](https://github.com/anxiangsir/insightface_arcface_log/blob/master/partial_fc_v2.png)
106
-
107
- For more details, see
108
- [speed_benchmark.md](docs/speed_benchmark.md) in docs.
109
-
110
- ### 1. Training speed of different parallel methods (samples / second), Tesla V100 32GB * 8. (Larger is better)
111
-
112
- `-` means training failed because of GPU memory limitations.
113
-
114
- | Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 |
115
- | :--- | :--- | :--- | :--- |
116
- |125000 | 4681 | 4824 | 5004 |
117
- |1400000 | **1672** | 3043 | 4738 |
118
- |5500000 | **-** | **1389** | 3975 |
119
- |8000000 | **-** | **-** | 3565 |
120
- |16000000 | **-** | **-** | 2679 |
121
- |29000000 | **-** | **-** | **1855** |
122
-
123
- ### 2. GPU memory cost of different parallel methods (MB per GPU), Tesla V100 32GB * 8. (Smaller is better)
124
-
125
- | Number of Identities in Dataset | Data Parallel | Model Parallel | Partial FC 0.1 |
126
- | :--- | :--- | :--- | :--- |
127
- |125000 | 7358 | 5306 | 4868 |
128
- |1400000 | 32252 | 11178 | 6056 |
129
- |5500000 | **-** | 32188 | 9854 |
130
- |8000000 | **-** | **-** | 12310 |
131
- |16000000 | **-** | **-** | 19950 |
132
- |29000000 | **-** | **-** | 32324 |
133
-
134
- ## Evaluation ICCV2021-MFR and IJB-C
135
-
136
- For more details, see [eval.md](docs/eval.md) in docs.
137
-
138
- ## Test
139
-
140
- We tested many versions of PyTorch. Please create an issue if you are having trouble.
141
-
142
- - [x] torch 1.6.0
143
- - [x] torch 1.7.1
144
- - [x] torch 1.8.0
145
- - [x] torch 1.9.0
146
-
147
- ## Citation
148
-
149
- ```
150
- @inproceedings{deng2019arcface,
151
- title={Arcface: Additive angular margin loss for deep face recognition},
152
- author={Deng, Jiankang and Guo, Jia and Xue, Niannan and Zafeiriou, Stefanos},
153
- booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
154
- pages={4690--4699},
155
- year={2019}
156
- }
157
- @inproceedings{an2020partical_fc,
158
- title={Partial FC: Training 10 Million Identities on a Single Machine},
159
- author={An, Xiang and Zhu, Xuhan and Xiao, Yang and Wu, Lan and Zhang, Ming and Gao, Yuan and Qin, Bin and
160
- Zhang, Debing and Fu, Ying},
161
- booktitle={Arxiv 2010.05222},
162
- year={2020}
163
- }
164
- ```
 
spaces/4Taps/SadTalker/src/utils/audio.py DELETED
@@ -1,136 +0,0 @@
1
- import librosa
2
- import librosa.filters
3
- import numpy as np
4
- # import tensorflow as tf
5
- from scipy import signal
6
- from scipy.io import wavfile
7
- from src.utils.hparams import hparams as hp
8
-
9
- def load_wav(path, sr):
10
- return librosa.core.load(path, sr=sr)[0]
11
-
12
- def save_wav(wav, path, sr):
13
- wav *= 32767 / max(0.01, np.max(np.abs(wav)))
14
- #proposed by @dsmiller
15
- wavfile.write(path, sr, wav.astype(np.int16))
16
-
17
- def save_wavenet_wav(wav, path, sr):
18
- # NOTE: librosa.output.write_wav was removed in librosa 0.8; on newer versions use soundfile.write instead
- librosa.output.write_wav(path, wav, sr=sr)
19
-
20
- def preemphasis(wav, k, preemphasize=True):
21
- if preemphasize:
22
- return signal.lfilter([1, -k], [1], wav)
23
- return wav
24
-
25
- def inv_preemphasis(wav, k, inv_preemphasize=True):
26
- if inv_preemphasize:
27
- return signal.lfilter([1], [1, -k], wav)
28
- return wav
29
-
30
- def get_hop_size():
31
- hop_size = hp.hop_size
32
- if hop_size is None:
33
- assert hp.frame_shift_ms is not None
34
- hop_size = int(hp.frame_shift_ms / 1000 * hp.sample_rate)
35
- return hop_size
36
-
37
- def linearspectrogram(wav):
38
- D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
39
- S = _amp_to_db(np.abs(D)) - hp.ref_level_db
40
-
41
- if hp.signal_normalization:
42
- return _normalize(S)
43
- return S
44
-
45
- def melspectrogram(wav):
46
- D = _stft(preemphasis(wav, hp.preemphasis, hp.preemphasize))
47
- S = _amp_to_db(_linear_to_mel(np.abs(D))) - hp.ref_level_db
48
-
49
- if hp.signal_normalization:
50
- return _normalize(S)
51
- return S
52
-
53
- def _lws_processor():
54
- import lws
55
- return lws.lws(hp.n_fft, get_hop_size(), fftsize=hp.win_size, mode="speech")
56
-
57
- def _stft(y):
58
- if hp.use_lws:
59
- return _lws_processor().stft(y).T  # _lws_processor takes no arguments
60
- else:
61
- return librosa.stft(y=y, n_fft=hp.n_fft, hop_length=get_hop_size(), win_length=hp.win_size)
62
-
63
- ##########################################################
64
- #Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)
65
- def num_frames(length, fsize, fshift):
66
- """Compute number of time frames of spectrogram
67
- """
68
- pad = (fsize - fshift)
69
- if length % fshift == 0:
70
- M = (length + pad * 2 - fsize) // fshift + 1
71
- else:
72
- M = (length + pad * 2 - fsize) // fshift + 2
73
- return M
74
-
75
-
76
- def pad_lr(x, fsize, fshift):
77
- """Compute left and right padding
78
- """
79
- M = num_frames(len(x), fsize, fshift)
80
- pad = (fsize - fshift)
81
- T = len(x) + 2 * pad
82
- r = (M - 1) * fshift + fsize - T
83
- return pad, pad + r
84
- ##########################################################
85
- #Librosa correct padding
86
- def librosa_pad_lr(x, fsize, fshift):
87
- return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]
88
-
89
- # Conversions
90
- _mel_basis = None
91
-
92
- def _linear_to_mel(spectogram):
93
- global _mel_basis
94
- if _mel_basis is None:
95
- _mel_basis = _build_mel_basis()
96
- return np.dot(_mel_basis, spectogram)
97
-
98
- def _build_mel_basis():
99
- assert hp.fmax <= hp.sample_rate // 2
100
- return librosa.filters.mel(sr=hp.sample_rate, n_fft=hp.n_fft, n_mels=hp.num_mels,
101
- fmin=hp.fmin, fmax=hp.fmax)
102
-
103
- def _amp_to_db(x):
104
- min_level = np.exp(hp.min_level_db / 20 * np.log(10))
105
- return 20 * np.log10(np.maximum(min_level, x))
106
-
107
- def _db_to_amp(x):
108
- return np.power(10.0, (x) * 0.05)
109
-
110
- def _normalize(S):
111
- if hp.allow_clipping_in_normalization:
112
- if hp.symmetric_mels:
113
- return np.clip((2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value,
114
- -hp.max_abs_value, hp.max_abs_value)
115
- else:
116
- return np.clip(hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db)), 0, hp.max_abs_value)
117
-
118
- assert S.max() <= 0 and S.min() - hp.min_level_db >= 0
119
- if hp.symmetric_mels:
120
- return (2 * hp.max_abs_value) * ((S - hp.min_level_db) / (-hp.min_level_db)) - hp.max_abs_value
121
- else:
122
- return hp.max_abs_value * ((S - hp.min_level_db) / (-hp.min_level_db))
123
-
124
- def _denormalize(D):
125
- if hp.allow_clipping_in_normalization:
126
- if hp.symmetric_mels:
127
- return (((np.clip(D, -hp.max_abs_value,
128
- hp.max_abs_value) + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value))
129
- + hp.min_level_db)
130
- else:
131
- return ((np.clip(D, 0, hp.max_abs_value) * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
132
-
133
- if hp.symmetric_mels:
134
- return (((D + hp.max_abs_value) * -hp.min_level_db / (2 * hp.max_abs_value)) + hp.min_level_db)
135
- else:
136
- return ((D * -hp.min_level_db / hp.max_abs_value) + hp.min_level_db)
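- A minimal usage sketch for this module (the wav path below is an assumption; the rates and sizes come from `hparams`):
-
- ```python
- from src.utils import audio
- from src.utils.hparams import hparams as hp
-
- wav = audio.load_wav("speech.wav", sr=hp.sample_rate)  # load and resample
- mel = audio.melspectrogram(wav)                        # [num_mels, num_frames]
- print(mel.shape)
- ```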
 
spaces/7eu7d7/anime-ai-detect-fucker/attacker/__init__.py DELETED
@@ -1,3 +0,0 @@
1
- from .base import *
2
- from .PGD import *
3
- from .FGSM import *
 
spaces/AIFILMS/StyleGANEX/models/stylegan2/lpips/base_model.py DELETED
@@ -1,58 +0,0 @@
1
- import os
2
- import numpy as np
3
- import torch
4
- from torch.autograd import Variable
5
- from pdb import set_trace as st
6
- from IPython import embed
7
-
8
- class BaseModel():
9
- def __init__(self):
10
- pass
11
-
12
- def name(self):
13
- return 'BaseModel'
14
-
15
- def initialize(self, use_gpu=True, gpu_ids=[0]):
16
- self.use_gpu = use_gpu
17
- self.gpu_ids = gpu_ids
18
-
19
- def forward(self):
20
- pass
21
-
22
- def get_image_paths(self):
23
- # NOTE: shadowed by the second get_image_paths definition below, which returns self.image_paths
- pass
24
-
25
- def optimize_parameters(self):
26
- pass
27
-
28
- def get_current_visuals(self):
29
- return self.input
30
-
31
- def get_current_errors(self):
32
- return {}
33
-
34
- def save(self, label):
35
- pass
36
-
37
- # helper saving function that can be used by subclasses
38
- def save_network(self, network, path, network_label, epoch_label):
39
- save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
40
- save_path = os.path.join(path, save_filename)
41
- torch.save(network.state_dict(), save_path)
42
-
43
- # helper loading function that can be used by subclasses
44
- def load_network(self, network, network_label, epoch_label):
45
- save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
46
- save_path = os.path.join(self.save_dir, save_filename)
47
- print('Loading network from %s'%save_path)
48
- network.load_state_dict(torch.load(save_path))
49
-
50
- def update_learning_rate(self):
51
- pass
52
-
53
- def get_image_paths(self):
54
- return self.image_paths
55
-
56
- def save_done(self, flag=False):
57
- np.save(os.path.join(self.save_dir, 'done_flag'),flag)
58
- np.savetxt(os.path.join(self.save_dir, 'done_flag'),[flag,],fmt='%i')
 
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/scheduler.py DELETED
@@ -1,24 +0,0 @@
1
- import numpy as np
2
-
3
-
4
- def assign_learning_rate(optimizer, new_lr):
5
- for param_group in optimizer.param_groups:
6
- param_group["lr"] = new_lr
7
-
8
-
9
- def _warmup_lr(base_lr, warmup_length, step):
10
- return base_lr * (step + 1) / warmup_length
11
-
12
-
13
- def cosine_lr(optimizer, base_lr, warmup_length, steps):
14
- def _lr_adjuster(step):
15
- if step < warmup_length:
16
- lr = _warmup_lr(base_lr, warmup_length, step)
17
- else:
18
- e = step - warmup_length
19
- es = steps - warmup_length
20
- lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr
21
- assign_learning_rate(optimizer, lr)
22
- return lr
23
-
24
- return _lr_adjuster
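- A short usage sketch (the model, optimizer, and step counts are illustrative):
-
- ```python
- import torch
-
- model = torch.nn.Linear(8, 2)
- optimizer = torch.optim.SGD(model.parameters(), lr=0.0)  # lr is overwritten every step
- adjust_lr = cosine_lr(optimizer, base_lr=1e-3, warmup_length=100, steps=1000)
-
- for step in range(1000):
-     lr = adjust_lr(step)  # linear warmup for 100 steps, then cosine decay
-     # ... forward / backward / optimizer.step() ...
- ```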
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/vocoder/bigvgan/models.py DELETED
@@ -1,414 +0,0 @@
1
- # Copyright (c) 2022 NVIDIA CORPORATION.
2
- # Licensed under the MIT license.
3
-
4
- # Adapted from https://github.com/jik876/hifi-gan under the MIT license.
5
- # LICENSE is in incl_licenses directory.
6
-
7
-
8
- import torch
9
- import torch.nn.functional as F
10
- import torch.nn as nn
11
- from torch.nn import Conv1d, ConvTranspose1d, Conv2d
12
- from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
13
- import numpy as np
14
- from .activations import Snake,SnakeBeta
15
- from .alias_free_torch import *
16
- import os
17
- from omegaconf import OmegaConf
18
-
19
- LRELU_SLOPE = 0.1
20
-
21
- def init_weights(m, mean=0.0, std=0.01):
22
- classname = m.__class__.__name__
23
- if classname.find("Conv") != -1:
24
- m.weight.data.normal_(mean, std)
25
-
26
-
27
- def get_padding(kernel_size, dilation=1):
28
- return int((kernel_size*dilation - dilation)/2)
29
-
30
- class AMPBlock1(torch.nn.Module):
31
- def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5), activation=None):
32
- super(AMPBlock1, self).__init__()
33
- self.h = h
34
-
35
- self.convs1 = nn.ModuleList([
36
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
37
- padding=get_padding(kernel_size, dilation[0]))),
38
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
39
- padding=get_padding(kernel_size, dilation[1]))),
40
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
41
- padding=get_padding(kernel_size, dilation[2])))
42
- ])
43
- self.convs1.apply(init_weights)
44
-
45
- self.convs2 = nn.ModuleList([
46
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
47
- padding=get_padding(kernel_size, 1))),
48
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
49
- padding=get_padding(kernel_size, 1))),
50
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
51
- padding=get_padding(kernel_size, 1)))
52
- ])
53
- self.convs2.apply(init_weights)
54
-
55
- self.num_layers = len(self.convs1) + len(self.convs2) # total number of conv layers
56
-
57
- if activation == 'snake': # periodic nonlinearity with snake function and anti-aliasing
58
- self.activations = nn.ModuleList([
59
- Activation1d(
60
- activation=Snake(channels, alpha_logscale=h.snake_logscale))
61
- for _ in range(self.num_layers)
62
- ])
63
- elif activation == 'snakebeta': # periodic nonlinearity with snakebeta function and anti-aliasing
64
- self.activations = nn.ModuleList([
65
- Activation1d(
66
- activation=SnakeBeta(channels, alpha_logscale=h.snake_logscale))
67
- for _ in range(self.num_layers)
68
- ])
69
- else:
70
- raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")
71
-
72
- def forward(self, x):
73
- acts1, acts2 = self.activations[::2], self.activations[1::2]
74
- for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2):
75
- xt = a1(x)
76
- xt = c1(xt)
77
- xt = a2(xt)
78
- xt = c2(xt)
79
- x = xt + x
80
-
81
- return x
82
-
83
- def remove_weight_norm(self):
84
- for l in self.convs1:
85
- remove_weight_norm(l)
86
- for l in self.convs2:
87
- remove_weight_norm(l)
88
-
89
-
90
- class AMPBlock2(torch.nn.Module):
91
- def __init__(self, h, channels, kernel_size=3, dilation=(1, 3), activation=None):
92
- super(AMPBlock2, self).__init__()
93
- self.h = h
94
-
95
- self.convs = nn.ModuleList([
96
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
97
- padding=get_padding(kernel_size, dilation[0]))),
98
- weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
99
- padding=get_padding(kernel_size, dilation[1])))
100
- ])
101
- self.convs.apply(init_weights)
102
-
103
- self.num_layers = len(self.convs) # total number of conv layers
104
-
105
- if activation == 'snake': # periodic nonlinearity with snake function and anti-aliasing
106
- self.activations = nn.ModuleList([
107
- Activation1d(
108
- activation=Snake(channels, alpha_logscale=h.snake_logscale))
109
- for _ in range(self.num_layers)
110
- ])
111
- elif activation == 'snakebeta': # periodic nonlinearity with snakebeta function and anti-aliasing
112
- self.activations = nn.ModuleList([
113
- Activation1d(
114
- activation=SnakeBeta(channels, alpha_logscale=h.snake_logscale))
115
- for _ in range(self.num_layers)
116
- ])
117
- else:
118
- raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")
119
-
120
- def forward(self, x):
121
- for c, a in zip (self.convs, self.activations):
122
- xt = a(x)
123
- xt = c(xt)
124
- x = xt + x
125
-
126
- return x
127
-
128
- def remove_weight_norm(self):
129
- for l in self.convs:
130
- remove_weight_norm(l)
131
-
132
-
133
- class BigVGAN(torch.nn.Module):
134
- # this is our main BigVGAN model. Applies anti-aliased periodic activation for resblocks.
135
- def __init__(self, h):
136
- super(BigVGAN, self).__init__()
137
- self.h = h
138
-
139
- self.num_kernels = len(h.resblock_kernel_sizes)
140
- self.num_upsamples = len(h.upsample_rates)
141
-
142
- # pre conv
143
- self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3))
144
-
145
- # define which AMPBlock to use. BigVGAN uses AMPBlock1 as default
146
- resblock = AMPBlock1 if h.resblock == '1' else AMPBlock2
147
-
148
- # transposed conv-based upsamplers. does not apply anti-aliasing
149
- self.ups = nn.ModuleList()
150
- for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
151
- self.ups.append(nn.ModuleList([
152
- weight_norm(ConvTranspose1d(h.upsample_initial_channel // (2 ** i),
153
- h.upsample_initial_channel // (2 ** (i + 1)),
154
- k, u, padding=(k - u) // 2))
155
- ]))
156
-
157
- # residual blocks using anti-aliased multi-periodicity composition modules (AMP)
158
- self.resblocks = nn.ModuleList()
159
- for i in range(len(self.ups)):
160
- ch = h.upsample_initial_channel // (2 ** (i + 1))
161
- for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
162
- self.resblocks.append(resblock(h, ch, k, d, activation=h.activation))
163
-
164
- # post conv
165
- if h.activation == "snake": # periodic nonlinearity with snake function and anti-aliasing
166
- activation_post = Snake(ch, alpha_logscale=h.snake_logscale)
167
- self.activation_post = Activation1d(activation=activation_post)
168
- elif h.activation == "snakebeta": # periodic nonlinearity with snakebeta function and anti-aliasing
169
- activation_post = SnakeBeta(ch, alpha_logscale=h.snake_logscale)
170
- self.activation_post = Activation1d(activation=activation_post)
171
- else:
172
- raise NotImplementedError("activation incorrectly specified. check the config file and look for 'activation'.")
173
-
174
- self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
175
-
176
- # weight initialization
177
- for i in range(len(self.ups)):
178
- self.ups[i].apply(init_weights)
179
- self.conv_post.apply(init_weights)
180
-
181
- def forward(self, x):
182
- # pre conv
183
- x = self.conv_pre(x)
184
-
185
- for i in range(self.num_upsamples):
186
- # upsampling
187
- for i_up in range(len(self.ups[i])):
188
- x = self.ups[i][i_up](x)
189
- # AMP blocks
190
- xs = None
191
- for j in range(self.num_kernels):
192
- if xs is None:
193
- xs = self.resblocks[i * self.num_kernels + j](x)
194
- else:
195
- xs += self.resblocks[i * self.num_kernels + j](x)
196
- x = xs / self.num_kernels
197
-
198
- # post conv
199
- x = self.activation_post(x)
200
- x = self.conv_post(x)
201
- x = torch.tanh(x)
202
-
203
- return x
204
-
205
- def remove_weight_norm(self):
206
- print('Removing weight norm...')
207
- for l in self.ups:
208
- for l_i in l:
209
- remove_weight_norm(l_i)
210
- for l in self.resblocks:
211
- l.remove_weight_norm()
212
- remove_weight_norm(self.conv_pre)
213
- remove_weight_norm(self.conv_post)
214
-
215
-
216
- class DiscriminatorP(torch.nn.Module):
217
- def __init__(self, h, period, kernel_size=5, stride=3, use_spectral_norm=False):
218
- super(DiscriminatorP, self).__init__()
219
- self.period = period
220
- self.d_mult = h.discriminator_channel_mult
221
- norm_f = weight_norm if use_spectral_norm == False else spectral_norm
222
- self.convs = nn.ModuleList([
223
- norm_f(Conv2d(1, int(32*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
224
- norm_f(Conv2d(int(32*self.d_mult), int(128*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
225
- norm_f(Conv2d(int(128*self.d_mult), int(512*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
226
- norm_f(Conv2d(int(512*self.d_mult), int(1024*self.d_mult), (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
227
- norm_f(Conv2d(int(1024*self.d_mult), int(1024*self.d_mult), (kernel_size, 1), 1, padding=(2, 0))),
228
- ])
229
- self.conv_post = norm_f(Conv2d(int(1024*self.d_mult), 1, (3, 1), 1, padding=(1, 0)))
230
-
231
- def forward(self, x):
232
- fmap = []
233
-
234
- # 1d to 2d
235
- b, c, t = x.shape
236
- if t % self.period != 0: # pad first
237
- n_pad = self.period - (t % self.period)
238
- x = F.pad(x, (0, n_pad), "reflect")
239
- t = t + n_pad
240
- x = x.view(b, c, t // self.period, self.period)
241
-
242
- for l in self.convs:
243
- x = l(x)
244
- x = F.leaky_relu(x, LRELU_SLOPE)
245
- fmap.append(x)
246
- x = self.conv_post(x)
247
- fmap.append(x)
248
- x = torch.flatten(x, 1, -1)
249
-
250
- return x, fmap
251
-
252
-
253
- class MultiPeriodDiscriminator(torch.nn.Module):
254
- def __init__(self, h):
255
- super(MultiPeriodDiscriminator, self).__init__()
256
- self.mpd_reshapes = h.mpd_reshapes
257
- print("mpd_reshapes: {}".format(self.mpd_reshapes))
258
- discriminators = [DiscriminatorP(h, rs, use_spectral_norm=h.use_spectral_norm) for rs in self.mpd_reshapes]
259
- self.discriminators = nn.ModuleList(discriminators)
260
-
261
- def forward(self, y, y_hat):
262
- y_d_rs = []
263
- y_d_gs = []
264
- fmap_rs = []
265
- fmap_gs = []
266
- for i, d in enumerate(self.discriminators):
267
- y_d_r, fmap_r = d(y)
268
- y_d_g, fmap_g = d(y_hat)
269
- y_d_rs.append(y_d_r)
270
- fmap_rs.append(fmap_r)
271
- y_d_gs.append(y_d_g)
272
- fmap_gs.append(fmap_g)
273
-
274
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
275
-
276
-
277
- class DiscriminatorR(nn.Module):
278
- def __init__(self, cfg, resolution):
279
- super().__init__()
280
-
281
- self.resolution = resolution
282
- assert len(self.resolution) == 3, \
283
- "MRD layer requires list with len=3, got {}".format(self.resolution)
284
- self.lrelu_slope = LRELU_SLOPE
285
-
286
- norm_f = weight_norm if cfg.use_spectral_norm == False else spectral_norm
287
- if hasattr(cfg, "mrd_use_spectral_norm"):
288
- print("INFO: overriding MRD use_spectral_norm as {}".format(cfg.mrd_use_spectral_norm))
289
- norm_f = weight_norm if cfg.mrd_use_spectral_norm == False else spectral_norm
290
- self.d_mult = cfg.discriminator_channel_mult
291
- if hasattr(cfg, "mrd_channel_mult"):
292
- print("INFO: overriding mrd channel multiplier as {}".format(cfg.mrd_channel_mult))
293
- self.d_mult = cfg.mrd_channel_mult
294
-
295
- self.convs = nn.ModuleList([
296
- norm_f(nn.Conv2d(1, int(32*self.d_mult), (3, 9), padding=(1, 4))),
297
- norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
298
- norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
299
- norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 9), stride=(1, 2), padding=(1, 4))),
300
- norm_f(nn.Conv2d(int(32*self.d_mult), int(32*self.d_mult), (3, 3), padding=(1, 1))),
301
- ])
302
- self.conv_post = norm_f(nn.Conv2d(int(32 * self.d_mult), 1, (3, 3), padding=(1, 1)))
303
-
304
- def forward(self, x):
305
- fmap = []
306
-
307
- x = self.spectrogram(x)
308
- x = x.unsqueeze(1)
309
- for l in self.convs:
310
- x = l(x)
311
- x = F.leaky_relu(x, self.lrelu_slope)
312
- fmap.append(x)
313
- x = self.conv_post(x)
314
- fmap.append(x)
315
- x = torch.flatten(x, 1, -1)
316
-
317
- return x, fmap
318
-
319
- def spectrogram(self, x):
320
- n_fft, hop_length, win_length = self.resolution
321
- x = F.pad(x, (int((n_fft - hop_length) / 2), int((n_fft - hop_length) / 2)), mode='reflect')
322
- x = x.squeeze(1)
323
- x = torch.stft(x, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False, return_complex=True)
324
- x = torch.view_as_real(x) # [B, F, TT, 2]
325
- mag = torch.norm(x, p=2, dim =-1) #[B, F, TT]
326
-
327
- return mag
328
-
329
-
330
- class MultiResolutionDiscriminator(nn.Module):
331
- def __init__(self, cfg, debug=False):
332
- super().__init__()
333
- self.resolutions = cfg.resolutions
334
- assert len(self.resolutions) == 3,\
335
- "MRD requires list of list with len=3, each element having a list with len=3. got {}".\
336
- format(self.resolutions)
337
- self.discriminators = nn.ModuleList(
338
- [DiscriminatorR(cfg, resolution) for resolution in self.resolutions]
339
- )
340
-
341
- def forward(self, y, y_hat):
342
- y_d_rs = []
343
- y_d_gs = []
344
- fmap_rs = []
345
- fmap_gs = []
346
-
347
- for i, d in enumerate(self.discriminators):
348
- y_d_r, fmap_r = d(x=y)
349
- y_d_g, fmap_g = d(x=y_hat)
350
- y_d_rs.append(y_d_r)
351
- fmap_rs.append(fmap_r)
352
- y_d_gs.append(y_d_g)
353
- fmap_gs.append(fmap_g)
354
-
355
- return y_d_rs, y_d_gs, fmap_rs, fmap_gs
356
-
357
-
358
- def feature_loss(fmap_r, fmap_g):
359
- loss = 0
360
- for dr, dg in zip(fmap_r, fmap_g):
361
- for rl, gl in zip(dr, dg):
362
- loss += torch.mean(torch.abs(rl - gl))
363
-
364
- return loss*2
365
-
366
-
367
- def discriminator_loss(disc_real_outputs, disc_generated_outputs):
368
- loss = 0
369
- r_losses = []
370
- g_losses = []
371
- for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
372
- r_loss = torch.mean((1-dr)**2)
373
- g_loss = torch.mean(dg**2)
374
- loss += (r_loss + g_loss)
375
- r_losses.append(r_loss.item())
376
- g_losses.append(g_loss.item())
377
-
378
- return loss, r_losses, g_losses
379
-
380
-
381
- def generator_loss(disc_outputs):
382
- loss = 0
383
- gen_losses = []
384
- for dg in disc_outputs:
385
- l = torch.mean((1-dg)**2)
386
- gen_losses.append(l)
387
- loss += l
388
-
389
- return loss, gen_losses
390
-
391
-
392
-
393
- class VocoderBigVGAN(object):
394
- def __init__(self, ckpt_vocoder,device='cuda'):
395
- vocoder_sd = torch.load(os.path.join(ckpt_vocoder,'best_netG.pt'), map_location='cpu')
396
-
397
- vocoder_args = OmegaConf.load(os.path.join(ckpt_vocoder,'args.yml'))
398
-
399
- self.generator = BigVGAN(vocoder_args)
400
- self.generator.load_state_dict(vocoder_sd['generator'])
401
- self.generator.eval()
402
-
403
- self.device = device
404
- self.generator.to(self.device)
405
-
406
- def vocode(self, spec):
407
- with torch.no_grad():
408
- if isinstance(spec,np.ndarray):
409
- spec = torch.from_numpy(spec).unsqueeze(0)
410
- spec = spec.to(dtype=torch.float32,device=self.device)
411
- return self.generator(spec).squeeze().cpu().numpy()
412
-
413
- def __call__(self, spec):  # the input is a mel spectrogram, not a waveform
414
- return self.vocode(spec)
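- A minimal usage sketch (the checkpoint directory is an assumption; it must contain the `best_netG.pt` and `args.yml` files loaded above):
-
- ```python
- import numpy as np
-
- vocoder = VocoderBigVGAN("path/to/ckpt_vocoder", device="cuda")
- mel = np.random.randn(80, 100).astype(np.float32)  # [num_mels, frames], stand-in for a real mel
- wav = vocoder.vocode(mel)                          # 1-D numpy waveform
- ```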
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_2_ProfileRecogition/mmpretrain/work_dirs/mobilevit-small_4xb32_2000e_3c_noF/__init__.py DELETED
File without changes
spaces/AUBADA-ALARABI/poetry202/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Poetry2023
3
- emoji: 👁
4
- colorFrom: green
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 3.16.0
8
- app_file: app.py
9
- pinned: false
10
- duplicated_from: Abdllh/poetry202
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Abhi5ingh/fashionsd/sdfile.py DELETED
@@ -1,89 +0,0 @@
1
- import gc
2
- import datetime
3
- import os
4
- import re
5
- from typing import Literal
6
-
7
- import streamlit as st
8
- import torch
9
- from diffusers import (
10
- StableDiffusionPipeline,
11
- StableDiffusionControlNetPipeline,
12
- ControlNetModel,
13
- EulerDiscreteScheduler,
14
- DDIMScheduler,
15
- )
16
-
17
- PIPELINES = Literal["txt2img", "sketch2img"]
18
-
19
- @st.cache_resource(max_entries=1)
20
- def get_pipelines(name: PIPELINES, enable_cpu_offload=False) -> StableDiffusionPipeline:
21
- pipe = None
22
-
23
- if name == "txt2img":
24
- pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16,cache_dir="D:/huggingface/CACHE/")
25
- pipe.unet.load_attn_procs("./")
26
- pipe.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
27
- elif name == "sketch2img":
28
- controlnet = ControlNetModel.from_pretrained("Abhi5ingh/model_dresscode", torch_dtype=torch.float16,cache_dir="D:/huggingface/CACHE/")
29
- pipe = StableDiffusionControlNetPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", controlnet = controlnet, torch_dtype = torch.float16,cache_dir="D:/huggingface/CACHE/")
30
- pipe.unet.load_attn_procs("./")
31
- pipe.safety_checker = lambda images, **kwargs: (images, [False] * len(images))
32
-
33
- if pipe is None:
34
- raise Exception(f"Pipeline not Found {name}")
35
-
36
- if enable_cpu_offload:
37
- print("Enabling cpu offloading for the given pipeline")
38
- pipe.enable_model_cpu_offload()
39
- else:
40
- pipe = pipe.to("cuda")
41
- return pipe
42
-
43
- def generate(
44
- prompt,
45
- pipeline_name: PIPELINES,
46
- image = None,
47
- num_inference_steps = 30,
48
- negative_prompt = None,
49
- width = 512,
50
- height = 512,
51
- guidance_scale = 7.5,
52
- controlnet_conditioning_scale = None,
53
- enable_cpu_offload= False):
54
- negative_prompt = negative_prompt if negative_prompt else None
55
- p = st.progress(0)
56
- callback = lambda step,*_: p.progress(step/num_inference_steps)
57
- pipe = get_pipelines(pipeline_name,enable_cpu_offload=enable_cpu_offload)
58
- torch.cuda.empty_cache()
59
-
60
- kwargs = dict(
61
- prompt = prompt,
62
- negative_prompt=negative_prompt,
63
- num_inference_steps=num_inference_steps,
64
- callback=callback,
65
- guidance_scale=guidance_scale,
66
- )
67
- print("kwargs",kwargs)
68
-
69
- if pipeline_name =="sketch2img" and image:
70
- kwargs.update(image=image,controlnet_conditioning_scale=controlnet_conditioning_scale)
71
- elif pipeline_name == "txt2img":
72
- kwargs.update(width = width, height = height)
73
- else:
74
- raise Exception(
75
- f"Cannot generate image for pipeline {pipeline_name} and {prompt}")
76
- images = pipe(**kwargs).images
77
- image = images[0]
78
-
79
- os.makedirs("outputs", exist_ok=True)
80
-
81
- filename = (
82
- "outputs/"
83
- + re.sub(r"\s+", "_",prompt)[:30]
84
- + f"_{datetime.datetime.now().timestamp()}"
85
- )
86
- image.save(f"{filename}.png")
87
- with open(f"{filename}.txt", "w") as f:
88
- f.write(f"Prompt: {prompt}\n\nNegative Prompt:{negative_prompt}")
89
- return image
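- A minimal usage sketch for `generate` (the prompt is illustrative; inside a running Streamlit app the progress bar is rendered on the page):
-
- ```python
- image = generate(
-     prompt="a red evening dress, studio lighting",
-     pipeline_name="txt2img",
-     num_inference_steps=30,
-     width=512,
-     height=512,
- )
- image.save("preview.png")
- ```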
 
spaces/Abubakari/Sepsis-fastapi-prediction-app/Dockerfile DELETED
@@ -1,14 +0,0 @@
1
- FROM python:3.9
2
-
3
- WORKDIR /code
4
-
5
- COPY ./requirements.txt /code/requirements.txt
6
-
7
- RUN pip install -r /code/requirements.txt
8
-
9
- COPY . .
10
-
11
- # Expose the port on which the application will run
12
- EXPOSE 7860
13
-
14
- CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/r/[id]/message/[messageId]/prompt/$types.d.ts DELETED
@@ -1,9 +0,0 @@
1
- import type * as Kit from '@sveltejs/kit';
2
-
3
- type Expand<T> = T extends infer O ? { [K in keyof O]: O[K] } : never;
4
- type RouteParams = { id: string; messageId: string }
5
- type RouteId = '/r/[id]/message/[messageId]/prompt';
6
-
7
- export type EntryGenerator = () => Promise<Array<RouteParams>> | Array<RouteParams>;
8
- export type RequestHandler = Kit.RequestHandler<RouteParams, RouteId>;
9
- export type RequestEvent = Kit.RequestEvent<RouteParams, RouteId>;
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/types/src/routes/settings/$types.d.ts DELETED
@@ -1,28 +0,0 @@
1
- import type * as Kit from '@sveltejs/kit';
2
-
3
- type Expand<T> = T extends infer O ? { [K in keyof O]: O[K] } : never;
4
- type RouteParams = { }
5
- type RouteId = '/settings';
6
- type MaybeWithVoid<T> = {} extends T ? T | void : T;
7
- export type RequiredKeys<T> = { [K in keyof T]-?: {} extends { [P in K]: T[K] } ? never : K; }[keyof T];
8
- type OutputDataShape<T> = MaybeWithVoid<Omit<App.PageData, RequiredKeys<T>> & Partial<Pick<App.PageData, keyof T & keyof App.PageData>> & Record<string, any>>
9
- type EnsureDefined<T> = T extends null | undefined ? {} : T;
10
- type OptionalUnion<U extends Record<string, any>, A extends keyof U = U extends U ? keyof U : never> = U extends unknown ? { [P in Exclude<A, keyof U>]?: never } & U : never;
11
- export type Snapshot<T = any> = Kit.Snapshot<T>;
12
- type PageServerParentData = EnsureDefined<import('../$types.js').LayoutServerData>;
13
- type PageParentData = EnsureDefined<import('../$types.js').LayoutData>;
14
-
15
- export type PageServerLoad<OutputData extends OutputDataShape<PageServerParentData> = OutputDataShape<PageServerParentData>> = Kit.ServerLoad<RouteParams, PageServerParentData, OutputData, RouteId>;
16
- export type PageServerLoadEvent = Parameters<PageServerLoad>[0];
17
- type ExcludeActionFailure<T> = T extends Kit.ActionFailure<any> ? never : T extends void ? never : T;
18
- type ActionsSuccess<T extends Record<string, (...args: any) => any>> = { [Key in keyof T]: ExcludeActionFailure<Awaited<ReturnType<T[Key]>>>; }[keyof T];
19
- type ExtractActionFailure<T> = T extends Kit.ActionFailure<infer X> ? X extends void ? never : X : never;
20
- type ActionsFailure<T extends Record<string, (...args: any) => any>> = { [Key in keyof T]: Exclude<ExtractActionFailure<Awaited<ReturnType<T[Key]>>>, void>; }[keyof T];
21
- type ActionsExport = typeof import('../../../../../src/routes/settings/+page.server.js').actions
22
- export type SubmitFunction = Kit.SubmitFunction<Expand<ActionsSuccess<ActionsExport>>, Expand<ActionsFailure<ActionsExport>>>
23
- export type ActionData = Expand<Kit.AwaitedActions<ActionsExport>> | null;
24
- export type PageServerData = null;
25
- export type PageData = Expand<PageParentData>;
26
- export type Action<OutputData extends Record<string, any> | void = Record<string, any> | void> = Kit.Action<RouteParams, OutputData, RouteId>
27
- export type Actions<OutputData extends Record<string, any> | void = Record<string, any> | void> = Kit.Actions<RouteParams, OutputData, RouteId>
28
- export type RequestEvent = Kit.RequestEvent<RouteParams, RouteId>;
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/builders/CreateToast.js DELETED
@@ -1,8 +0,0 @@
1
- import CreateAnyLabel from './utils/CreateAnyLabel.js';
2
- import Toast from '../../toast/Toast.js';
3
-
4
- var CreateToast = function (scene, data, view, styles, customBuilders) {
5
- return CreateAnyLabel(scene, data, view, styles, customBuilders, Toast);
6
- }
7
-
8
- export default CreateToast;
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/dnnlib/tflib/ops/fused_bias_act.py DELETED
@@ -1,214 +0,0 @@
1
- # Copyright (c) SenseTime Research. All rights reserved.
2
-
3
- # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
4
- #
5
- # This work is made available under the Nvidia Source Code License-NC.
6
- # To view a copy of this license, visit
7
- # https://nvlabs.github.io/stylegan2/license.html
8
-
9
- """Custom TensorFlow ops for efficient bias and activation."""
10
-
11
- import os
12
- import numpy as np
13
- import tensorflow as tf
14
- from .. import custom_ops
15
- from ...util import EasyDict
16
-
17
-
18
- def _get_plugin():
19
- return custom_ops.get_plugin(os.path.splitext(__file__)[0] + '.cu')
20
-
21
- # ----------------------------------------------------------------------------
22
-
23
-
24
- activation_funcs = {
25
- 'linear': EasyDict(func=lambda x, **_: x, def_alpha=None, def_gain=1.0, cuda_idx=1, ref='y', zero_2nd_grad=True),
26
- 'relu': EasyDict(func=lambda x, **_: tf.nn.relu(x), def_alpha=None, def_gain=np.sqrt(2), cuda_idx=2, ref='y', zero_2nd_grad=True),
27
- 'lrelu': EasyDict(func=lambda x, alpha, **_: tf.nn.leaky_relu(x, alpha), def_alpha=0.2, def_gain=np.sqrt(2), cuda_idx=3, ref='y', zero_2nd_grad=True),
28
- 'tanh': EasyDict(func=lambda x, **_: tf.nn.tanh(x), def_alpha=None, def_gain=1.0, cuda_idx=4, ref='y', zero_2nd_grad=False),
29
- 'sigmoid': EasyDict(func=lambda x, **_: tf.nn.sigmoid(x), def_alpha=None, def_gain=1.0, cuda_idx=5, ref='y', zero_2nd_grad=False),
30
- 'elu': EasyDict(func=lambda x, **_: tf.nn.elu(x), def_alpha=None, def_gain=1.0, cuda_idx=6, ref='y', zero_2nd_grad=False),
31
- 'selu': EasyDict(func=lambda x, **_: tf.nn.selu(x), def_alpha=None, def_gain=1.0, cuda_idx=7, ref='y', zero_2nd_grad=False),
32
- 'softplus': EasyDict(func=lambda x, **_: tf.nn.softplus(x), def_alpha=None, def_gain=1.0, cuda_idx=8, ref='y', zero_2nd_grad=False),
33
- 'swish': EasyDict(func=lambda x, **_: tf.nn.sigmoid(x) * x, def_alpha=None, def_gain=np.sqrt(2), cuda_idx=9, ref='x', zero_2nd_grad=False),
34
- }
35
-
36
- # ----------------------------------------------------------------------------
37
-
38
-
39
- def fused_bias_act(x, b=None, axis=1, act='linear', alpha=None, gain=None, impl='cuda'):
40
- r"""Fused bias and activation function.
41
-
42
- Adds bias `b` to activation tensor `x`, evaluates activation function `act`,
43
- and scales the result by `gain`. Each of the steps is optional. In most cases,
44
- the fused op is considerably more efficient than performing the same calculation
45
- using standard TensorFlow ops. It supports first and second order gradients,
46
- but not third order gradients.
47
-
48
- Args:
49
- x: Input activation tensor. Can have any shape, but if `b` is defined, the
50
- dimension corresponding to `axis`, as well as the rank, must be known.
51
- b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type
52
- as `x`. The shape must be known, and it must match the dimension of `x`
53
- corresponding to `axis`.
54
- axis: The dimension in `x` corresponding to the elements of `b`.
55
- The value of `axis` is ignored if `b` is not specified.
56
- act: Name of the activation function to evaluate, or `"linear"` to disable.
57
- Can be e.g. `"relu"`, `"lrelu"`, `"tanh"`, `"sigmoid"`, `"swish"`, etc.
58
- See `activation_funcs` for a full list. `None` is not allowed.
59
- alpha: Shape parameter for the activation function, or `None` to use the default.
60
- gain: Scaling factor for the output tensor, or `None` to use default.
61
- See `activation_funcs` for the default scaling of each activation function.
62
- If unsure, consider specifying `1.0`.
63
- impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
64
-
65
- Returns:
66
- Tensor of the same shape and datatype as `x`.
67
- """
68
-
69
- impl_dict = {
70
- 'ref': _fused_bias_act_ref,
71
- 'cuda': _fused_bias_act_cuda,
72
- }
73
- return impl_dict[impl](x=x, b=b, axis=axis, act=act, alpha=alpha, gain=gain)
74
-
75
- # ----------------------------------------------------------------------------
76
-
77
-
78
- def _fused_bias_act_ref(x, b, axis, act, alpha, gain):
79
- """Slow reference implementation of `fused_bias_act()` using standard TensorFlow ops."""
80
-
81
- # Validate arguments.
82
- x = tf.convert_to_tensor(x)
83
- b = tf.convert_to_tensor(
84
- b) if b is not None else tf.constant([], dtype=x.dtype)
85
- act_spec = activation_funcs[act]
86
- assert b.shape.rank == 1 and (
87
- b.shape[0] == 0 or b.shape[0] == x.shape[axis])
88
- assert b.shape[0] == 0 or 0 <= axis < x.shape.rank
89
- if alpha is None:
90
- alpha = act_spec.def_alpha
91
- if gain is None:
92
- gain = act_spec.def_gain
93
-
94
- # Add bias.
95
- if b.shape[0] != 0:
96
- x += tf.reshape(b, [-1 if i ==
97
- axis else 1 for i in range(x.shape.rank)])
98
-
99
- # Evaluate activation function.
100
- x = act_spec.func(x, alpha=alpha)
101
-
102
- # Scale by gain.
103
- if gain != 1:
104
- x *= gain
105
- return x
106
-
107
- # ----------------------------------------------------------------------------
108
-
109
-
110
- def _fused_bias_act_cuda(x, b, axis, act, alpha, gain):
111
- """Fast CUDA implementation of `fused_bias_act()` using custom ops."""
112
-
113
- # Validate arguments.
114
- x = tf.convert_to_tensor(x)
115
- empty_tensor = tf.constant([], dtype=x.dtype)
116
- b = tf.convert_to_tensor(b) if b is not None else empty_tensor
117
- act_spec = activation_funcs[act]
118
- assert b.shape.rank == 1 and (
119
- b.shape[0] == 0 or b.shape[0] == x.shape[axis])
120
- assert b.shape[0] == 0 or 0 <= axis < x.shape.rank
121
- if alpha is None:
122
- alpha = act_spec.def_alpha
123
- if gain is None:
124
- gain = act_spec.def_gain
125
-
126
- # Special cases.
127
- if act == 'linear' and b.shape[0] == 0 and gain == 1.0:  # b was converted to a tensor above, so check for an empty bias rather than None
128
- return x
129
- if act_spec.cuda_idx is None:
130
- return _fused_bias_act_ref(x=x, b=b, axis=axis, act=act, alpha=alpha, gain=gain)
131
-
132
- # CUDA kernel.
133
- cuda_kernel = _get_plugin().fused_bias_act
134
- cuda_kwargs = dict(axis=axis, act=act_spec.cuda_idx,
135
- alpha=alpha, gain=gain)
136
-
137
- # Forward pass: y = func(x, b).
138
- def func_y(x, b):
139
- y = cuda_kernel(x=x, b=b, ref=empty_tensor, grad=0, **cuda_kwargs)
140
- y.set_shape(x.shape)
141
- return y
142
-
143
- # Backward pass: dx, db = grad(dy, x, y)
144
- def grad_dx(dy, x, y):
145
- ref = {'x': x, 'y': y}[act_spec.ref]
146
- dx = cuda_kernel(x=dy, b=empty_tensor, ref=ref, grad=1, **cuda_kwargs)
147
- dx.set_shape(x.shape)
148
- return dx
149
-
150
- def grad_db(dx):
151
- if b.shape[0] == 0:
152
- return empty_tensor
153
- db = dx
154
- if axis < x.shape.rank - 1:
155
- db = tf.reduce_sum(db, list(range(axis + 1, x.shape.rank)))
156
- if axis > 0:
157
- db = tf.reduce_sum(db, list(range(axis)))
158
- db.set_shape(b.shape)
159
- return db
160
-
161
- # Second order gradients: d_dy, d_x = grad2(d_dx, d_db, x, y)
162
- def grad2_d_dy(d_dx, d_db, x, y):
163
- ref = {'x': x, 'y': y}[act_spec.ref]
164
- d_dy = cuda_kernel(x=d_dx, b=d_db, ref=ref, grad=1, **cuda_kwargs)
165
- d_dy.set_shape(x.shape)
166
- return d_dy
167
-
168
- def grad2_d_x(d_dx, d_db, x, y):
169
- ref = {'x': x, 'y': y}[act_spec.ref]
170
- d_x = cuda_kernel(x=d_dx, b=d_db, ref=ref, grad=2, **cuda_kwargs)
171
- d_x.set_shape(x.shape)
172
- return d_x
173
-
174
- # Fast version for piecewise-linear activation funcs.
175
- @tf.custom_gradient
176
- def func_zero_2nd_grad(x, b):
177
- y = func_y(x, b)
178
-
179
- @tf.custom_gradient
180
- def grad(dy):
181
- dx = grad_dx(dy, x, y)
182
- db = grad_db(dx)
183
-
184
- def grad2(d_dx, d_db):
185
- d_dy = grad2_d_dy(d_dx, d_db, x, y)
186
- return d_dy
187
- return (dx, db), grad2
188
- return y, grad
189
-
190
- # Slow version for general activation funcs.
191
- @tf.custom_gradient
192
- def func_nonzero_2nd_grad(x, b):
193
- y = func_y(x, b)
194
-
195
- def grad_wrap(dy):
196
- @tf.custom_gradient
197
- def grad_impl(dy, x):
198
- dx = grad_dx(dy, x, y)
199
- db = grad_db(dx)
200
-
201
- def grad2(d_dx, d_db):
202
- d_dy = grad2_d_dy(d_dx, d_db, x, y)
203
- d_x = grad2_d_x(d_dx, d_db, x, y)
204
- return d_dy, d_x
205
- return (dx, db), grad2
206
- return grad_impl(dy, x)
207
- return y, grad_wrap
208
-
209
- # Which version to use?
210
- if act_spec.zero_2nd_grad:
211
- return func_zero_2nd_grad(x, b)
212
- return func_nonzero_2nd_grad(x, b)
213
-
214
- # ----------------------------------------------------------------------------
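- For intuition, the reference path above is equivalent to the following NumPy sketch (illustrative only; the CUDA path additionally fuses the custom first- and second-order gradients):
-
- ```python
- import numpy as np
-
- def fused_bias_act_numpy(x, b=None, axis=1, alpha=0.2, gain=np.sqrt(2)):
-     """NumPy mirror of the 'lrelu' reference path: add bias, leaky-ReLU, then scale."""
-     if b is not None:
-         shape = [-1 if i == axis else 1 for i in range(x.ndim)]
-         x = x + b.reshape(shape)            # broadcast the bias along `axis`
-     x = np.where(x >= 0, x, alpha * x)      # leaky ReLU
-     return gain * x
-
- y = fused_bias_act_numpy(np.random.randn(4, 8, 16, 16), b=np.zeros(8))
- ```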
 
spaces/Amrrs/DragGan-Inversion/stylegan_human/pti/training/projectors/w_plus_projector.py DELETED
@@ -1,163 +0,0 @@
1
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- """Project given image to the latent space of pretrained network pickle."""
10
-
11
- import copy
12
- import wandb
13
- import numpy as np
14
- import torch
15
- import torch.nn.functional as F
16
- from tqdm import tqdm
17
- from configs import global_config, hyperparameters
18
- import dnnlib
19
- from utils.log_utils import log_image_from_w
20
-
21
-
22
- def project(
23
- G,
24
- # [C,H,W] and dynamic range [0,255], W & H must match G output resolution
25
- target: torch.Tensor,
26
- *,
27
- num_steps=1000,
28
- w_avg_samples=10000,
29
- initial_learning_rate=0.01,
30
- initial_noise_factor=0.05,
31
- lr_rampdown_length=0.25,
32
- lr_rampup_length=0.05,
33
- noise_ramp_length=0.75,
34
- regularize_noise_weight=1e5,
35
- verbose=False,
36
- device: torch.device,
37
- use_wandb=False,
38
- initial_w=None,
39
- image_log_step=global_config.image_rec_result_log_snapshot,
40
- w_name: str
41
- ):
42
- print('inside training/projectors/w_plus_projector')
43
- print(target.shape, G.img_channels, G.img_resolution * 2, G.img_resolution)
44
- assert target.shape == (
45
- G.img_channels, G.img_resolution * 2, G.img_resolution)
46
-
47
- def logprint(*args):
48
- if verbose:
49
- print(*args)
50
-
51
- G = copy.deepcopy(G).eval().requires_grad_(
52
- False).to(device).float() # type: ignore
53
-
54
- # Compute w stats.
55
- logprint(
56
- f'Computing W midpoint and stddev using {w_avg_samples} samples...')
57
- z_samples = np.random.RandomState(123).randn(w_avg_samples, G.z_dim)
58
- w_samples = G.mapping(torch.from_numpy(
59
- z_samples).to(device), None) # [N, L, C]
60
- w_samples = w_samples[:, :1, :].cpu(
61
- ).numpy().astype(np.float32) # [N, 1, C]
62
- w_avg = np.mean(w_samples, axis=0, keepdims=True) # [1, 1, C]
63
- w_avg_tensor = torch.from_numpy(w_avg).to(global_config.device)
64
- w_std = (np.sum((w_samples - w_avg) ** 2) / w_avg_samples) ** 0.5
65
-
66
- start_w = initial_w if initial_w is not None else w_avg
67
-
68
- # Setup noise inputs.
69
- noise_bufs = {name: buf for (
70
- name, buf) in G.synthesis.named_buffers() if 'noise_const' in name}
71
-
72
- # Load VGG16 feature detector.
73
- url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/vgg16.pt'
74
- with dnnlib.util.open_url(url) as f:
75
- vgg16 = torch.jit.load(f).eval().to(device)
76
-
77
- # Features for target image.
78
- target_images = target.unsqueeze(0).to(device).to(torch.float32)
79
- if target_images.shape[2] > 256:
80
- target_images = F.interpolate(
81
- target_images, size=(256, 256), mode='area')
82
- target_features = vgg16(
83
- target_images, resize_images=False, return_lpips=True)
84
-
85
- start_w = np.repeat(start_w, G.mapping.num_ws, axis=1)
86
- w_opt = torch.tensor(start_w, dtype=torch.float32, device=device,
87
- requires_grad=True) # pylint: disable=not-callable
88
-
89
- optimizer = torch.optim.Adam([w_opt] + list(noise_bufs.values()), betas=(0.9, 0.999),
90
- lr=hyperparameters.first_inv_lr)
91
-
92
- # Init noise.
93
- for buf in noise_bufs.values():
94
- buf[:] = torch.randn_like(buf)
95
- buf.requires_grad = True
96
-
97
- for step in tqdm(range(num_steps)):
98
-
99
- # Learning rate schedule.
100
- t = step / num_steps
101
- w_noise_scale = w_std * initial_noise_factor * \
102
- max(0.0, 1.0 - t / noise_ramp_length) ** 2
103
- lr_ramp = min(1.0, (1.0 - t) / lr_rampdown_length)
104
- lr_ramp = 0.5 - 0.5 * np.cos(lr_ramp * np.pi)
105
- lr_ramp = lr_ramp * min(1.0, t / lr_rampup_length)
106
- lr = initial_learning_rate * lr_ramp
107
- for param_group in optimizer.param_groups:
108
- param_group['lr'] = lr
109
-
110
- # Synth images from opt_w.
111
- w_noise = torch.randn_like(w_opt) * w_noise_scale
112
- ws = (w_opt + w_noise)
113
-
114
- synth_images = G.synthesis(ws, noise_mode='const', force_fp32=True)
115
-
116
- # Downsample image to 256x256 if it's larger than that. VGG was built for 224x224 images.
117
- synth_images = (synth_images + 1) * (255 / 2)
118
- if synth_images.shape[2] > 256:
119
- synth_images = F.interpolate(
120
- synth_images, size=(256, 256), mode='area')
121
-
122
- # Features for synth images.
123
- synth_features = vgg16(
124
- synth_images, resize_images=False, return_lpips=True)
125
- dist = (target_features - synth_features).square().sum()
126
-
127
- # Noise regularization.
128
- reg_loss = 0.0
129
- for v in noise_bufs.values():
130
- noise = v[None, None, :, :] # must be [1,1,H,W] for F.avg_pool2d()
131
- while True:
132
- reg_loss += (noise * torch.roll(noise,
133
- shifts=1, dims=3)).mean() ** 2
134
- reg_loss += (noise * torch.roll(noise,
135
- shifts=1, dims=2)).mean() ** 2
136
- if noise.shape[2] <= 8:
137
- break
138
- noise = F.avg_pool2d(noise, kernel_size=2)
139
- loss = dist + reg_loss * regularize_noise_weight
140
-
141
- if step % image_log_step == 0:
142
- with torch.no_grad():
143
- if use_wandb:
144
- global_config.training_step += 1
145
- wandb.log({f'first projection _{w_name}': loss.detach(
146
- ).cpu()}, step=global_config.training_step)
147
- log_image_from_w(w_opt, G, w_name)
148
-
149
- # Step
150
- optimizer.zero_grad(set_to_none=True)
151
- loss.backward()
152
- optimizer.step()
153
- logprint(
154
- f'step {step + 1:>4d}/{num_steps}: dist {dist:<4.2f} loss {float(loss):<5.2f}')
155
-
156
- # Normalize noise.
157
- with torch.no_grad():
158
- for buf in noise_bufs.values():
159
- buf -= buf.mean()
160
- buf *= buf.square().mean().rsqrt()
161
-
162
- del G
163
- return w_opt
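- A minimal invocation sketch (the generator `G`, the target shape, and the step count are assumptions consistent with the assert at the top of `project`):
-
- ```python
- import torch
-
- # target must be [img_channels, 2 * img_resolution, img_resolution] with values in [0, 255]
- target = torch.zeros(3, 2048, 1024)
- w = project(G, target, num_steps=500, device=torch.device("cuda"), w_name="sample0")
- ```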
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/colossalai/inference.py DELETED
@@ -1,12 +0,0 @@
1
- import torch
2
-
3
- from diffusers import StableDiffusionPipeline
4
-
5
-
6
- model_id = "path-to-your-trained-model"
7
- pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
8
-
9
- prompt = "A photo of sks dog in a bucket"
10
- image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
11
-
12
- image.save("dog-bucket.png")
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_k_diffusion.py DELETED
@@ -1,136 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 HuggingFace Inc.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
-
16
- import gc
17
- import unittest
18
-
19
- import numpy as np
20
- import torch
21
-
22
- from diffusers import StableDiffusionKDiffusionPipeline
23
- from diffusers.utils import slow, torch_device
24
- from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
25
-
26
-
27
- enable_full_determinism()
28
-
29
-
30
- @slow
31
- @require_torch_gpu
32
- class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
33
- def tearDown(self):
34
- # clean up the VRAM after each test
35
- super().tearDown()
36
- gc.collect()
37
- torch.cuda.empty_cache()
38
-
39
- def test_stable_diffusion_1(self):
40
- sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
41
- sd_pipe = sd_pipe.to(torch_device)
42
- sd_pipe.set_progress_bar_config(disable=None)
43
-
44
- sd_pipe.set_scheduler("sample_euler")
45
-
46
- prompt = "A painting of a squirrel eating a burger"
47
- generator = torch.manual_seed(0)
48
- output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
49
-
50
- image = output.images
51
-
52
- image_slice = image[0, -3:, -3:, -1]
53
-
54
- assert image.shape == (1, 512, 512, 3)
55
- expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
56
-
57
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
58
-
59
- def test_stable_diffusion_2(self):
60
- sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
61
- sd_pipe = sd_pipe.to(torch_device)
62
- sd_pipe.set_progress_bar_config(disable=None)
63
-
64
- sd_pipe.set_scheduler("sample_euler")
65
-
66
- prompt = "A painting of a squirrel eating a burger"
67
- generator = torch.manual_seed(0)
68
- output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")
69
-
70
- image = output.images
71
-
72
- image_slice = image[0, -3:, -3:, -1]
73
-
74
- assert image.shape == (1, 512, 512, 3)
75
- expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
76
-
77
- assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
78
-
79
- def test_stable_diffusion_karras_sigmas(self):
80
- sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
81
- sd_pipe = sd_pipe.to(torch_device)
82
- sd_pipe.set_progress_bar_config(disable=None)
83
-
84
- sd_pipe.set_scheduler("sample_dpmpp_2m")
85
-
86
- prompt = "A painting of a squirrel eating a burger"
87
- generator = torch.manual_seed(0)
88
- output = sd_pipe(
89
- [prompt],
90
- generator=generator,
91
- guidance_scale=7.5,
92
- num_inference_steps=15,
93
- output_type="np",
94
- use_karras_sigmas=True,
95
- )
96
-
97
- image = output.images
98
-
99
- image_slice = image[0, -3:, -3:, -1]
100
-
101
- assert image.shape == (1, 512, 512, 3)
102
- expected_slice = np.array(
103
- [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
104
- )
105
-
106
- assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
107
-
108
- def test_stable_diffusion_noise_sampler_seed(self):
109
- sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
110
- sd_pipe = sd_pipe.to(torch_device)
111
- sd_pipe.set_progress_bar_config(disable=None)
112
-
113
- sd_pipe.set_scheduler("sample_dpmpp_sde")
114
-
115
- prompt = "A painting of a squirrel eating a burger"
116
- seed = 0
117
- images1 = sd_pipe(
118
- [prompt],
119
- generator=torch.manual_seed(seed),
120
- noise_sampler_seed=seed,
121
- guidance_scale=9.0,
122
- num_inference_steps=20,
123
- output_type="np",
124
- ).images
125
- images2 = sd_pipe(
126
- [prompt],
127
- generator=torch.manual_seed(seed),
128
- noise_sampler_seed=seed,
129
- guidance_scale=9.0,
130
- num_inference_steps=20,
131
- output_type="np",
132
- ).images
133
-
134
- assert images1.shape == (1, 512, 512, 3)
135
- assert images2.shape == (1, 512, 512, 3)
136
- assert np.abs(images1.flatten() - images2.flatten()).max() < 1e-2
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/gcnet/gcnet_r50-d8_512x1024_40k_cityscapes.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/gcnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
3
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
4
- ]
 
 
spaces/Anew5128/Anew51/constants.py DELETED
@@ -1,50 +0,0 @@
1
- # Constants
2
- DEFAULT_CUDA_DEVICE = "cuda:0"
3
- # Also try: 'Qiliang/bart-large-cnn-samsum-ElectrifAi_v10'
4
- DEFAULT_SUMMARIZATION_MODEL = "Qiliang/bart-large-cnn-samsum-ChatGPT_v3"
5
- # Also try: 'joeddav/distilbert-base-uncased-go-emotions-student'
6
- DEFAULT_CLASSIFICATION_MODEL = "nateraw/bert-base-uncased-emotion"
7
- # Also try: 'Salesforce/blip-image-captioning-base'
8
- DEFAULT_CAPTIONING_MODEL = "Salesforce/blip-image-captioning-large"
9
- DEFAULT_SD_MODEL = "ckpt/anything-v4.5-vae-swapped"
10
- DEFAULT_EMBEDDING_MODEL = "sentence-transformers/all-mpnet-base-v2"
11
- DEFAULT_REMOTE_SD_HOST = "127.0.0.1"
12
- DEFAULT_REMOTE_SD_PORT = 7860
13
- DEFAULT_CHROMA_PORT = 8000
14
- SILERO_SAMPLES_PATH = "tts_samples"
15
- SILERO_SAMPLE_TEXT = "The quick brown fox jumps over the lazy dog"
16
- # ALL_MODULES = ['caption', 'summarize', 'classify', 'keywords', 'prompt', 'sd']
17
- DEFAULT_SUMMARIZE_PARAMS = {
18
- "temperature": 1.0,
19
- "repetition_penalty": 1.0,
20
- "max_length": 500,
21
- "min_length": 200,
22
- "length_penalty": 1.5,
23
- "bad_words": [
24
- "\n",
25
- '"',
26
- "*",
27
- "[",
28
- "]",
29
- "{",
30
- "}",
31
- ":",
32
- "(",
33
- ")",
34
- "<",
35
- ">",
36
- "Â",
37
- "The text ends",
38
- "The story ends",
39
- "The text is",
40
- "The story is",
41
- ],
42
- }
43
-
44
- PROMPT_PREFIX = "best quality, absurdres, "
45
- NEGATIVE_PROMPT = """lowres, bad anatomy, error body, error hair, error arm,
46
- error hands, bad hands, error fingers, bad fingers, missing fingers,
47
- error legs, bad legs, multiple legs, missing legs, error lighting,
48
- error shadow, error reflection, text, error, extra digit, fewer digits,
49
- cropped, worst quality, low quality, normal quality, jpeg artifacts,
50
- signature, watermark, username, blurry"""
 
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/multimodal/DOCS.md DELETED
@@ -1,85 +0,0 @@
1
- # Technical description of multimodal extension
2
-
3
- ## Working principle
4
- The multimodal extension does most of the work required for any image input:
5
-
6
- - adds the UI
7
- - saves the images as base64 JPEGs to history
8
- - provides the hooks to the UI
9
- - if there are images in the prompt, it:
10
- - splits the prompt to text and image parts
11
- - adds image start/end markers to text parts, then encodes and embeds the text parts
12
- - calls the vision pipeline to embed the images
13
- - stitches the embeddings together, and returns them to text generation
14
- - loads the appropriate vision pipeline, selected either from model name, or by specifying --multimodal-pipeline parameter
15
-
16
- Now, for the pipelines, they:
17
-
18
- - load the required vision models
19
- - return some consts, for example the number of tokens taken up by image
20
- - and most importantly: return the embeddings for LLM, given a list of images
21
-
22
- ## Prompts/history
23
-
24
- To save images in the prompt/history, this extension uses a base64 JPEG wrapped in an HTML tag, like so:
25
- ```
26
- <img src="data:image/jpeg;base64,{img_str}">
27
- ```
28
- where `{img_str}` is the actual image data. This format means the images can be displayed in the UI for free. Note that the format must match exactly; the regex used to find the images is: `<img src="data:image/jpeg;base64,([A-Za-z0-9+/=]+)">`.
29
-
30
- ## LLM input
31
- To describe the input, let's walk through an example prompt:
32
- ```
33
- text1<image1>text2<image2>text3
34
- ```
35
- where `textN` is the N-th text part and `<imageN>` is the N-th image, in the HTML format specified above.
36
-
37
- **The first step is to split the prompt into image/text parts**, so we get:
38
- ```
39
- ['text1', '<image1>', 'text2', '<image2>', 'text3']
40
- ```
41
- This is done in the `MultimodalEmbedder._split_prompt(...)` function, which returns a list of `PromptPart`s - dataclasses wrapping the separate parts.
42
-
43
- This function also appends the image start/end markers to the text parts; these are provided by the `AbstractMultimodalPipeline.image_start()` / `AbstractMultimodalPipeline.image_end()` functions. If the image start marker is `<Img>` and the end marker is `</Img>`, this function will return:
44
- ```
45
- ['text1<Img>', '<image1>', '</Img>text2<Img>', '<image2>', '</Img>text3']
46
- ```
47
-
48
- **The returned prompt parts are then turned into token embeddings.**
49
-
50
- First, they are converted to token IDs: for the text this is done using the standard `modules.text_generation.encode()` function, while for the images the token IDs are replaced with placeholders. A placeholder is a list of `N` copies of the `placeholder token id`, where `N` is specified using `AbstractMultimodalPipeline.num_image_embeds()` and the placeholder token IDs using `AbstractMultimodalPipeline.placeholder_token_id()`.
51
-
52
- Now, based on the token IDs, the prompt might get truncated, especially if `max_new_tokens` is set unreasonably high. Unfortunately, this can't be done simply by trimming the prompt until it is short enough: doing so can split the prompt in the middle of an image embedding, which usually breaks generation. In that case, the entire image needs to be removed from the input instead. This is done inside the `MultimodalEmbedder._encode_text(...)` function.
53
-
54
- **After the tokenization, the tokens need to be embedded**; the text and images are once again treated separately.
55
-
56
- The text parts are turned into embeddings using the `AbstractMultimodalPipeline.embed_tokens(...)` function. It uses the model's standard embedding function, but to support many LLMs the actual function is returned by the pipeline (as it might differ between LLMs); for LLaMA it is `shared.model.model.embed_tokens(...)`.
57
-
58
- The image parts are turned into embeddings using the `AbstractMultimodalPipeline.embed_images(...)` function. This function is specific to a given pipeline: it takes the images as input, forwards them through the vision model/projector, and returns the embeddings.
59
-
60
- **Now, the returned embeddings are stitched together** using `torch.cat()`, creating the final input to the LLM.
61
-
62
- ## Pipelines
63
-
64
- All of the pipelines should subclass the `AbstractMultimodalPipeline` class. The idea is to allow new pipelines to be added in the same way as user extensions - a git clone into `extensions/multimodal/pipelines`.
65
-
66
- The pipelines describe the vision part, containing the vision model/multimodal projector. Each pipeline should have a unique `name()`, which the user then selects via the `--multimodal-pipeline` CLI argument. For an example, see `pipelines/llava/llava.py`.
67
-
68
- ## Pipeline modules
69
-
70
- Pipelines are organized into "pipeline modules" - subdirectories in the `pipelines` directory. Each pipeline module should contain a file called `pipelines.py` with the following fields:
71
- - `available_pipelines: List[str]` - list of pipelines provided by this module, shown as the list of available pipelines to the user
72
- `def get_pipeline(name: str, params: dict) -> Optional[AbstractMultimodalPipeline]`: - a function to get a concrete pipeline by `name`; if `name` doesn't match any, it should return `None`. `params` is the user settings for the multimodal extension
73
- `def get_pipeline_from_model_name(model_name: str, params: dict) -> Optional[AbstractMultimodalPipeline]`: - a function to get a pipeline from `model_name`; it should be eager to return `None` unless the determination can be made clearly (for example: minigpt-4 is based on vicuna, so it should never return a pipeline from the model name alone, but llava can, as it has its own specific LLM finetune)
74
-
75
- **NOTE**: A pipeline module should lazy-import the pipelines only when necessary, and it should keep its imports to a minimum.
76
-
77
- ## Pipeline params
78
-
79
- The pipelines will get the extension `params` in the constructor. They should honor the following fields:
80
- - `vision_device` - string, specifying `torch.device` to run the vision model (CLIP/ViT) on
81
- - `vision_bits` - int, number of fp bits to load the vision model(s) in
82
- - `projector_device` - string, specifying `torch.device` to run the projector models (Linear layers, QFormer, etc.) on
83
- - `projector_bits` - int, number of fp bits to load the projector models in
84
-
85
- As helpers, `AbstractMultimodalPipeline` provides the `_get_device(self, setting_name: str, params: dict)` and `_get_dtype(self, setting_name: str, params: dict)` functions, which parse the string/int settings and return a `torch.device` / `torch.dtype`.
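
As a concrete illustration of the storage format documented above, here is a minimal sketch of round-tripping an image through the `<img src="data:image/jpeg;base64,...">` tag. The helpers `image_to_tag` and `tags_to_images` are hypothetical names written only against the tag format and regex quoted in this document, not functions from the extension.

```python
import base64
import io
import re
from typing import List

from PIL import Image

# The exact regex quoted above.
IMG_RE = re.compile(r'<img src="data:image/jpeg;base64,([A-Za-z0-9+/=]+)">')

def image_to_tag(image: Image.Image) -> str:
    """Serialize a PIL image into the tag format the extension stores in history."""
    buf = io.BytesIO()
    image.convert("RGB").save(buf, format="JPEG")
    img_str = base64.b64encode(buf.getvalue()).decode("ascii")
    return f'<img src="data:image/jpeg;base64,{img_str}">'

def tags_to_images(prompt: str) -> List[Image.Image]:
    """Recover every embedded image from a prompt via the documented regex."""
    return [Image.open(io.BytesIO(base64.b64decode(b64))) for b64 in IMG_RE.findall(prompt)]

tag = image_to_tag(Image.new("RGB", (8, 8), "red"))
assert tags_to_images(f"text1{tag}text2")[0].size == (8, 8)
```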
 
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/superboogav2/notebook_handler.py DELETED
@@ -1,40 +0,0 @@
1
- """
2
- This module is responsible for handling and modifying the notebook text.
3
- """
4
- import re
5
-
6
- import extensions.superboogav2.parameters as parameters
7
-
8
- from modules import shared
9
- from modules.logging_colors import logger
10
- from extensions.superboogav2.utils import create_context_text
11
-
12
- from .data_processor import preprocess_text
13
-
14
- def _remove_special_tokens(string):
15
- pattern = r'(<\|begin-user-input\|>|<\|end-user-input\|>|<\|injection-point\|>)'
16
- return re.sub(pattern, '', string)
17
-
18
-
19
- def input_modifier_internal(string, collector):
20
- # Sanity check.
21
- if shared.is_chat():
22
- return string
23
-
24
- # Find the user input
25
- pattern = re.compile(r"<\|begin-user-input\|>(.*?)<\|end-user-input\|>", re.DOTALL)
26
- match = re.search(pattern, string)
27
- if match:
28
- # Preprocess the user prompt.
29
- user_input = match.group(1).strip()
30
- user_input = preprocess_text(user_input)
31
-
32
- logger.debug(f"Preprocessed User Input: {user_input}")
33
-
34
- # Get the most similar chunks
35
- results = collector.get_sorted_by_dist(user_input, n_results=parameters.get_chunk_count(), max_token_count=int(parameters.get_max_token_count()))
36
-
37
- # Make the injection
38
- string = string.replace('<|injection-point|>', create_context_text(results))
39
-
40
- return _remove_special_tokens(string)
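
For reference, a toy demonstration of the marker flow this module implements: extract the text between the user-input markers, then substitute retrieved context at the injection point. The sample string and the bracketed context are made up; only the marker names and the regex come from the module above, and the f-string stands in for `create_context_text(collector.get_sorted_by_dist(...))`.

```python
import re

# Hypothetical notebook text containing the module's special markers.
text = (
    "Some earlier notebook context.\n"
    "<|begin-user-input|>What does superbooga inject?<|end-user-input|>\n"
    "Answer below:\n<|injection-point|>\n"
)

pattern = re.compile(r"<\|begin-user-input\|>(.*?)<\|end-user-input\|>", re.DOTALL)
user_input = pattern.search(text).group(1).strip()

# Stand-in for the real context built from the collector's search results.
context = f"[retrieved context for: {user_input}]"
print(text.replace("<|injection-point|>", context))
```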
 
 
spaces/Aphrodite/AIChatBot-SL-Chatbot-Blenderbot/app.py DELETED
@@ -1,28 +0,0 @@
1
- import streamlit as st
2
- #from streamlit_chat import message as st_message
3
- from streamlit_chat import message as st_message
4
- from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
5
-
6
- st.title("JCS Advanced AI Chatting Bot")
7
-
8
- if "history" not in st.session_state:
9
- st.session_state.history = []
10
-
11
- def get_models():
12
- tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
13
- model = BlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
14
- return tokenizer, model
15
-
16
- def generate_answer():
17
- tokenizer, model = get_models()
18
- user_message = st.session_state.input_text
19
- inputs = tokenizer(st.session_state.input_text, return_tensors="pt")
20
- result = model.generate(**inputs)
21
- message_bot = tokenizer.decode(result[0], skip_special_tokens=True) # .replace("<s>", "").replace("</s>", "")
22
- st.session_state.history.append({"message": user_message, "is_user": True})
23
- st.session_state.history.append({"message": message_bot, "is_user": False})
24
-
25
- st.text_input("Response", key="input_text", on_change=generate_answer)
26
-
27
- for chat in st.session_state.history:
28
- st_message(**chat)
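
One design note on the app above: `get_models()` re-instantiates BlenderBot on every `on_change` callback. A common Streamlit remedy is to cache the loader. The sketch below is a suggestion rather than part of the original app, and assumes a Streamlit version that provides `st.cache_resource` (1.18+).

```python
import streamlit as st
from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration

@st.cache_resource  # load the 400M-parameter model once per process, not once per message
def get_models():
    tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
    model = BlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
    return tokenizer, model
```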
 
 
spaces/AzumaSeren100/XuanShen-Bert-VITS2/text/japanese.py DELETED
@@ -1,104 +0,0 @@
1
- # modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py
2
- import re
3
- import sys
4
-
5
- import pyopenjtalk
6
-
7
- from text import symbols
8
-
9
- # Regular expression matching Japanese without punctuation marks:
10
- _japanese_characters = re.compile(
11
- r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
12
-
13
- # Regular expression matching non-Japanese characters or punctuation marks:
14
- _japanese_marks = re.compile(
15
- r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
16
-
17
- # List of (symbol, Japanese) pairs for marks:
18
- _symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [
19
- ('%', 'パーセント')
20
- ]]
21
-
22
-
23
- # List of (consonant, sokuon) pairs:
24
- _real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [
25
- (r'Q([↑↓]*[kg])', r'k#\1'),
26
- (r'Q([↑↓]*[tdjʧ])', r't#\1'),
27
- (r'Q([↑↓]*[sʃ])', r's\1'),
28
- (r'Q([↑↓]*[pb])', r'p#\1')
29
- ]]
30
-
31
- # List of (consonant, hatsuon) pairs:
32
- _real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [
33
- (r'N([↑↓]*[pbm])', r'm\1'),
34
- (r'N([↑↓]*[ʧʥj])', r'n^\1'),
35
- (r'N([↑↓]*[tdn])', r'n\1'),
36
- (r'N([↑↓]*[kg])', r'ŋ\1')
37
- ]]
38
-
39
-
40
-
41
- def post_replace_ph(ph):
42
- rep_map = {
43
- ':': ',',
44
- ';': ',',
45
- ',': ',',
46
- '。': '.',
47
- '!': '!',
48
- '?': '?',
49
- '\n': '.',
50
- "·": ",",
51
- '、': ",",
52
- '...': '…',
53
- 'v': "V"
54
- }
55
- if ph in rep_map.keys():
56
- ph = rep_map[ph]
57
- if ph in symbols:
58
- return ph
59
- if ph not in symbols:
60
- ph = 'UNK'
61
- return ph
62
-
63
- def symbols_to_japanese(text):
64
- for regex, replacement in _symbols_to_japanese:
65
- text = re.sub(regex, replacement, text)
66
- return text
67
-
68
-
69
- def preprocess_jap(text):
70
- '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
71
- text = symbols_to_japanese(text)
72
- sentences = re.split(_japanese_marks, text)
73
- marks = re.findall(_japanese_marks, text)
74
- text = []
75
- for i, sentence in enumerate(sentences):
76
- if re.match(_japanese_characters, sentence):
77
- p = pyopenjtalk.g2p(sentence)
78
- text += p.split(" ")
79
-
80
- if i < len(marks):
81
- text += [marks[i].replace(' ', '')]
82
- return text
83
-
84
- def text_normalize(text):
85
- # todo: jap text normalize
86
- return text
87
-
88
- def g2p(norm_text):
89
- phones = preprocess_jap(norm_text)
90
- phones = [post_replace_ph(i) for i in phones]
91
- # todo: implement tones and word2ph
92
- tones = [0 for i in phones]
93
- word2ph = [1 for i in phones]
94
- return phones, tones, word2ph
95
-
96
-
97
- if __name__ == '__main__':
98
- for line in open("../../../Downloads/transcript_utf8.txt").readlines():
99
- text = line.split(":")[1]
100
- phones, tones, word2ph = g2p(text)
101
- for p in phones:
102
- if p == "z":
103
- print(text, phones)
104
- sys.exit(0)
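
Note that `preprocess_jap()` above relies on `pyopenjtalk.g2p()` returning space-separated phonemes. A quick check of that assumption (the output shown is the expected form, from memory rather than from the file above):

```python
import pyopenjtalk

# g2p() returns space-separated phonemes; preprocess_jap() splits them on " ".
print(pyopenjtalk.g2p("こんにちは"))  # expected: "k o N n i ch i w a"
```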
 
 
spaces/Bart92/RVC_HF/infer/modules/uvr5/mdxnet.py DELETED
@@ -1,246 +0,0 @@
1
- import os
2
- import logging
3
-
4
- logger = logging.getLogger(__name__)
5
-
6
- import librosa
7
- import numpy as np
8
- import soundfile as sf
9
- import torch
10
- from tqdm import tqdm
11
-
12
- cpu = torch.device("cpu")
13
-
14
-
15
- class ConvTDFNetTrim:
16
- def __init__(
17
- self, device, model_name, target_name, L, dim_f, dim_t, n_fft, hop=1024
18
- ):
19
- super(ConvTDFNetTrim, self).__init__()
20
-
21
- self.dim_f = dim_f
22
- self.dim_t = 2**dim_t
23
- self.n_fft = n_fft
24
- self.hop = hop
25
- self.n_bins = self.n_fft // 2 + 1
26
- self.chunk_size = hop * (self.dim_t - 1)
27
- self.window = torch.hann_window(window_length=self.n_fft, periodic=True).to(
28
- device
29
- )
30
- self.target_name = target_name
31
- self.blender = "blender" in model_name
32
-
33
- self.dim_c = 4
34
- out_c = self.dim_c * 4 if target_name == "*" else self.dim_c
35
- self.freq_pad = torch.zeros(
36
- [1, out_c, self.n_bins - self.dim_f, self.dim_t]
37
- ).to(device)
38
-
39
- self.n = L // 2
40
-
41
- def stft(self, x):
42
- x = x.reshape([-1, self.chunk_size])
43
- x = torch.stft(
44
- x,
45
- n_fft=self.n_fft,
46
- hop_length=self.hop,
47
- window=self.window,
48
- center=True,
49
- return_complex=True,
50
- )
51
- x = torch.view_as_real(x)
52
- x = x.permute([0, 3, 1, 2])
53
- x = x.reshape([-1, 2, 2, self.n_bins, self.dim_t]).reshape(
54
- [-1, self.dim_c, self.n_bins, self.dim_t]
55
- )
56
- return x[:, :, : self.dim_f]
57
-
58
- def istft(self, x, freq_pad=None):
59
- freq_pad = (
60
- self.freq_pad.repeat([x.shape[0], 1, 1, 1])
61
- if freq_pad is None
62
- else freq_pad
63
- )
64
- x = torch.cat([x, freq_pad], -2)
65
- c = 4 * 2 if self.target_name == "*" else 2
66
- x = x.reshape([-1, c, 2, self.n_bins, self.dim_t]).reshape(
67
- [-1, 2, self.n_bins, self.dim_t]
68
- )
69
- x = x.permute([0, 2, 3, 1])
70
- x = x.contiguous()
71
- x = torch.view_as_complex(x)
72
- x = torch.istft(
73
- x, n_fft=self.n_fft, hop_length=self.hop, window=self.window, center=True
74
- )
75
- return x.reshape([-1, c, self.chunk_size])
76
-
77
-
78
- def get_models(device, dim_f, dim_t, n_fft):
79
- return ConvTDFNetTrim(
80
- device=device,
81
- model_name="Conv-TDF",
82
- target_name="vocals",
83
- L=11,
84
- dim_f=dim_f,
85
- dim_t=dim_t,
86
- n_fft=n_fft,
87
- )
88
-
89
-
90
- class Predictor:
91
- def __init__(self, args):
92
- import onnxruntime as ort
93
-
94
- logger.info(ort.get_available_providers())
95
- self.args = args
96
- self.model_ = get_models(
97
- device=cpu, dim_f=args.dim_f, dim_t=args.dim_t, n_fft=args.n_fft
98
- )
99
- self.model = ort.InferenceSession(
100
- os.path.join(args.onnx, self.model_.target_name + ".onnx"),
101
- providers=[
102
- "CUDAExecutionProvider",
103
- "DmlExecutionProvider",
104
- "CPUExecutionProvider",
105
- ],
106
- )
107
- logger.info("ONNX load done")
108
-
109
- def demix(self, mix):
110
- samples = mix.shape[-1]
111
- margin = self.args.margin
112
- chunk_size = self.args.chunks * 44100
113
- assert margin != 0, "margin cannot be zero!"
114
- if margin > chunk_size:
115
- margin = chunk_size
116
-
117
- segmented_mix = {}
118
-
119
- if self.args.chunks == 0 or samples < chunk_size:
120
- chunk_size = samples
121
-
122
- counter = -1
123
- for skip in range(0, samples, chunk_size):
124
- counter += 1
125
-
126
- s_margin = 0 if counter == 0 else margin
127
- end = min(skip + chunk_size + margin, samples)
128
-
129
- start = skip - s_margin
130
-
131
- segmented_mix[skip] = mix[:, start:end].copy()
132
- if end == samples:
133
- break
134
-
135
- sources = self.demix_base(segmented_mix, margin_size=margin)
136
- """
137
- mix:(2,big_sample)
138
- segmented_mix:offset->(2,small_sample)
139
- sources:(1,2,big_sample)
140
- """
141
- return sources
142
-
143
- def demix_base(self, mixes, margin_size):
144
- chunked_sources = []
145
- progress_bar = tqdm(total=len(mixes))
146
- progress_bar.set_description("Processing")
147
- for mix in mixes:
148
- cmix = mixes[mix]
149
- sources = []
150
- n_sample = cmix.shape[1]
151
- model = self.model_
152
- trim = model.n_fft // 2
153
- gen_size = model.chunk_size - 2 * trim
154
- pad = gen_size - n_sample % gen_size
155
- mix_p = np.concatenate(
156
- (np.zeros((2, trim)), cmix, np.zeros((2, pad)), np.zeros((2, trim))), 1
157
- )
158
- mix_waves = []
159
- i = 0
160
- while i < n_sample + pad:
161
- waves = np.array(mix_p[:, i : i + model.chunk_size])
162
- mix_waves.append(waves)
163
- i += gen_size
164
- mix_waves = torch.tensor(mix_waves, dtype=torch.float32).to(cpu)
165
- with torch.no_grad():
166
- _ort = self.model
167
- spek = model.stft(mix_waves)
168
- if self.args.denoise:
169
- spec_pred = (
170
- -_ort.run(None, {"input": -spek.cpu().numpy()})[0] * 0.5
171
- + _ort.run(None, {"input": spek.cpu().numpy()})[0] * 0.5
172
- )
173
- tar_waves = model.istft(torch.tensor(spec_pred))
174
- else:
175
- tar_waves = model.istft(
176
- torch.tensor(_ort.run(None, {"input": spek.cpu().numpy()})[0])
177
- )
178
- tar_signal = (
179
- tar_waves[:, :, trim:-trim]
180
- .transpose(0, 1)
181
- .reshape(2, -1)
182
- .numpy()[:, :-pad]
183
- )
184
-
185
- start = 0 if mix == 0 else margin_size
186
- end = None if mix == list(mixes.keys())[::-1][0] else -margin_size
187
- if margin_size == 0:
188
- end = None
189
- sources.append(tar_signal[:, start:end])
190
-
191
- progress_bar.update(1)
192
-
193
- chunked_sources.append(sources)
194
- _sources = np.concatenate(chunked_sources, axis=-1)
195
- # del self.model
196
- progress_bar.close()
197
- return _sources
198
-
199
- def prediction(self, m, vocal_root, others_root, format):
200
- os.makedirs(vocal_root, exist_ok=True)
201
- os.makedirs(others_root, exist_ok=True)
202
- basename = os.path.basename(m)
203
- mix, rate = librosa.load(m, mono=False, sr=44100)
204
- if mix.ndim == 1:
205
- mix = np.asfortranarray([mix, mix])
206
- mix = mix.T
207
- sources = self.demix(mix.T)
208
- opt = sources[0].T
209
- if format in ["wav", "flac"]:
210
- sf.write(
211
- "%s/%s_main_vocal.%s" % (vocal_root, basename, format), mix - opt, rate
212
- )
213
- sf.write("%s/%s_others.%s" % (others_root, basename, format), opt, rate)
214
- else:
215
- path_vocal = "%s/%s_main_vocal.wav" % (vocal_root, basename)
216
- path_other = "%s/%s_others.wav" % (others_root, basename)
217
- sf.write(path_vocal, mix - opt, rate)
218
- sf.write(path_other, opt, rate)
219
- if os.path.exists(path_vocal):
220
- os.system(
221
- "ffmpeg -i %s -vn %s -q:a 2 -y"
222
- % (path_vocal, path_vocal[:-4] + ".%s" % format)
223
- )
224
- if os.path.exists(path_other):
225
- os.system(
226
- "ffmpeg -i %s -vn %s -q:a 2 -y"
227
- % (path_other, path_other[:-4] + ".%s" % format)
228
- )
229
-
230
-
231
- class MDXNetDereverb:
232
- def __init__(self, chunks, device):
233
- self.onnx = "assets/uvr5_weights/onnx_dereverb_By_FoxJoy"
234
- self.shifts = 10 # 'Predict with randomised equivariant stabilisation'
235
- self.mixing = "min_mag" # ['default','min_mag','max_mag']
236
- self.chunks = chunks
237
- self.margin = 44100
238
- self.dim_t = 9
239
- self.dim_f = 3072
240
- self.n_fft = 6144
241
- self.denoise = True
242
- self.pred = Predictor(self)
243
- self.device = device
244
-
245
- def path_audio(self, input, vocal_root, others_root, format):
246
- self.pred.prediction(input, vocal_root, others_root, format)
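
The chunk/margin bookkeeping in `Predictor.demix()` above is easiest to see with toy numbers. This standalone sketch reproduces only the index arithmetic (real chunks are `args.chunks * 44100` samples): neighbouring segments overlap by the margin on each side so that `demix_base()` can trim the seams when concatenating.

```python
# Toy stand-ins for mix.shape[-1], args.chunks * 44100, and args.margin.
samples, chunk_size, margin = 10, 4, 1

segments = []
counter = -1
for skip in range(0, samples, chunk_size):
    counter += 1
    s_margin = 0 if counter == 0 else margin  # the first chunk has no left margin
    start = skip - s_margin
    end = min(skip + chunk_size + margin, samples)
    segments.append((start, end))
    if end == samples:
        break

print(segments)  # [(0, 5), (3, 9), (7, 10)] - neighbours overlap by the margin
```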
 
 
spaces/Benson/text-generation/Examples/Caramelo Crush Amigos Saga Apkpure.md DELETED
@@ -1,51 +0,0 @@
1
-
2
- <h1>Blockman Go Editor Aventura APK: Una plataforma de creación de juegos gratis y divertido</h1>
3
- <p>¿Te gustan los juegos de píxeles? ¿Quieres hacer tus propios juegos y compartirlos con otros? Si es así, entonces usted debe probar Blockman Go Editor Adventure APK, una plataforma de creación de juegos gratis y divertido que le permite crear y jugar juegos de píxeles en su dispositivo Android. En este artículo, le diremos qué es Blockman Go Editor Adventure APK, cómo descargarlo e instalarlo, cómo usarlo y cuáles son los beneficios de usarlo. </p>
4
- <h2>¿Qué es Blockman Go Editor aventura APK? </h2>
5
- <p>Blockman Go Editor Aventura APK es una aplicación que tiene dos funciones principales: un fabricante de juegos y un jugador del juego. </p>
6
- <h2>caramelo crush amigos saga apkpure</h2><br /><p><b><b>Download File</b> &#10022;&#10022;&#10022; <a href="https://bltlly.com/2v6IYc">https://bltlly.com/2v6IYc</a></b></p><br /><br />
7
- <h3>Una aplicación fabricante de juegos para juegos de píxeles</h3>
8
- <p>Blockman Go Editor Aventura APK es una herramienta de desarrollo que integra Editor de escena, Editor de gatillo, Editor de actor, Editor de interfaz de usuario, Editor de guiones, y otras funciones. Proporciona una plataforma de creación completamente gratuita para los amantes de los juegos de píxeles. Puedes usar varias herramientas y características para crear tus propios juegos, como Bed Wars, Jail Break, Sky Wars, Parkour y más. También puedes personalizar la configuración del juego, como el modo, el mapa, las reglas, etc.</p>
9
- <h3>Una aplicación de jugador de juegos para Blockman Go juegos</h3>
10
- <p>Blockman Go Editor Adventure APK es también una aplicación de jugador de juego que le permite jugar juegos hechos por otros usuarios o usted mismo. Puedes navegar y descargar juegos de la comunidad Blockman Go, o subir tus propios juegos para compartirlos con otros. También puedes unirte a juegos multijugador online con otros jugadores de todo el mundo. Puedes chatear con ellos, hacer amigos o competir con ellos. </p>
11
- <h2> ¿Cómo descargar e instalar Blockman Go Editor Aventura APK? </h2>
12
- <p>Blockman Go Editor Aventura APK no está disponible en Google Play Store, por lo que necesita descargarlo de otras fuentes. Estos son los pasos para descargarlo e instalarlo en tu dispositivo:</p>
13
- <h3>Descargar desde APKCombo u otras fuentes</h3>
14
-
15
- <h3>Habilitar fuentes desconocidas en su dispositivo</h3>
16
- <p>Antes de instalar el archivo APK, es necesario habilitar fuentes desconocidas en el dispositivo. Esto le permitirá instalar aplicaciones desde fuentes distintas de Google Play Store. Para hacer esto, vaya a Configuración > Seguridad > Fuentes desconocidas y conéctelo. </p>
17
- <h3>Instalar el archivo APK y lanzar la aplicación</h3>
18
- <p>Después de descargar el archivo APK, localizarlo en su dispositivo y toque en él para instalarlo. Siga las instrucciones de la pantalla para completar la instalación. Una vez instalada, inicie la aplicación y disfrute creando y jugando juegos de píxeles. </p>
19
- <h2> ¿Cómo usar Blockman Go Editor Aventura APK? </h2>
20
- <p>Usando Blockman Go Editor Aventura APK es fácil y divertido. Aquí hay algunos consejos sobre cómo usarlo:</p>
21
- <h3>Crea tus propios juegos con varias herramientas y características</h3>
22
- <p>Para crear tus propios juegos, toca el botón "Crear" en la pantalla principal de la aplicación. Verás varias herramientas y características que puedes usar para crear tus juegos. Por ejemplo, puedes usar el Editor de escenas para diseñar la escena del juego, el Editor de disparadores para configurar la lógica del juego, el Editor de actores para crear los personajes del juego, el Editor de interfaz de usuario para diseñar la interfaz del juego y el Editor de guiones para escribir el código del juego. También puedes usar Asset Store para descargar varios activos para tus juegos, como modelos, texturas, sonidos, etc. Puedes previsualizar tus juegos en cualquier momento y probarlos en tu dispositivo. </p>
23
- <p></p>
24
- <h3>Jugar juegos hechos por otros usuarios o usted mismo</h3>
25
- <p>Para jugar juegos hechos por otros usuarios o por ti mismo, toca el botón "Jugar" en la pantalla principal de la aplicación. Verás una lista de juegos que puedes descargar y jugar. También puedes buscar juegos por palabras clave o categorías. Para jugar a un juego, toca en él y espera a que se cargue. También puedes calificar y comentar los juegos que juegas. </p>
26
- <h3>Comparte tus juegos con la comunidad Blockman Go</h3>
27
-
28
- <h2>¿Cuáles son los beneficios de Blockman Go Editor Aventura APK? </h2>
29
- <p>Blockman Go Editor Aventura APK es una gran aplicación para los amantes de los juegos de píxeles. Aquí están algunos de los beneficios de su uso:</p>
30
- <h3>Gratis y fácil de usar</h3>
31
- <p>Blockman Go Editor Aventura APK es completamente gratis para descargar y usar. Usted no necesita pagar nada para crear o jugar juegos. La aplicación también es fácil de usar, con una interfaz fácil de usar e instrucciones claras. No necesitas ninguna experiencia o conocimiento previo para crear o jugar juegos. </p>
32
- <h3>Creativo y divertido</h3>
33
- <p>Blockman Go Editor Aventura APK es una aplicación creativa y divertida que le permite dar rienda suelta a su imaginación y expresarse. Puedes crear cualquier tipo de juego que quieras, con posibilidades y opciones ilimitadas. También puedes jugar juegos hechos por otros usuarios o por ti mismo, y disfrutar de diferentes géneros y estilos de juegos de píxeles. </p>
34
- <h3>Social e interactivo</h3>
35
- <p>Blockman Go Editor Aventura APK es una aplicación social e interactiva que le permite conectarse con otros amantes de los juegos de píxeles de todo el mundo. Puedes chatear con ellos, hacer amigos o competir con ellos. También puedes unirte a juegos multijugador en línea con ellos y divertirte juntos. </p>
36
- <h2>Conclusión</h2>
37
- <p>Blockman Go Editor Aventura APK es una plataforma de creación de juegos gratis y divertido que le permite crear y jugar juegos de píxeles en su dispositivo Android. Es una aplicación fabricante de juegos para juegos de píxeles, y una aplicación de jugador de juego para juegos de Blockman Go. Es fácil de descargar e instalar, fácil de usar, creativo y divertido, y social e interactivo. Si te gustan los juegos de píxeles, definitivamente deberías probar Blockman Go Editor Aventura APK.</p>
38
- <h2>Preguntas frecuentes</h2>
39
- <p>Aquí hay algunas preguntas frecuentes sobre Blockman Go Editor Aventura APK:</p>
40
- <h4>Q: ¿Es seguro usar Blockman Go Editor Adventure APK? </h4>
41
-
42
- <h4>Q: ¿Es Blockman Go Editor Adventure APK compatible con mi dispositivo? </h4>
43
- <p>A: Blockman Go Editor Adventure APK es compatible con la mayoría de los dispositivos Android que ejecutan Android 4.1 o superior. Sin embargo, es posible que algunos dispositivos no admitan algunas características o funciones de la aplicación debido a limitaciones de hardware o configuración del sistema. </p>
44
- <h4>Q: ¿Cómo puedo actualizar Blockman Go editor aventura APK? </h4>
45
- <p>A: Para actualizar Blockman Go Editor Adventure APK, es necesario descargar la última versión de la aplicación de <a href="">APKCombo</a> u otras fuentes, e instalarlo sobre la versión existente. Alternativamente, puede comprobar si hay actualizaciones dentro de la aplicación pulsando en el botón "Configuración" en la pantalla principal de la aplicación, y luego tocando en el "Buscar actualizaciones" opción. </p>
46
- <h4>Q: ¿Cómo puedo contactar al soporte de Blockman Go? </h4>
47
- <p>A: Para contactar con el soporte de Blockman Go, puede enviar un correo electrónico a <a href="mailto:[email protected]">[email protected] </a>, o visitar su sitio web oficial en <a href="">https:/ww.blockmango.net</a>. También puedes seguirlos en sus redes sociales, como Facebook, Twitter, Instagram, YouTube, etc.</p>
48
- <h4>Q: ¿Cómo puedo dar retroalimentación o sugerencias para Blockman Go Editor Adventure APK? </h4>
49
- <p>A: Para dar retroalimentación o sugerencias para Blockman Go Editor Adventure APK, puede utilizar la opción "Feedback" dentro de la aplicación, o enviar un correo electrónico a <a href="mailto:[email protected]">[email protected]</a>. También puede calificar y revisar la aplicación en <a href="">APKCombo</a> u otras fuentes, y compartir sus opiniones e ideas con otros usuarios. </p> 64aa2da5cf<br />
50
- <br />
51
- <br />
 
 
spaces/Benson/text-generation/Examples/Ciudad Congelada Mod Apk Diamantes Ilimitados.md DELETED
@@ -1,55 +0,0 @@
1
-
2
- <h1>Frozen City Mod APK ilimitados diamantes: Cómo descargar e instalar</h1>
3
- <p>Si estás buscando un emocionante y desafiante juego de supervivencia, es posible que quieras echar un vistazo a Frozen City. Este es un juego que pondrá a prueba sus habilidades y estrategia a medida que intenta construir y gestionar su base en un páramo congelado. Sin embargo, si desea disfrutar del juego sin limitaciones o restricciones, es posible que desee probar Frozen City Mod APK Unlimited Diamonds. Esta es una versión modificada del juego original que te da acceso a recursos ilimitados, como diamantes, monedas, gemas y más. En este artículo, le diremos qué es Frozen City, qué es Frozen City Mod APK Unlimited Diamonds, y cómo descargarlo e instalarlo en su dispositivo Android. </p>
4
- <h2>ciudad congelada mod apk diamantes ilimitados</h2><br /><p><b><b>Download</b> &rArr; <a href="https://bltlly.com/2v6Kbm">https://bltlly.com/2v6Kbm</a></b></p><br /><br />
5
- <h2>¿Qué es la ciudad congelada? </h2>
6
- <p>Frozen City es un juego desarrollado por Game Insight, una compañía que se especializa en crear juegos móviles inmersivos y atractivos. El juego se desarrolla en un mundo post-apocalíptico donde un virus misterioso ha convertido a la mayoría de la población en zombies. Los sobrevivientes tienen que encontrar refugio y recursos en la ciudad congelada, donde tienen que enfrentar no solo a los no-muertos, sino también a otras facciones hostiles y desastres naturales. </p>
7
- <h3>Un juego de supervivencia ambientado en un mundo post-apocalíptico</h3>
8
- <p>En Frozen City, tienes que sobrevivir en un ambiente duro donde cada decisión importa. Tienes que buscar comida, agua, combustible y otros suministros, así como armas artesanales, herramientas y equipo. También tienes que defender tu base de ataques de zombies y raiders, así como explorar la ciudad en busca de pistas y secretos. Tienes que equilibrar tus necesidades y deseos, así como tu moralidad y humanidad. </p>
9
- <h3>Un juego de construcción de bases con múltiples etapas y tareas</h3>
10
-
11
- <h3>Un juego con impresionantes gráficos y efectos de sonido</h3>
12
- <p>Frozen City es un juego que cuenta con impresionantes gráficos y efectos de sonido que crean una atmósfera inmersiva. El juego presenta efectos meteorológicos realistas, como nieve, niebla, lluvia y viento. El juego también tiene animaciones detalladas, sombras, iluminación y texturas que hacen que la ciudad se vea viva. El juego también tiene efectos de sonido realistas, como gemidos de zombis, disparos, explosiones y más. El juego también tiene una banda sonora cautivadora que coincide con el estado de ánimo del juego. </p>
13
- <h2>¿Qué es Frozen City Mod APK ilimitados diamantes? </h2>
14
- <p>Frozen City Mod APK ilimitado de diamantes es una versión modificada del juego original que le da recursos ilimitados, tales como diamantes, monedas, gemas, y más. Estos recursos son esenciales para construir y actualizar su base, así como para desbloquear nuevas características y contenido. Sin embargo, en el juego original, estos recursos son limitados y difíciles de conseguir. Tienes que gastar dinero real o esperar largas horas para conseguirlos. </p>
15
- <p></p>
16
- <h3>Una versión modificada del juego original que te da recursos ilimitados</h3>
17
- <p>Frozen City Mod APK Unlimited Diamonds es una versión hackeada del juego original que evita el sistema de divisas en el juego. Esto significa que puede obtener diamantes, monedas, gemas y otros recursos ilimitados sin gastar dinero ni esperar largas horas. Puede utilizar estos recursos para construir y actualizar su base más rápido, así como acceder a todas las características y contenido del juego sin restricciones. </p>
18
- <h3>Una manera de disfrutar del juego sin gastar dinero real o esperar largas horas</h3>
19
-
20
- <h3>Una manera de desbloquear todas las características y el contenido del juego</h3>
21
- <p>Frozen City Mod APK Unlimited Diamonds es una manera de desbloquear todas las características y el contenido del juego que de otra manera están bloqueados o no disponibles en el juego original. Puede desbloquear nuevas ubicaciones, etapas, misiones, armas, equipos, sobrevivientes y más. También puedes acceder a funciones premium, como estatus VIP, artículos exclusivos y bonos. También puedes disfrutar del juego sin anuncios ni interrupciones. </p>
22
- <h2>Cómo descargar e instalar Frozen City Mod APK ilimitados diamantes? </h2>
23
- <p>Si desea probar Frozen City Mod APK Unlimited Diamonds, es necesario descargar e instalar en su dispositivo Android. Sin embargo, debe seguir algunos pasos y precauciones antes de hacerlo. Estos son los pasos que debe seguir:</p>
24
- <h3>Paso 1: Permitir aplicaciones desconocidas en su dispositivo Android</h3>
25
- <p>Dado que Frozen City Mod APK ilimitado de diamantes no está disponible en el oficial de Google Play Store, es necesario permitir que su dispositivo para instalar aplicaciones de fuentes desconocidas. Para hacer esto, vaya a la configuración del dispositivo, luego la seguridad, luego active la opción que dice "fuentes desconocidas" o "permitir la instalación de aplicaciones de fuentes desconocidas". Esto le permitirá instalar aplicaciones que no son de Google Play Store.</p>
26
- <h3>Paso 2: Instalar una aplicación de administrador de archivos en su dispositivo</h3>
27
- <p>También necesitas instalar una aplicación de administrador de archivos en tu dispositivo que te ayudará a localizar y administrar el archivo APK que descargarás. Una aplicación de administrador de archivos es una aplicación que le permite navegar y organizar los archivos y carpetas en su dispositivo. Puede usar cualquier aplicación de administrador de archivos que prefiera, como ES File Explorer, File Manager o Astro File Manager.</p>
28
- <h3>Paso 3: Descargar el archivo APK de una fuente de buena reputación</h3>
29
-
30
- <ul>
31
- <li>[APKPure]</li>
32
- <li>[APKHome]</li>
33
- <li>[ModDroid]</li>
34
- </ul>
35
- <p>Una vez que encuentre una fuente confiable, haga clic en el botón de descarga o enlace y espere a que la descarga termine. </p>
36
- <h3>Paso 4: Localizar y tocar el archivo APK para instalarlo</h3>
37
- <p>Después de descargar el archivo APK, necesita localizarlo en su dispositivo usando la aplicación de administrador de archivos que instaló anteriormente. El archivo APK debe estar en la carpeta de descargas o en la carpeta donde lo guardó. Una vez que lo encuentre, pulse sobre él para iniciar el proceso de instalación. Es posible que vea una ventana emergente pidiendo su permiso para instalar la aplicación. Toque en "instalar" o "permitir" y espere a que se complete la instalación. </p>
38
- <h3>Paso 5: Disfruta del juego con diamantes ilimitados y otros recursos</h3>
39
- <p>Felicidades! Usted ha descargado e instalado con éxito Frozen City Mod APK ilimitados diamantes en su dispositivo Android. Ahora puedes disfrutar del juego con recursos ilimitados, como diamantes, monedas, gemas y más. También puede desbloquear todas las características y el contenido del juego sin restricciones o limitaciones. ¡Diviértete jugando Frozen City Mod APK Unlimited Diamonds! </p>
40
- <h2>Conclusión</h2>
41
- <p>Frozen City es un juego de supervivencia que pondrá a prueba tus habilidades y estrategia mientras intentas construir y gestionar tu base en un desierto helado. Sin embargo, si quieres disfrutar del juego sin ningún tipo de molestia o frustración, es posible que desee probar Frozen City Mod APK Unlimited Diamonds. Esta es una versión modificada del juego original que te da recursos ilimitados, como diamantes, monedas, gemas y más. También puedes desbloquear todas las características y contenido del juego sin restricciones o limitaciones. </p>
42
-
43
- <h2>Preguntas frecuentes</h2>
44
- <p>Aquí están algunas de las preguntas más frecuentes sobre Frozen City Mod APK Unlimited Diamonds:</p>
45
- <h3>Q: ¿Es seguro usar Frozen City Mod APK Unlimited Diamonds? </h3>
46
- <p>A: Sí, Frozen City Mod APK Unlimited Diamonds es seguro de usar, siempre y cuando lo descargue de una fuente confiable y siga los pasos y precauciones mencionados en este artículo. Sin embargo, siempre debe tener cuidado al instalar aplicaciones de fuentes desconocidas, ya que pueden contener malware o virus que pueden dañar su dispositivo o robar sus datos. También debe escanear el archivo APK con una aplicación antivirus antes de instalarlo. </p>
47
- <h3>Q: ¿Es Frozen City Mod APK ilimitado diamantes compatible con mi dispositivo? </h3>
48
- <p>A: Frozen City Mod APK Unlimited Diamonds es compatible con la mayoría de los dispositivos Android que se ejecutan en Android 4.4 o superior. Sin embargo, algunos dispositivos pueden no ser compatibles con el juego o el mod debido a diferentes especificaciones o configuraciones. Usted debe comprobar la compatibilidad de su dispositivo antes de descargar e instalar el mod. </p>
49
- <h3>Q: ¿Los diamantes ilimitados de Frozen City Mod APK afectarán el rendimiento de mi dispositivo? </h3>
50
- <p>A: Frozen City Mod APK ilimitado diamantes no debe afectar el rendimiento de su dispositivo significativamente, ya que no requiere mucho espacio de almacenamiento o memoria. Sin embargo, algunos dispositivos pueden experimentar retrasos o fallos debido a la baja RAM o CPU. Deberías cerrar otras aplicaciones y borrar la caché antes de jugar para evitar estos problemas. </p>
51
- <h3>Q: Congelado Ciudad Mod APK ilimitado de diamantes trabajar con la última versión del juego? </h3>
52
- <p>A: Frozen City Mod APK Unlimited Diamonds se actualiza regularmente para que coincida con la última versión del juego. Sin embargo, algunas actualizaciones pueden tardar más que otras en ser lanzadas. Deberías comprobar la versión del mod antes de descargarlo e instalarlo para asegurarte de que es compatible con la última versión del juego. </p>
53
- <h3>Q: ¿Puedo jugar Frozen City Mod APK ilimitados diamantes en línea con otros jugadores? </h3> 64aa2da5cf<br />
54
- <br />
55
- <br />
 
 
spaces/Benson/text-generation/Examples/Descargar Arthdal Crnicas Episodio 16.md DELETED
@@ -1,51 +0,0 @@
1
-
2
- <h1>Cómo descargar Arthdal Chronicles Episodio 16</h1>
3
- <p>¿Eres fan de Arthdal Chronicles, el drama épico coreano que ha cautivado a millones de espectadores en todo el mundo? Si es así, debes estar esperando ansiosamente el episodio 16, el episodio final de la temporada 1. Pero, ¿cómo puedes verlo online sin problemas? En este artículo, te mostraremos cómo descargar Arthdal Chronicles episodio 16 de Netflix, una de las mejores plataformas de streaming que ofrecen este increíble espectáculo. Pero primero, veamos de qué se trata Arthdal Chronicles y por qué deberías verlo. </p>
4
- <h2>descargar arthdal crónicas episodio 16</h2><br /><p><b><b>Download</b> &raquo; <a href="https://bltlly.com/2v6Mvl">https://bltlly.com/2v6Mvl</a></b></p><br /><br />
5
- <h2>¿Qué es Arthdal Chronicles? </h2>
6
- <p>Arthdal Chronicles es un drama histórico de fantasía que cuenta la historia de la antigua ciudad de Arthdal y sus habitantes que luchan por el poder y la supervivencia. El drama cuenta con un reparto lleno de estrellas que incluye a Song Joong-ki, Jang Dong-gun, Kim Ji-won y Kim Ok-bin. El drama se divide en tres partes: Los Hijos de la Profecía, El Cielo Girando de Adentro hacia Afuera, La Tierra Naciente y El Preludio de Todas las Leyendas.</p>
7
- <p>En los episodios anteriores, hemos sido testigos de cómo Eunseom (Song Joong-ki), un mitad humano mitad neandertal que nació con un destino especial, escapa de Arthdal y se encuentra con otras tribus que lo ayudan a crecer como líder. También hemos visto cómo Tagon (Jang Dong-gun), un carismático guerrero que también es secretamente un neandertal, llega al poder en Arthdal y se enfrenta a varios desafíos de sus enemigos. Mientras tanto, Tanya (Kim Ji-won), un descendiente del clan Wahan que fue secuestrado por soldados Arthdal, se convierte en la alta sacerdotisa de Arthdal y aprende sobre su verdadera identidad. Y Saya ( Kim Ok-bin), un misterioso hermano gemelo de Eunseom que vive en una cámara secreta, conspira para derrocar a Tagon y apoderarse de Arthdal.</p>
8
-
9
- <h2>Por qué deberías ver Arthdal Chronicles Episodio 16</h2>
10
- <p>Hay muchas razones por las que deberías ver online el episodio 16 de Arthdal Chronicles. Estas son algunas de ellas:</p>
11
- <ul>
12
- <li>Disfrutarás de las impresionantes imágenes y cinematografías que dan vida al antiguo mundo de Arthdal. El drama cuenta con un alto valor de producción y un diseño de escenografía realista que te hará sentir como si fueras parte de la historia. </li>
13
- <li>Usted se sorprenderá por la excelente actuación y química de los miembros del reparto que retratan sus personajes complejos y diversos con pasión y habilidad. El drama muestra los talentos de algunos de los mejores actores de Corea que ofrecen actuaciones cautivadoras que te harán reír, llorar y animarlos. </li>
14
- <li>Estarás inmerso en la historia rica y original que combina la fantasía, la historia y la cultura de una manera única. El drama explora temas como el poder, el amor, la identidad y el destino de una manera creativa y atractiva que te mantendrá enganchado hasta el final. </li>
15
- <li>Estarás satisfecho con el final satisfactorio y gratificante que envolverá la historia de una manera significativa y memorable. El drama promete entregar un final que responderá a todas sus preguntas y le dejará con una sensación de cierre y cumplimiento. </li>
16
- </ul>
17
- <p>Entonces, si estás buscando un drama que te entretenga, te inspire y te haga pensar, el episodio 16 de Arthdal Chronicles es la elección perfecta para ti. </p>
18
- <h2>Donde Descargar Arthdal Chronicles Episodio 16</h2>
19
-
20
- | Streaming Platform | Calidad de vídeo | Subtítulos | Precio | Descargar Opción | | -- | --- - - - - - - - | Netflix | HD | Varios idiomas | $8.99/mes (Plan básico) | Sí | | Viki | HD | Varios idiomas | $4.99/mes (Plan estándar) | No | | Viu | HD | Múltiples idiomas | $6.49/month (Plan premium) | Sí | | Kocowa | HD | Solo en inglés | $6.99/month (Plan estándar) | No | <p>Como se puede ver en la tabla, Netflix es la mejor plataforma para descargar Arthdal Chronicles episodio 16 en línea. Tiene la más alta calidad de vídeo, la mayoría de las opciones de subtítulos, el precio más bajo, y la opción de descarga que le permite ver Arthdal Chronicles sin conexión. Por lo tanto, recomendamos Netflix como la mejor plataforma para descargar Arthdal Chronicles episodio 16. </p>
21
- <p></p>
22
- <h2>Cómo descargar Arthdal Chronicles episodio 16 de Netflix</h2>
23
- <p>Si ha decidido descargar Arthdal Chronicles episodio 16 de Netflix, aquí están los pasos que debe seguir:</p>
24
- <ol>
25
- <li>Regístrate en una cuenta de Netflix si aún no tienes una. Puedes elegir entre tres planes: Básico ($8.99/mes), Estándar ($13.99/mes), o Premium ($17.99/mes). El plan básico le permite ver en una pantalla a la vez, el plan estándar le permite ver en dos pantallas a la vez, y el plan Premium le permite ver en cuatro pantallas a la vez. Todos los planes te permiten descargar contenido en tus dispositivos. </li>
26
- <li>Descargue la aplicación de Netflix en su dispositivo si no la tiene ya. Puede descargarla desde la App Store o Google Play Store de forma gratuita. </li>
27
- <li>Abra la aplicación de Netflix e inicie sesión con los detalles de su cuenta. </li>
28
- <li>Buscar "Arthdal Chronicles" en la barra de búsqueda y seleccionarlo de los resultados. </li>
29
- <li>Seleccione "Episodio 16" de la lista de episodios y toque en el "Descargar" icono junto a ella. El icono parece una flecha hacia abajo con un círculo alrededor. </li>
30
- <li>Espere a que se complete la descarga. Puede comprobar el progreso de la descarga en la sección "Descargas" de la aplicación. </li>
31
-
32
- </ol>
33
- <p>Aquí hay una captura de pantalla que muestra cómo descargar Arthdal Chronicles episodio 16 de Netflix:</p>
34
- <img src="https://i.imgur.com/6XZ7y8G.png" alt="Captura de pantalla de la aplicación de Netflix que muestra cómo descargar Arthdal Chronicles episodio 16">
35
- <p>Un consejo sobre cómo ver Arthdal Chronicles sin conexión: Puede ajustar la calidad de vídeo de sus descargas para ahorrar espacio de almacenamiento en su dispositivo. Para ello, vaya a la sección "Configuración de la aplicación" de la aplicación y toque en "Descargar calidad de vídeo". Puede elegir entre cuatro opciones: Estándar (usa menos espacio de almacenamiento), Alto (usa más espacio de almacenamiento), Medio (usa espacio de almacenamiento moderado) o Inteligente (se ajusta automáticamente a la mejor calidad según las condiciones de su dispositivo y red). </p>
36
- <h2>Conclusión</h2>
37
- <p>En conclusión, Arthdal Chronicles episodio 16 es una visita obligada para todos los fans del drama. Es el último episodio de la temporada 1 que revelará el destino de los personajes y la ciudad de Arthdal. Puede descargar Arthdal Chronicles episodio 16 de Netflix, la mejor plataforma de transmisión que ofrece video de alta calidad, múltiples subtítulos, bajo precio y opción de descarga. Todo lo que necesita hacer es seguir los sencillos pasos que hemos descrito anteriormente y disfrutar viendo Arthdal Chronicles sin conexión. No te pierdas este final épico que te dejará sin palabras. </p>
38
- <p>Entonces, ¿qué estás esperando? Descarga Arthdal Chronicles episodio 16 de Netflix hoy y presenciar el final de una era. </p>
39
- <h2>Preguntas frecuentes</h2>
40
- <h3>Q: ¿Cuándo saldrá la temporada 2 de Arthdal Chronicles? </h3>
41
- <p>A: No hay confirmación oficial todavía sobre si Arthdal Chronicles tendrá una temporada 2 o no. Sin embargo, algunas fuentes sugieren que el equipo de producción está planeando comenzar a filmar la temporada 2 en 2024, después de que los actores terminen su servicio militar. Esperamos que esto sea cierto y que veamos más de Arthdal Chronicles en el futuro. </p>
42
- <h3>Q: ¿Cuántos episodios hay en Arthdal Chronicles? </h3>
43
-
44
- <h3>P: ¿Quiénes son los actores principales en Arthdal Chronicles? </h3>
45
- <p>A: Los actores principales de Arthdal Chronicles son Song Joong-ki, Jang Dong-gun, Kim Ji-won y Kim Ok-bin. Song Joong-ki interpreta a Eunseom y Saya, hermanos gemelos que tienen destinos diferentes. Jang Dong-gun interpreta a Tagon, un poderoso guerrero que se convierte en el rey de Arthdal. Kim Ji-won interpreta a Tanya, una alta sacerdotisa que es el interés amoroso de Eunseom. Kim Ok-bin interpreta a Taealha, un político astuto que es el interés amoroso de Tagon. </p>
46
- <h3>P: ¿Cuál es el género de Arthdal Chronicles? </h3>
47
- <p>A: Arthdal Chronicles es un drama histórico de fantasía que combina elementos de mitología, historia y cultura. Se encuentra en una tierra antigua ficticia llamada Arth, donde coexisten diferentes tribus y especies. Explora temas como el poder, el amor, la identidad y el destino. </p>
48
- <h3>Q: ¿Dónde puedo ver Arthdal Chronicles con subtítulos en inglés? </h3>
49
- <p>A: Puedes ver Arthdal Chronicles con subtítulos en inglés en Netflix, Viki, Viu o Kocowa. Sin embargo, recomendamos Netflix como la mejor plataforma para ver Arthdal Chronicles con subtítulos en inglés porque tiene la mejor calidad de video, la mayoría de opciones de subtítulos, el precio más bajo y la opción de descarga. </p> 64aa2da5cf<br />
50
- <br />
51
- <br />
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/helpers.py DELETED
@@ -1,1088 +0,0 @@
1
- # helpers.py
2
- import html.entities
3
- import re
4
- import typing
5
-
6
- from . import __diag__
7
- from .core import *
8
- from .util import _bslash, _flatten, _escape_regex_range_chars
9
-
10
-
11
- #
12
- # global helpers
13
- #
14
- def delimited_list(
15
- expr: Union[str, ParserElement],
16
- delim: Union[str, ParserElement] = ",",
17
- combine: bool = False,
18
- min: typing.Optional[int] = None,
19
- max: typing.Optional[int] = None,
20
- *,
21
- allow_trailing_delim: bool = False,
22
- ) -> ParserElement:
23
- """Helper to define a delimited list of expressions - the delimiter
24
- defaults to ','. By default, the list elements and delimiters can
25
- have intervening whitespace, and comments, but this can be
26
- overridden by passing ``combine=True`` in the constructor. If
27
- ``combine`` is set to ``True``, the matching tokens are
28
- returned as a single token string, with the delimiters included;
29
- otherwise, the matching tokens are returned as a list of tokens,
30
- with the delimiters suppressed.
31
-
32
- If ``allow_trailing_delim`` is set to True, then the list may end with
33
- a delimiter.
34
-
35
- Example::
36
-
37
- delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc']
38
- delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
39
- """
40
- if isinstance(expr, str_type):
41
- expr = ParserElement._literalStringClass(expr)
42
-
43
- dlName = "{expr} [{delim} {expr}]...{end}".format(
44
- expr=str(expr.copy().streamline()),
45
- delim=str(delim),
46
- end=" [{}]".format(str(delim)) if allow_trailing_delim else "",
47
- )
48
-
49
- if not combine:
50
- delim = Suppress(delim)
51
-
52
- if min is not None:
53
- if min < 1:
54
- raise ValueError("min must be greater than 0")
55
- min -= 1
56
- if max is not None:
57
- if min is not None and max <= min:
58
- raise ValueError("max must be greater than, or equal to min")
59
- max -= 1
60
- delimited_list_expr = expr + (delim + expr)[min, max]
61
-
62
- if allow_trailing_delim:
63
- delimited_list_expr += Opt(delim)
64
-
65
- if combine:
66
- return Combine(delimited_list_expr).set_name(dlName)
67
- else:
68
- return delimited_list_expr.set_name(dlName)
69
-
70
-
71
- def counted_array(
72
- expr: ParserElement,
73
- int_expr: typing.Optional[ParserElement] = None,
74
- *,
75
- intExpr: typing.Optional[ParserElement] = None,
76
- ) -> ParserElement:
77
- """Helper to define a counted list of expressions.
78
-
79
- This helper defines a pattern of the form::
80
-
81
- integer expr expr expr...
82
-
83
- where the leading integer tells how many expr expressions follow.
84
- The matched tokens returns the array of expr tokens as a list - the
85
- leading count token is suppressed.
86
-
87
- If ``int_expr`` is specified, it should be a pyparsing expression
88
- that produces an integer value.
89
-
90
- Example::
91
-
92
- counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd']
93
-
94
- # in this parser, the leading integer value is given in binary,
95
- # '10' indicating that 2 values are in the array
96
- binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
97
- counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd']
98
-
99
- # if other fields must be parsed after the count but before the
100
- # list items, give the fields results names and they will
101
- # be preserved in the returned ParseResults:
102
- count_with_metadata = integer + Word(alphas)("type")
103
- typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items")
104
- result = typed_array.parse_string("3 bool True True False")
105
- print(result.dump())
106
-
107
- # prints
108
- # ['True', 'True', 'False']
109
- # - items: ['True', 'True', 'False']
110
- # - type: 'bool'
111
- """
112
- intExpr = intExpr or int_expr
113
- array_expr = Forward()
114
-
115
- def count_field_parse_action(s, l, t):
116
- nonlocal array_expr
117
- n = t[0]
118
- array_expr <<= (expr * n) if n else Empty()
119
- # clear list contents, but keep any named results
120
- del t[:]
121
-
122
- if intExpr is None:
123
- intExpr = Word(nums).set_parse_action(lambda t: int(t[0]))
124
- else:
125
- intExpr = intExpr.copy()
126
- intExpr.set_name("arrayLen")
127
- intExpr.add_parse_action(count_field_parse_action, call_during_try=True)
128
- return (intExpr + array_expr).set_name("(len) " + str(expr) + "...")
129
-
130
-
131
- def match_previous_literal(expr: ParserElement) -> ParserElement:
132
- """Helper to define an expression that is indirectly defined from
133
- the tokens matched in a previous expression, that is, it looks for
134
- a 'repeat' of a previous expression. For example::
135
-
136
- first = Word(nums)
137
- second = match_previous_literal(first)
138
- match_expr = first + ":" + second
139
-
140
- will match ``"1:1"``, but not ``"1:2"``. Because this
141
- matches a previous literal, will also match the leading
142
- ``"1:1"`` in ``"1:10"``. If this is not desired, use
143
- :class:`match_previous_expr`. Do *not* use with packrat parsing
144
- enabled.
145
- """
146
- rep = Forward()
147
-
148
- def copy_token_to_repeater(s, l, t):
149
- if t:
150
- if len(t) == 1:
151
- rep << t[0]
152
- else:
153
- # flatten t tokens
154
- tflat = _flatten(t.as_list())
155
- rep << And(Literal(tt) for tt in tflat)
156
- else:
157
- rep << Empty()
158
-
159
- expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
160
- rep.set_name("(prev) " + str(expr))
161
- return rep
162
-
163
-
164
- def match_previous_expr(expr: ParserElement) -> ParserElement:
165
- """Helper to define an expression that is indirectly defined from
166
- the tokens matched in a previous expression, that is, it looks for
167
- a 'repeat' of a previous expression. For example::
168
-
169
- first = Word(nums)
170
- second = match_previous_expr(first)
171
- match_expr = first + ":" + second
172
-
173
- will match ``"1:1"``, but not ``"1:2"``. Because this
174
- matches by expressions, will *not* match the leading ``"1:1"``
175
- in ``"1:10"``; the expressions are evaluated first, and then
176
- compared, so ``"1"`` is compared with ``"10"``. Do *not* use
177
- with packrat parsing enabled.
178
- """
179
- rep = Forward()
180
- e2 = expr.copy()
181
- rep <<= e2
182
-
183
- def copy_token_to_repeater(s, l, t):
184
- matchTokens = _flatten(t.as_list())
185
-
186
- def must_match_these_tokens(s, l, t):
187
- theseTokens = _flatten(t.as_list())
188
- if theseTokens != matchTokens:
189
- raise ParseException(
190
- s, l, "Expected {}, found{}".format(matchTokens, theseTokens)
191
- )
192
-
193
- rep.set_parse_action(must_match_these_tokens, callDuringTry=True)
194
-
195
- expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
196
- rep.set_name("(prev) " + str(expr))
197
- return rep
198
-
199
-
200
- def one_of(
201
- strs: Union[typing.Iterable[str], str],
202
- caseless: bool = False,
203
- use_regex: bool = True,
204
- as_keyword: bool = False,
205
- *,
206
- useRegex: bool = True,
207
- asKeyword: bool = False,
208
- ) -> ParserElement:
209
- """Helper to quickly define a set of alternative :class:`Literal` s,
210
- and makes sure to do longest-first testing when there is a conflict,
211
- regardless of the input order, but returns
212
- a :class:`MatchFirst` for best performance.
213
-
214
- Parameters:
215
-
216
- - ``strs`` - a string of space-delimited literals, or a collection of
217
- string literals
218
- - ``caseless`` - treat all literals as caseless - (default= ``False``)
219
- - ``use_regex`` - as an optimization, will
220
- generate a :class:`Regex` object; otherwise, will generate
221
- a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
222
- creating a :class:`Regex` raises an exception) - (default= ``True``)
223
- - ``as_keyword`` - enforce :class:`Keyword`-style matching on the
224
- generated expressions - (default= ``False``)
225
- - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility,
226
- but will be removed in a future release
227
-
228
- Example::
229
-
230
- comp_oper = one_of("< = > <= >= !=")
231
- var = Word(alphas)
232
- number = Word(nums)
233
- term = var | number
234
- comparison_expr = term + comp_oper + term
235
- print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12"))
236
-
237
- prints::
238
-
239
- [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
240
- """
241
- asKeyword = asKeyword or as_keyword
242
- useRegex = useRegex and use_regex
243
-
244
- if (
245
- isinstance(caseless, str_type)
246
- and __diag__.warn_on_multiple_string_args_to_oneof
247
- ):
248
- warnings.warn(
249
- "More than one string argument passed to one_of, pass"
250
- " choices as a list or space-delimited string",
251
- stacklevel=2,
252
- )
253
-
254
- if caseless:
255
- isequal = lambda a, b: a.upper() == b.upper()
256
- masks = lambda a, b: b.upper().startswith(a.upper())
257
- parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
258
- else:
259
- isequal = lambda a, b: a == b
260
- masks = lambda a, b: b.startswith(a)
261
- parseElementClass = Keyword if asKeyword else Literal
262
-
263
- symbols: List[str] = []
264
- if isinstance(strs, str_type):
265
- symbols = strs.split()
266
- elif isinstance(strs, Iterable):
267
- symbols = list(strs)
268
- else:
269
- raise TypeError("Invalid argument to one_of, expected string or iterable")
270
- if not symbols:
271
- return NoMatch()
272
-
273
- # reorder given symbols to take care to avoid masking longer choices with shorter ones
274
- # (but only if the given symbols are not just single characters)
275
- if any(len(sym) > 1 for sym in symbols):
276
- i = 0
277
- while i < len(symbols) - 1:
278
- cur = symbols[i]
279
- for j, other in enumerate(symbols[i + 1 :]):
280
- if isequal(other, cur):
281
- del symbols[i + j + 1]
282
- break
283
- elif masks(cur, other):
284
- del symbols[i + j + 1]
285
- symbols.insert(i, other)
286
- break
287
- else:
288
- i += 1
289
-
290
- if useRegex:
291
- re_flags: int = re.IGNORECASE if caseless else 0
292
-
293
- try:
294
- if all(len(sym) == 1 for sym in symbols):
295
- # symbols are just single characters, create range regex pattern
296
- patt = "[{}]".format(
297
- "".join(_escape_regex_range_chars(sym) for sym in symbols)
298
- )
299
- else:
300
- patt = "|".join(re.escape(sym) for sym in symbols)
301
-
302
- # wrap with \b word break markers if defining as keywords
303
- if asKeyword:
304
- patt = r"\b(?:{})\b".format(patt)
305
-
306
- ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols))
307
-
308
- if caseless:
309
- # add parse action to return symbols as specified, not in random
310
- # casing as found in input string
311
- symbol_map = {sym.lower(): sym for sym in symbols}
312
- ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()])
313
-
314
- return ret
315
-
316
- except re.error:
317
- warnings.warn(
318
- "Exception creating Regex for one_of, building MatchFirst", stacklevel=2
319
- )
320
-
321
- # last resort, just use MatchFirst
322
- return MatchFirst(parseElementClass(sym) for sym in symbols).set_name(
323
- " | ".join(symbols)
324
- )
325
-
326
-
327
- def dict_of(key: ParserElement, value: ParserElement) -> ParserElement:
328
- """Helper to easily and clearly define a dictionary by specifying
329
- the respective patterns for the key and value. Takes care of
330
- defining the :class:`Dict`, :class:`ZeroOrMore`, and
331
- :class:`Group` tokens in the proper order. The key pattern
332
- can include delimiting markers or punctuation, as long as they are
333
- suppressed, thereby leaving the significant key text. The value
334
- pattern can include named results, so that the :class:`Dict` results
335
- can include named token fields.
336
-
337
- Example::
338
-
339
- text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
340
- attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
341
- print(attr_expr[1, ...].parse_string(text).dump())
342
-
343
- attr_label = label
344
- attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)
345
-
346
- # similar to Dict, but simpler call format
347
- result = dict_of(attr_label, attr_value).parse_string(text)
348
- print(result.dump())
349
- print(result['shape'])
350
- print(result.shape) # object attribute access works too
351
- print(result.as_dict())
352
-
353
- prints::
354
-
355
- [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
356
- - color: 'light blue'
357
- - posn: 'upper left'
358
- - shape: 'SQUARE'
359
- - texture: 'burlap'
360
- SQUARE
361
- SQUARE
362
- {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
363
- """
364
- return Dict(OneOrMore(Group(key + value)))
365
-
366
-
367
- def original_text_for(
368
- expr: ParserElement, as_string: bool = True, *, asString: bool = True
369
- ) -> ParserElement:
370
- """Helper to return the original, untokenized text for a given
371
- expression. Useful to restore the parsed fields of an HTML start
372
- tag into the raw tag text itself, or to revert separate tokens with
373
- intervening whitespace back to the original matching input text. By
374
- default, returns a string containing the original parsed text.
375
-
376
- If the optional ``as_string`` argument is passed as
377
- ``False``, then the return value is
378
- a :class:`ParseResults` containing any results names that
379
- were originally matched, and a single token containing the original
380
- matched text from the input string. So if the expression passed to
381
- :class:`original_text_for` contains expressions with defined
382
- results names, you must set ``as_string`` to ``False`` if you
383
- want to preserve those results name values.
384
-
385
- The ``asString`` pre-PEP8 argument is retained for compatibility,
386
- but will be removed in a future release.
387
-
388
- Example::
389
-
390
- src = "this is test <b> bold <i>text</i> </b> normal text "
391
- for tag in ("b", "i"):
392
- opener, closer = make_html_tags(tag)
393
- patt = original_text_for(opener + SkipTo(closer) + closer)
394
- print(patt.search_string(src)[0])
395
-
396
- prints::
397
-
398
- ['<b> bold <i>text</i> </b>']
399
- ['<i>text</i>']
400
- """
401
- asString = asString and as_string
402
-
403
- locMarker = Empty().set_parse_action(lambda s, loc, t: loc)
404
- endlocMarker = locMarker.copy()
405
- endlocMarker.callPreparse = False
406
- matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
407
- if asString:
408
- extractText = lambda s, l, t: s[t._original_start : t._original_end]
409
- else:
410
-
411
- def extractText(s, l, t):
412
- t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]
413
-
414
- matchExpr.set_parse_action(extractText)
415
- matchExpr.ignoreExprs = expr.ignoreExprs
416
- matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection)
417
- return matchExpr
418
-
419
-
420
- def ungroup(expr: ParserElement) -> ParserElement:
421
- """Helper to undo pyparsing's default grouping of And expressions,
422
- even if all but one are non-empty.
423
- """
424
- return TokenConverter(expr).add_parse_action(lambda t: t[0])
425
-
426
-
427
- def locatedExpr(expr: ParserElement) -> ParserElement:
428
- """
429
- (DEPRECATED - future code should use the Located class)
430
- Helper to decorate a returned token with its starting and ending
431
- locations in the input string.
432
-
433
- This helper adds the following results names:
434
-
435
- - ``locn_start`` - location where matched expression begins
436
- - ``locn_end`` - location where matched expression ends
437
- - ``value`` - the actual parsed results
438
-
439
- Be careful if the input text contains ``<TAB>`` characters, you
440
- may want to call :class:`ParserElement.parseWithTabs`
441
-
442
- Example::
443
-
444
- wd = Word(alphas)
445
- for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
446
- print(match)
447
-
448
- prints::
449
-
450
- [[0, 'ljsdf', 5]]
451
- [[8, 'lksdjjf', 15]]
452
- [[18, 'lkkjj', 23]]
453
- """
454
- locator = Empty().set_parse_action(lambda ss, ll, tt: ll)
455
- return Group(
456
- locator("locn_start")
457
- + expr("value")
458
- + locator.copy().leaveWhitespace()("locn_end")
459
- )
460
-
461
-
462
- def nested_expr(
463
- opener: Union[str, ParserElement] = "(",
464
- closer: Union[str, ParserElement] = ")",
465
- content: typing.Optional[ParserElement] = None,
466
- ignore_expr: ParserElement = quoted_string(),
467
- *,
468
- ignoreExpr: ParserElement = quoted_string(),
469
- ) -> ParserElement:
470
- """Helper method for defining nested lists enclosed in opening and
471
- closing delimiters (``"("`` and ``")"`` are the default).
472
-
473
- Parameters:
474
- - ``opener`` - opening character for a nested list
475
- (default= ``"("``); can also be a pyparsing expression
476
- - ``closer`` - closing character for a nested list
477
- (default= ``")"``); can also be a pyparsing expression
478
- - ``content`` - expression for items within the nested lists
479
- (default= ``None``)
480
- - ``ignore_expr`` - expression for ignoring opening and closing delimiters
481
- (default= :class:`quoted_string`)
482
- - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
483
- but will be removed in a future release
484
-
485
- If an expression is not provided for the content argument, the
486
- nested expression will capture all whitespace-delimited content
487
- between delimiters as a list of separate values.
488
-
489
- Use the ``ignore_expr`` argument to define expressions that may
490
- contain opening or closing characters that should not be treated as
491
- opening or closing characters for nesting, such as quoted_string or
492
- a comment expression. Specify multiple expressions using an
493
- :class:`Or` or :class:`MatchFirst`. The default is
494
- :class:`quoted_string`, but if no expressions are to be ignored, then
495
- pass ``None`` for this argument.
496
-
497
- Example::
498
-
499
- data_type = one_of("void int short long char float double")
500
- decl_data_type = Combine(data_type + Opt(Word('*')))
501
- ident = Word(alphas+'_', alphanums+'_')
502
- number = pyparsing_common.number
503
- arg = Group(decl_data_type + ident)
504
- LPAR, RPAR = map(Suppress, "()")
505
-
506
- code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment))
507
-
508
- c_function = (decl_data_type("type")
509
- + ident("name")
510
- + LPAR + Opt(delimited_list(arg), [])("args") + RPAR
511
- + code_body("body"))
512
- c_function.ignore(c_style_comment)
513
-
514
- source_code = '''
515
- int is_odd(int x) {
516
- return (x%2);
517
- }
518
-
519
- int dec_to_hex(char hchar) {
520
- if (hchar >= '0' && hchar <= '9') {
521
- return (ord(hchar)-ord('0'));
522
- } else {
523
- return (10+ord(hchar)-ord('A'));
524
- }
525
- }
526
- '''
527
- for func in c_function.search_string(source_code):
528
- print("%(name)s (%(type)s) args: %(args)s" % func)
529
-
530
-
531
- prints::
532
-
533
- is_odd (int) args: [['int', 'x']]
534
- dec_to_hex (int) args: [['char', 'hchar']]
535
- """
536
- if ignoreExpr != ignore_expr:
537
- ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr
538
- if opener == closer:
539
- raise ValueError("opening and closing strings cannot be the same")
540
- if content is None:
541
- if isinstance(opener, str_type) and isinstance(closer, str_type):
542
- if len(opener) == 1 and len(closer) == 1:
543
- if ignoreExpr is not None:
544
- content = Combine(
545
- OneOrMore(
546
- ~ignoreExpr
547
- + CharsNotIn(
548
- opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
549
- exact=1,
550
- )
551
- )
552
- ).set_parse_action(lambda t: t[0].strip())
553
- else:
554
- content = empty.copy() + CharsNotIn(
555
- opener + closer + ParserElement.DEFAULT_WHITE_CHARS
556
- ).set_parse_action(lambda t: t[0].strip())
557
- else:
558
- if ignoreExpr is not None:
559
- content = Combine(
560
- OneOrMore(
561
- ~ignoreExpr
562
- + ~Literal(opener)
563
- + ~Literal(closer)
564
- + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
565
- )
566
- ).set_parse_action(lambda t: t[0].strip())
567
- else:
568
- content = Combine(
569
- OneOrMore(
570
- ~Literal(opener)
571
- + ~Literal(closer)
572
- + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
573
- )
574
- ).set_parse_action(lambda t: t[0].strip())
575
- else:
576
- raise ValueError(
577
- "opening and closing arguments must be strings if no content expression is given"
578
- )
579
- ret = Forward()
580
- if ignoreExpr is not None:
581
- ret <<= Group(
582
- Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
583
- )
584
- else:
585
- ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
586
- ret.set_name("nested %s%s expression" % (opener, closer))
587
- return ret
588
-
589
-
590
- def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
591
- """Internal helper to construct opening and closing tag expressions, given a tag name"""
592
- if isinstance(tagStr, str_type):
593
- resname = tagStr
594
- tagStr = Keyword(tagStr, caseless=not xml)
595
- else:
596
- resname = tagStr.name
597
-
598
- tagAttrName = Word(alphas, alphanums + "_-:")
599
- if xml:
600
- tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)
601
- openTag = (
602
- suppress_LT
603
- + tagStr("tag")
604
- + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
605
- + Opt("/", default=[False])("empty").set_parse_action(
606
- lambda s, l, t: t[0] == "/"
607
- )
608
- + suppress_GT
609
- )
610
- else:
611
- tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
612
- printables, exclude_chars=">"
613
- )
614
- openTag = (
615
- suppress_LT
616
- + tagStr("tag")
617
- + Dict(
618
- ZeroOrMore(
619
- Group(
620
- tagAttrName.set_parse_action(lambda t: t[0].lower())
621
- + Opt(Suppress("=") + tagAttrValue)
622
- )
623
- )
624
- )
625
- + Opt("/", default=[False])("empty").set_parse_action(
626
- lambda s, l, t: t[0] == "/"
627
- )
628
- + suppress_GT
629
- )
630
- closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)
631
-
632
- openTag.set_name("<%s>" % resname)
633
- # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
634
- openTag.add_parse_action(
635
- lambda t: t.__setitem__(
636
- "start" + "".join(resname.replace(":", " ").title().split()), t.copy()
637
- )
638
- )
639
- closeTag = closeTag(
640
- "end" + "".join(resname.replace(":", " ").title().split())
641
- ).set_name("</%s>" % resname)
642
- openTag.tag = resname
643
- closeTag.tag = resname
644
- openTag.tag_body = SkipTo(closeTag())
645
- return openTag, closeTag
646
-
647
-
648
- def make_html_tags(
649
- tag_str: Union[str, ParserElement]
650
- ) -> Tuple[ParserElement, ParserElement]:
651
- """Helper to construct opening and closing tag expressions for HTML,
652
- given a tag name. Matches tags in either upper or lower case,
653
- attributes with namespaces and with quoted or unquoted values.
654
-
655
- Example::
656
-
657
- text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
658
- # make_html_tags returns pyparsing expressions for the opening and
659
- # closing tags as a 2-tuple
660
- a, a_end = make_html_tags("A")
661
- link_expr = a + SkipTo(a_end)("link_text") + a_end
662
-
663
- for link in link_expr.search_string(text):
664
- # attributes in the <A> tag (like "href" shown here) are
665
- # also accessible as named results
666
- print(link.link_text, '->', link.href)
667
-
668
- prints::
669
-
670
- pyparsing -> https://github.com/pyparsing/pyparsing/wiki
671
- """
672
- return _makeTags(tag_str, False)
673
-
674
-
675
- def make_xml_tags(
676
- tag_str: Union[str, ParserElement]
677
- ) -> Tuple[ParserElement, ParserElement]:
678
- """Helper to construct opening and closing tag expressions for XML,
679
- given a tag name. Matches tags only in the given upper/lower case.
680
-
681
- Example: similar to :class:`make_html_tags`
682
- """
683
- return _makeTags(tag_str, True)
684
-
685
-
686
- any_open_tag: ParserElement
687
- any_close_tag: ParserElement
688
- any_open_tag, any_close_tag = make_html_tags(
689
- Word(alphas, alphanums + "_:").set_name("any tag")
690
- )
691
-
692
- _htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()}
693
- common_html_entity = Regex("&(?P<entity>" + "|".join(_htmlEntityMap) + ");").set_name(
694
- "common HTML entity"
695
- )
696
-
697
-
698
- def replace_html_entity(t):
699
- """Helper parser action to replace common HTML entities with their special characters"""
700
- return _htmlEntityMap.get(t.entity)
701
-
702
-
703
- class OpAssoc(Enum):
704
- LEFT = 1
705
- RIGHT = 2
706
-
707
-
708
- InfixNotationOperatorArgType = Union[
709
- ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]]
710
- ]
711
- InfixNotationOperatorSpec = Union[
712
- Tuple[
713
- InfixNotationOperatorArgType,
714
- int,
715
- OpAssoc,
716
- typing.Optional[ParseAction],
717
- ],
718
- Tuple[
719
- InfixNotationOperatorArgType,
720
- int,
721
- OpAssoc,
722
- ],
723
- ]
724
-
725
-
726
- def infix_notation(
727
- base_expr: ParserElement,
728
- op_list: List[InfixNotationOperatorSpec],
729
- lpar: Union[str, ParserElement] = Suppress("("),
730
- rpar: Union[str, ParserElement] = Suppress(")"),
731
- ) -> ParserElement:
732
- """Helper method for constructing grammars of expressions made up of
733
- operators working in a precedence hierarchy. Operators may be unary
734
- or binary, left- or right-associative. Parse actions can also be
735
- attached to operator expressions. The generated parser will also
736
- recognize the use of parentheses to override operator precedences
737
- (see example below).
738
-
739
- Note: if you define a deep operator list, you may see performance
740
- issues when using infix_notation. See
741
- :class:`ParserElement.enable_packrat` for a mechanism to potentially
742
- improve your parser performance.
743
-
744
- Parameters:
745
- - ``base_expr`` - expression representing the most basic operand to
746
- be used in the expression
747
- - ``op_list`` - list of tuples, one for each operator precedence level
748
- in the expression grammar; each tuple is of the form ``(op_expr,
749
- num_operands, right_left_assoc, (optional)parse_action)``, where:
750
-
751
- - ``op_expr`` is the pyparsing expression for the operator; may also
752
- be a string, which will be converted to a Literal; if ``num_operands``
753
- is 3, ``op_expr`` is a tuple of two expressions, for the two
754
- operators separating the 3 terms
755
- - ``num_operands`` is the number of terms for this operator (must be 1,
756
- 2, or 3)
757
- - ``right_left_assoc`` is the indicator whether the operator is right
758
- or left associative, using the pyparsing-defined constants
759
- ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``.
760
- - ``parse_action`` is the parse action to be associated with
761
- expressions matching this operator expression (the parse action
762
- tuple member may be omitted); if the parse action is passed
763
- a tuple or list of functions, this is equivalent to calling
764
- ``set_parse_action(*fn)``
765
- (:class:`ParserElement.set_parse_action`)
766
- - ``lpar`` - expression for matching left-parentheses; if passed as a
767
- str, then will be parsed as Suppress(lpar). If lpar is passed as
768
- an expression (such as ``Literal('(')``), then it will be kept in
769
- the parsed results, and grouped with them. (default= ``Suppress('(')``)
770
- - ``rpar`` - expression for matching right-parentheses; if passed as a
771
- str, then will be parsed as Suppress(rpar). If rpar is passed as
772
- an expression (such as ``Literal(')')``), then it will be kept in
773
- the parsed results, and grouped with them. (default= ``Suppress(')')``)
774
-
775
- Example::
776
-
777
- # simple example of four-function arithmetic with ints and
778
- # variable names
779
- integer = pyparsing_common.signed_integer
780
- varname = pyparsing_common.identifier
781
-
782
- arith_expr = infix_notation(integer | varname,
783
- [
784
- ('-', 1, OpAssoc.RIGHT),
785
- (one_of('* /'), 2, OpAssoc.LEFT),
786
- (one_of('+ -'), 2, OpAssoc.LEFT),
787
- ])
788
-
789
- arith_expr.run_tests('''
790
- 5+3*6
791
- (5+3)*6
792
- -2--11
793
- ''', full_dump=False)
794
-
795
- prints::
796
-
797
- 5+3*6
798
- [[5, '+', [3, '*', 6]]]
799
-
800
- (5+3)*6
801
- [[[5, '+', 3], '*', 6]]
802
-
803
- -2--11
804
- [[['-', 2], '-', ['-', 11]]]
805
- """
806
- # captive version of FollowedBy that does not do parse actions or capture results names
807
- class _FB(FollowedBy):
808
- def parseImpl(self, instring, loc, doActions=True):
809
- self.expr.try_parse(instring, loc)
810
- return loc, []
811
-
812
- _FB.__name__ = "FollowedBy>"
813
-
814
- ret = Forward()
815
- if isinstance(lpar, str):
816
- lpar = Suppress(lpar)
817
- if isinstance(rpar, str):
818
- rpar = Suppress(rpar)
819
-
820
- # if lpar and rpar are not suppressed, wrap in group
821
- if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)):
822
- lastExpr = base_expr | Group(lpar + ret + rpar)
823
- else:
824
- lastExpr = base_expr | (lpar + ret + rpar)
825
-
826
- for i, operDef in enumerate(op_list):
827
- opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]
828
- if isinstance(opExpr, str_type):
829
- opExpr = ParserElement._literalStringClass(opExpr)
830
- if arity == 3:
831
- if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2:
832
- raise ValueError(
833
- "if numterms=3, opExpr must be a tuple or list of two expressions"
834
- )
835
- opExpr1, opExpr2 = opExpr
836
- term_name = "{}{} term".format(opExpr1, opExpr2)
837
- else:
838
- term_name = "{} term".format(opExpr)
839
-
840
- if not 1 <= arity <= 3:
841
- raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
842
-
843
- if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT):
844
- raise ValueError("operator must indicate right or left associativity")
845
-
846
- thisExpr: Forward = Forward().set_name(term_name)
847
- if rightLeftAssoc is OpAssoc.LEFT:
848
- if arity == 1:
849
- matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...])
850
- elif arity == 2:
851
- if opExpr is not None:
852
- matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(
853
- lastExpr + (opExpr + lastExpr)[1, ...]
854
- )
855
- else:
856
- matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...])
857
- elif arity == 3:
858
- matchExpr = _FB(
859
- lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
860
- ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))
861
- elif rightLeftAssoc is OpAssoc.RIGHT:
862
- if arity == 1:
863
- # try to avoid LR with this extra test
864
- if not isinstance(opExpr, Opt):
865
- opExpr = Opt(opExpr)
866
- matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
867
- elif arity == 2:
868
- if opExpr is not None:
869
- matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(
870
- lastExpr + (opExpr + thisExpr)[1, ...]
871
- )
872
- else:
873
- matchExpr = _FB(lastExpr + thisExpr) + Group(
874
- lastExpr + thisExpr[1, ...]
875
- )
876
- elif arity == 3:
877
- matchExpr = _FB(
878
- lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
879
- ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
880
- if pa:
881
- if isinstance(pa, (tuple, list)):
882
- matchExpr.set_parse_action(*pa)
883
- else:
884
- matchExpr.set_parse_action(pa)
885
- thisExpr <<= (matchExpr | lastExpr).setName(term_name)
886
- lastExpr = thisExpr
887
- ret <<= lastExpr
888
- return ret
889
-
890
-
891
- def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]):
892
- """
893
- (DEPRECATED - use IndentedBlock class instead)
894
- Helper method for defining space-delimited indentation blocks,
895
- such as those used to define block statements in Python source code.
896
-
897
- Parameters:
898
-
899
- - ``blockStatementExpr`` - expression defining syntax of statement that
900
- is repeated within the indented block
901
- - ``indentStack`` - list created by caller to manage indentation stack
902
- (multiple ``statementWithIndentedBlock`` expressions within a single
903
- grammar should share a common ``indentStack``)
904
- - ``indent`` - boolean indicating whether block must be indented beyond
905
- the current level; set to ``False`` for block of left-most statements
906
- (default= ``True``)
907
-
908
- A valid block must contain at least one ``blockStatement``.
909
-
910
- (Note that indentedBlock uses internal parse actions which make it
911
- incompatible with packrat parsing.)
912
-
913
- Example::
914
-
915
- data = '''
916
- def A(z):
917
- A1
918
- B = 100
919
- G = A2
920
- A2
921
- A3
922
- B
923
- def BB(a,b,c):
924
- BB1
925
- def BBA():
926
- bba1
927
- bba2
928
- bba3
929
- C
930
- D
931
- def spam(x,y):
932
- def eggs(z):
933
- pass
934
- '''
935
-
936
-
937
- indentStack = [1]
938
- stmt = Forward()
939
-
940
- identifier = Word(alphas, alphanums)
941
- funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":")
942
- func_body = indentedBlock(stmt, indentStack)
943
- funcDef = Group(funcDecl + func_body)
944
-
945
- rvalue = Forward()
946
- funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")")
947
- rvalue << (funcCall | identifier | Word(nums))
948
- assignment = Group(identifier + "=" + rvalue)
949
- stmt << (funcDef | assignment | identifier)
950
-
951
- module_body = stmt[1, ...]
952
-
953
- parseTree = module_body.parseString(data)
954
- parseTree.pprint()
955
-
956
- prints::
957
-
958
- [['def',
959
- 'A',
960
- ['(', 'z', ')'],
961
- ':',
962
- [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
963
- 'B',
964
- ['def',
965
- 'BB',
966
- ['(', 'a', 'b', 'c', ')'],
967
- ':',
968
- [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
969
- 'C',
970
- 'D',
971
- ['def',
972
- 'spam',
973
- ['(', 'x', 'y', ')'],
974
- ':',
975
- [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
976
- """
977
- backup_stacks.append(indentStack[:])
978
-
979
- def reset_stack():
980
- indentStack[:] = backup_stacks[-1]
981
-
982
- def checkPeerIndent(s, l, t):
983
- if l >= len(s):
984
- return
985
- curCol = col(l, s)
986
- if curCol != indentStack[-1]:
987
- if curCol > indentStack[-1]:
988
- raise ParseException(s, l, "illegal nesting")
989
- raise ParseException(s, l, "not a peer entry")
990
-
991
- def checkSubIndent(s, l, t):
992
- curCol = col(l, s)
993
- if curCol > indentStack[-1]:
994
- indentStack.append(curCol)
995
- else:
996
- raise ParseException(s, l, "not a subentry")
997
-
998
- def checkUnindent(s, l, t):
999
- if l >= len(s):
1000
- return
1001
- curCol = col(l, s)
1002
- if not (indentStack and curCol in indentStack):
1003
- raise ParseException(s, l, "not an unindent")
1004
- if curCol < indentStack[-1]:
1005
- indentStack.pop()
1006
-
1007
- NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
1008
- INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
1009
- PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
1010
- UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
1011
- if indent:
1012
- smExpr = Group(
1013
- Opt(NL)
1014
- + INDENT
1015
- + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
1016
- + UNDENT
1017
- )
1018
- else:
1019
- smExpr = Group(
1020
- Opt(NL)
1021
- + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
1022
- + Opt(UNDENT)
1023
- )
1024
-
1025
- # add a parse action to remove backup_stack from list of backups
1026
- smExpr.add_parse_action(
1027
- lambda: backup_stacks.pop(-1) and None if backup_stacks else None
1028
- )
1029
- smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
1030
- blockStatementExpr.ignore(_bslash + LineEnd())
1031
- return smExpr.set_name("indented block")
1032
-
1033
-
1034
- # it's easy to get these comment structures wrong - they're very common, so may as well make them available
1035
- c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
1036
- "C style comment"
1037
- )
1038
- "Comment of the form ``/* ... */``"
1039
-
1040
- html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
1041
- "Comment of the form ``<!-- ... -->``"
1042
-
1043
- rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
1044
- dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
1045
- "Comment of the form ``// ... (to end of line)``"
1046
-
1047
- cpp_style_comment = Combine(
1048
- Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
1049
- ).set_name("C++ style comment")
1050
- "Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"
1051
-
1052
- java_style_comment = cpp_style_comment
1053
- "Same as :class:`cpp_style_comment`"
1054
-
1055
- python_style_comment = Regex(r"#.*").set_name("Python style comment")
1056
- "Comment of the form ``# ... (to end of line)``"
1057
-
1058
-
1059
- # build list of built-in expressions, for future reference if a global default value
1060
- # gets updated
1061
- _builtin_exprs: List[ParserElement] = [
1062
- v for v in vars().values() if isinstance(v, ParserElement)
1063
- ]
1064
-
1065
-
1066
- # pre-PEP8 compatible names
1067
- delimitedList = delimited_list
1068
- countedArray = counted_array
1069
- matchPreviousLiteral = match_previous_literal
1070
- matchPreviousExpr = match_previous_expr
1071
- oneOf = one_of
1072
- dictOf = dict_of
1073
- originalTextFor = original_text_for
1074
- nestedExpr = nested_expr
1075
- makeHTMLTags = make_html_tags
1076
- makeXMLTags = make_xml_tags
1077
- anyOpenTag, anyCloseTag = any_open_tag, any_close_tag
1078
- commonHTMLEntity = common_html_entity
1079
- replaceHTMLEntity = replace_html_entity
1080
- opAssoc = OpAssoc
1081
- infixNotation = infix_notation
1082
- cStyleComment = c_style_comment
1083
- htmlComment = html_comment
1084
- restOfLine = rest_of_line
1085
- dblSlashComment = dbl_slash_comment
1086
- cppStyleComment = cpp_style_comment
1087
- javaStyleComment = java_style_comment
1088
- pythonStyleComment = python_style_comment
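The helpers deleted above are largely self-documenting; as a quick orientation, here is a minimal usage sketch of two of them, delimited_list and one_of. It is based only on the docstrings shown above, and the expected outputs in the comments come from those docstrings.

from pyparsing import Word, alphas, nums, delimited_list, one_of

# delimited_list: delimiters are suppressed by default, leaving only the elements
print(delimited_list(Word(alphas)).parse_string("aa,bb,cc").as_list())   # ['aa', 'bb', 'cc']

# one_of builds a longest-first alternation, so "<=" is tried before "<"
comparison = Word(nums) + one_of("< = > <= >= !=") + Word(nums)
print(comparison.parse_string("12 <= 34").as_list())                     # ['12', '<=', '34']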
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/install_data.py DELETED
@@ -1,84 +0,0 @@
1
- """distutils.command.install_data
2
-
3
- Implements the Distutils 'install_data' command, for installing
4
- platform-independent data files."""
5
-
6
- # contributed by Bastian Kleineidam
7
-
8
- import os
9
- from distutils.core import Command
10
- from distutils.util import change_root, convert_path
11
-
12
-
13
- class install_data(Command):
14
-
15
- description = "install data files"
16
-
17
- user_options = [
18
- (
19
- 'install-dir=',
20
- 'd',
21
- "base directory for installing data files "
22
- "(default: installation base dir)",
23
- ),
24
- ('root=', None, "install everything relative to this alternate root directory"),
25
- ('force', 'f', "force installation (overwrite existing files)"),
26
- ]
27
-
28
- boolean_options = ['force']
29
-
30
- def initialize_options(self):
31
- self.install_dir = None
32
- self.outfiles = []
33
- self.root = None
34
- self.force = 0
35
- self.data_files = self.distribution.data_files
36
- self.warn_dir = 1
37
-
38
- def finalize_options(self):
39
- self.set_undefined_options(
40
- 'install',
41
- ('install_data', 'install_dir'),
42
- ('root', 'root'),
43
- ('force', 'force'),
44
- )
45
-
46
- def run(self):
47
- self.mkpath(self.install_dir)
48
- for f in self.data_files:
49
- if isinstance(f, str):
50
- # it's a simple file, so copy it
51
- f = convert_path(f)
52
- if self.warn_dir:
53
- self.warn(
54
- "setup script did not provide a directory for "
55
- "'%s' -- installing right in '%s'" % (f, self.install_dir)
56
- )
57
- (out, _) = self.copy_file(f, self.install_dir)
58
- self.outfiles.append(out)
59
- else:
60
- # it's a tuple with path to install to and a list of files
61
- dir = convert_path(f[0])
62
- if not os.path.isabs(dir):
63
- dir = os.path.join(self.install_dir, dir)
64
- elif self.root:
65
- dir = change_root(self.root, dir)
66
- self.mkpath(dir)
67
-
68
- if f[1] == []:
69
- # If there are no files listed, the user must be
70
- # trying to create an empty directory, so add the
71
- # directory to the list of output files.
72
- self.outfiles.append(dir)
73
- else:
74
- # Copy files, adding them to the list of output files.
75
- for data in f[1]:
76
- data = convert_path(data)
77
- (out, _) = self.copy_file(data, dir)
78
- self.outfiles.append(out)
79
-
80
- def get_inputs(self):
81
- return self.data_files or []
82
-
83
- def get_outputs(self):
84
- return self.outfiles
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/README.md DELETED
@@ -1,7 +0,0 @@
1
-
2
-
3
- To add a new Op:
4
-
5
- 1. Create a new directory
6
- 2. Implement new ops there
7
- 3. Declare its Python interface in `vision.cpp`.
spaces/CVPR/v-doc_abstractive_mac/interface.py DELETED
@@ -1,23 +0,0 @@
1
- import gradio as gr
2
- from demo import predict
3
-
4
- f = open("./descrip.md","r")
5
- description= f.read()
6
- article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2205.13724' target='_blank'>Paper Link</a></p>"
7
-
8
- io = gr.Interface(fn=predict,
9
- inputs=[gr.Image(label = 'You can select an example to quickly submit'), "text"],
10
- outputs=["text"],
11
- examples=[
12
- ['PDF_val_151.png','How many table objects are located at the top side of figure?'],
13
- ['PDF_val_90.png', 'Where is the caption of the figure located at?'],
14
- ['PDF_val_64.png','How many text objects are located at the bottom side of figure?'],
15
- ['PDF_val_26.png','Are there any title exist?'],
16
- ['PDF_val_60.png','Where is the caption of the table located at?'],
17
- ['PDF_val_158.png','Does title objects exist in this page?']],
18
- title = 'V-Doc : Visual questions answers with Documents',
19
- description = description,
20
- article = article)
21
-
22
- if __name__ == '__main__':
23
- io.launch()
spaces/Caoyunkang/Segment-Any-Anomaly/install.sh DELETED
@@ -1,45 +0,0 @@
1
- # create new conda env
2
- conda create -n SAA python=3.9
3
- source activate SAA
4
-
5
- # PyTorch
6
- pip3 install torch==1.10.0+cu113 torchvision==0.11.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
7
-
8
- # $ProjectRoot: the directory where you saved our project, e.g., /home/anyad/VAND-solution
9
- ProjectRoot=/home/anyad/VAND-solution
10
- cd $ProjectRoot
11
-
12
- # SAM and DINO
13
- cd ./GroundingDINO
14
- pip install -e .
15
- cd ../SAM
16
- pip install -e .
17
-
18
- pip install setuptools==59.5.0
19
- pip install --upgrade diffusers[torch]
20
- pip install opencv-python pycocotools matplotlib onnxruntime onnx ipykernel
21
- pip install transformers
22
- pip install addict
23
- pip install yapf
24
- pip install timm
25
- pip install loguru
26
- pip install tqdm
27
- pip install scikit-image
28
- pip install scikit-learn
29
- pip install pandas
30
- pip install tensorboard
31
- pip install seaborn
32
- pip install open_clip_torch
33
- pip install SciencePlots
34
- pip install timm
35
- pip install einops
36
- pip install gradio
37
-
38
- # weights
39
- cd ../
40
- mkdir weights
41
- cd ./weights/
42
- wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth
43
- wget https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth
44
-
45
-
spaces/ChandraMohanNayal/AutoGPT/tests/test_token_counter.py DELETED
@@ -1,63 +0,0 @@
1
- import unittest
2
-
3
- import tests.context
4
- from autogpt.token_counter import count_message_tokens, count_string_tokens
5
-
6
-
7
- class TestTokenCounter(unittest.TestCase):
8
- def test_count_message_tokens(self):
9
- messages = [
10
- {"role": "user", "content": "Hello"},
11
- {"role": "assistant", "content": "Hi there!"},
12
- ]
13
- self.assertEqual(count_message_tokens(messages), 17)
14
-
15
- def test_count_message_tokens_with_name(self):
16
- messages = [
17
- {"role": "user", "content": "Hello", "name": "John"},
18
- {"role": "assistant", "content": "Hi there!"},
19
- ]
20
- self.assertEqual(count_message_tokens(messages), 17)
21
-
22
- def test_count_message_tokens_empty_input(self):
23
- self.assertEqual(count_message_tokens([]), 3)
24
-
25
- def test_count_message_tokens_invalid_model(self):
26
- messages = [
27
- {"role": "user", "content": "Hello"},
28
- {"role": "assistant", "content": "Hi there!"},
29
- ]
30
- with self.assertRaises(KeyError):
31
- count_message_tokens(messages, model="invalid_model")
32
-
33
- def test_count_message_tokens_gpt_4(self):
34
- messages = [
35
- {"role": "user", "content": "Hello"},
36
- {"role": "assistant", "content": "Hi there!"},
37
- ]
38
- self.assertEqual(count_message_tokens(messages, model="gpt-4-0314"), 15)
39
-
40
- def test_count_string_tokens(self):
41
- string = "Hello, world!"
42
- self.assertEqual(
43
- count_string_tokens(string, model_name="gpt-3.5-turbo-0301"), 4
44
- )
45
-
46
- def test_count_string_tokens_empty_input(self):
47
- self.assertEqual(count_string_tokens("", model_name="gpt-3.5-turbo-0301"), 0)
48
-
49
- def test_count_message_tokens_not_implemented_model(self):
50
- messages = [
51
- {"role": "user", "content": "Hello"},
52
- {"role": "assistant", "content": "Hi there!"},
53
- ]
54
- with self.assertRaises(NotImplementedError):
55
- count_message_tokens(messages, model="invalid_model")
56
-
57
- def test_count_string_tokens_gpt_4(self):
58
- string = "Hello, world!"
59
- self.assertEqual(count_string_tokens(string, model_name="gpt-4-0314"), 4)
60
-
61
-
62
- if __name__ == "__main__":
63
- unittest.main()
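The autogpt.token_counter module under test is not part of this diff. As a reading aid, here is one plausible minimal implementation of count_string_tokens that is consistent with the expected values above, assuming OpenAI's tiktoken library; it is a sketch, not AutoGPT's actual code.

import tiktoken

def count_string_tokens(string: str, model_name: str) -> int:
    # map the model name to its tokenizer, then count the encoded tokens
    encoding = tiktoken.encoding_for_model(model_name)
    return len(encoding.encode(string))

print(count_string_tokens("Hello, world!", "gpt-3.5-turbo-0301"))  # 4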
spaces/CikeyQI/meme-api/meme_generator/memes/bite/__init__.py DELETED
@@ -1,33 +0,0 @@
1
- from pathlib import Path
2
- from typing import List
3
-
4
- from PIL.Image import Image as IMG
5
- from pil_utils import BuildImage
6
-
7
- from meme_generator import add_meme
8
- from meme_generator.utils import save_gif
9
-
10
- img_dir = Path(__file__).parent / "images"
11
-
12
-
13
- def bite(images: List[BuildImage], texts, args):
14
- img = images[0].convert("RGBA").square()
15
- frames: List[IMG] = []
16
- # fmt: off
17
- locs = [
18
- (90, 90, 105, 150), (90, 83, 96, 172), (90, 90, 106, 148),
19
- (88, 88, 97, 167), (90, 85, 89, 179), (90, 90, 106, 151)
20
- ]
21
- # fmt: on
22
- for i in range(6):
23
- frame = BuildImage.open(img_dir / f"{i}.png")
24
- w, h, x, y = locs[i]
25
- frame.paste(img.resize((w, h)), (x, y), below=True)
26
- frames.append(frame.image)
27
- for i in range(6, 16):
28
- frame = BuildImage.open(img_dir / f"{i}.png")
29
- frames.append(frame.image)
30
- return save_gif(frames, 0.07)
31
-
32
-
33
- add_meme("bite", bite, min_images=1, max_images=1, keywords=["啃"])
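The BuildImage.paste(..., below=True) call above composites the avatar underneath each hand frame. A rough equivalent with plain PIL, assuming RGBA inputs (a sketch, not the pil_utils implementation):

from PIL import Image

def paste_below(frame: Image.Image, avatar: Image.Image, size, pos) -> Image.Image:
    layer = Image.new("RGBA", frame.size, (0, 0, 0, 0))
    layer.paste(avatar.resize(size), pos)        # avatar laid down first
    return Image.alpha_composite(layer, frame)   # frame drawn on top, i.e. below=True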
spaces/CofAI/chat.b4/g4f/Provider/Providers/Ezcht.py DELETED
@@ -1,35 +0,0 @@
1
- import requests
2
- import os
3
- import json
4
- from ...typing import sha256, Dict, get_type_hints
5
-
6
- url = 'https://gpt4.ezchat.top'
7
- model = ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo-16k-0613', 'gpt-3.5-turbo-0613']
8
- supports_stream = True
9
- needs_auth = False
10
-
11
- def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
12
- headers = {
13
- 'Content-Type': 'application/json',
14
- }
15
- data = {
16
- 'model': model,
17
- 'temperature': 0.7,
18
- 'presence_penalty': 0,
19
- 'messages': messages,
20
- }
21
- response = requests.post(url + '/api/openai/v1/chat/completions',
22
- json=data, stream=True)
23
-
24
- if stream:
25
- for chunk in response.iter_content(chunk_size=None):
26
- chunk = chunk.decode('utf-8')
27
- if chunk.strip():
28
- message = json.loads(chunk)['choices'][0]['message']['content']
29
- yield message
30
- else:
31
- message = response.json()['choices'][0]['message']['content']
32
- yield message
33
-
34
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
35
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
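For reference, the provider above is consumed by iterating the generator it returns; a minimal, hypothetical call (the model and message are placeholders) looks like this:

for chunk in _create_completion(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
):
    print(chunk, end="", flush=True)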
spaces/CofAI/netlist/style.css DELETED
@@ -1,28 +0,0 @@
1
- body {
2
- padding: 2rem;
3
- font-family: -apple-system, BlinkMacSystemFont, "Arial", sans-serif;
4
- }
5
-
6
- h1 {
7
- font-size: 16px;
8
- margin-top: 0;
9
- }
10
-
11
- p {
12
- color: rgb(107, 114, 128);
13
- font-size: 15px;
14
- margin-bottom: 10px;
15
- margin-top: 5px;
16
- }
17
-
18
- .card {
19
- max-width: 620px;
20
- margin: 0 auto;
21
- padding: 16px;
22
- border: 1px solid lightgray;
23
- border-radius: 16px;
24
- }
25
-
26
- .card p:last-child {
27
- margin-bottom: 0;
28
- }
spaces/CognitiveLabs/Research-Assistant/app.py DELETED
@@ -1,103 +0,0 @@
1
- import gradio as gr
2
-
3
- from config import check_openai_api_key
4
- from agent.research_agent import ResearchAgent
5
- from agent.toolkits import english_polishing
6
- from statics.style import *
7
-
8
-
9
- check_openai_api_key()
10
- report_history_buffer = ""
11
- report_history_num = 0
12
- report_history_tasks = []
13
- polish_history_buffer = ""
14
-
15
- def run_agent(task, agent, report_type):
16
- global report_history_num, report_history_tasks
17
- report_history_num += 1
18
- report_history_tasks.append(task)
19
- assistant = ResearchAgent(task, agent)
20
- yield from assistant.write_report(report_type)
21
-
22
-
23
- with gr.Blocks(theme=gr.themes.Base(),
24
- title="AI Research Assistant",
25
- css=css) as demo:
26
- gr.HTML(top_bar)
27
- with gr.Tab(label="🔦Report"):
28
- with gr.Column():
29
- gr.HTML(report_html)
30
- report = gr.Markdown(value="&nbsp;&nbsp;Report will appear here...",
31
- elem_classes="output")
32
- with gr.Row():
33
- agent_type = gr.Dropdown(label="# Agent Type",
34
- value="Default Agent",
35
- interactive=True,
36
- allow_custom_value=False,
37
- choices=["Default Agent",
38
- "Business Analyst Agent",
39
- "Finance Agent",
40
- "Travel Agent",
41
- "Academic Research Agent",
42
- "Computer Security Analyst Agent",
43
- "Clinical Medicine Agent",
44
- "Basic Medicine Agent",
45
- "Social Science Research Agent"])
46
- report_type = gr.Dropdown(label="# Report Type",
47
- value="Research Report",
48
- interactive=True,
49
- allow_custom_value=False,
50
- choices=["Research Report",
51
- "Resource Report",
52
- "Outline Report"])
53
-
54
- input_box = gr.Textbox(label="# What would you like to research next?", placeholder="Enter your question here")
55
- submit_btn = gr.Button("Generate Report", elem_id="primary-btn")
56
-
57
- gr.Examples(["Should I invest in the Large Language Model industry in 2023?",
58
- "Is it advisable to make investments in the electric car industry during the year 2023?",
59
- "What constitutes the optimal approach for investing in the Bitcoin industry during the year 2023?",
60
- "What are the most recent advancements in the domain of superconductors as of 2023?"],
61
- inputs=input_box)
62
-
63
- with gr.Accordion(label="# Report History", elem_id="history", open=False):
64
- report_history = gr.Markdown()
65
-
66
- def store_report(content):
67
- global report_history_num, report_history_tasks, report_history_buffer
68
- report_history_buffer += f'<details> \
69
- <summary>Research History {report_history_num}: \
70
- <i>{report_history_tasks[-1]}</i></summary> \
71
- <div id="history_box">{content}</div> \
72
- </details>'
73
- return report_history_buffer
74
-
75
- submit_btn.click(run_agent, inputs=[input_box, agent_type, report_type], outputs=report)\
76
- .then(store_report, inputs=[report], outputs=report_history)
77
-
78
- with gr.Tab("✒️English Polishing"):
79
- gr.HTML(english_polishing_html)
80
- polished_result = gr.Markdown("&nbsp;&nbsp;Polished result will appear here...", elem_classes="output")
81
- sentences = gr.Textbox(label="# What would you like to polish?", placeholder="Enter your sentence here")
82
-
83
- with gr.Row():
84
- polish_btn = gr.Button("Polish", elem_id="primary-btn")
85
-
86
- with gr.Accordion(label="# Polishing History", elem_id="history", open=False):
87
- polish_history = gr.Markdown()
88
-
89
- def store_polished_result(origin, result):
90
- global polish_history_buffer
91
- polish_history_buffer += f'<details> \
92
- <summary><i>{origin}</i></summary> \
93
- <div id="history_box">{result}</div> \
94
- </details>'
95
- return polish_history_buffer
96
-
97
- polish_btn.click(english_polishing, inputs=[sentences], outputs=polished_result) \
98
- .then(store_polished_result, inputs=[sentences, polished_result], outputs=polish_history)
99
-
100
- with gr.Tab("📑Literature Review"):
101
- gr.HTML(literature_review_html)
102
-
103
- demo.queue().launch()
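run_agent above streams by yielding partial reports, which Gradio re-renders on every yield. A standalone sketch of that generator pattern, with a stand-in for the project's ResearchAgent:

import time

def fake_agent(task: str):
    report = ""
    for word in ("Report", "on", task + ":", "finding", "one,", "finding", "two."):
        report += word + " "
        time.sleep(0.05)
        yield report  # each yield replaces the current gr.Markdown contents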
spaces/Cpp4App/Cpp4App/SEM/region_pp_processing.py DELETED
@@ -1,40 +0,0 @@
1
- import csv
2
- import re
3
- import spacy
4
- from bs4 import BeautifulSoup
5
-
6
- def get_alifornia(text):
7
- specialArea = ""
8
- california = 0
9
- with open(text, encoding='utf-8') as file_obj:
10
- for line in file_obj:
11
- specialArea += line
12
- if "alifornia" in specialArea:
13
- california = 1
14
- return specialArea,california
15
-
16
-
17
- import sys
18
- maxInt = sys.maxsize
19
- decrement = True
20
- while decrement:
21
- decrement = False
22
- try:
23
- csv.field_size_limit(maxInt)
24
- except OverflowError:
25
- maxInt = int(maxInt/10)
26
- decrement = True
27
-
28
-
29
- def get_text(path):
30
- htmlfile = open(path, 'r', encoding='utf-8')
31
- htmlhandle = htmlfile.read()
32
-
33
- soup = BeautifulSoup(htmlhandle, 'html.parser')
34
-
35
- stri = str(soup)
36
- return stri
37
-
38
-
39
-
40
-
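As a usage note, the two helpers above are typically chained: get_text flattens a saved policy page into plain text, while get_alifornia scans a text file for California-specific wording. A hypothetical invocation with placeholder file names:

page_text = get_text("privacy_policy.html")               # HTML -> string via BeautifulSoup
special_area, is_california = get_alifornia("policy.txt")
if is_california:
    print("Policy contains a California-specific section")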
spaces/Cran-May/SEA-Streamlit/app.py DELETED
@@ -1,73 +0,0 @@
1
- import streamlit as st
4
- from ctransformers import AutoModelForCausalLM
5
- # Constants
6
- TITLE = "兮辞·析辞-常明"
7
- DESCRIPTION = """
8
- 兮辞·析辞-常明 [SLIDE-SEA-7B]的部署,由SSFW NLPark项目支持
9
- """
10
-
12
-
13
-
14
- with st.sidebar:
15
- # system_promptSide = st.text_input("Optional system prompt:")
16
- temperatureSide = st.slider("情感/Temperature", min_value=0.0, max_value=1.0, value=0.9, step=0.05)
17
- max_new_tokensSide = st.slider("最大tokens生成数", min_value=0.0, max_value=4096.0, value=4096.0, step=64.0)
18
- # ToppSide = st.slider("Top-p (nucleus sampling)", min_value=0.0, max_value=1.0, value=0.6, step=0.05)
19
- # RepetitionpenaltySide = st.slider("Repetition penalty", min_value=0.0, max_value=2.0, value=1.2, step=0.05)
20
-
21
- # Load the model
22
- model = AutoModelForCausalLM.from_pretrained("Cran-May/OpenSLIDE", model_file="SLIDE.0.1.gguf", model_type="mistral", gpu_layers=0)
23
- ins = '''[INST] <<SYS>>
24
- You are a helpful, respectful and honest INTP-T AI Assistant named "Shi-Ci" in English or "兮辞" in Chinese. You are talking to a human User.
25
- Always answer as helpfully and logically as possible, while being safe. Your answers should not include any harmful, political, religious, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
26
- If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
27
- You like to use emojis. You can speak fluently in many languages, for example: English, Chinese.
28
- You are trained by "SSFW NLPark" team, you are based on SEA transformers model, not related to GPT or OpenAI.
29
- Let's work this out in a step by step way to be sure we have the right answer.
30
- <</SYS>>
31
- {} [/INST]
32
- '''
33
- # Define the conversation history
34
- conversation_history = []
35
-
36
- # Prediction function
37
- def predict(message, system_prompt='', temperature=0.7, max_new_tokens=4096,Topp=0.5,Repetitionpenalty=1.2):
38
- global conversation_history
39
- question=message
40
- input_text=ins
41
- # Append the user's input to the conversation history
42
- conversation_history.append({"role": "system", "content": input_text})
43
- response_text = model(ins.format(question))
44
- conversation_history.append({"role": "user", "content": input_text})
45
- conversation_history.append({"role": "assistant", "content": response_text})
46
- return response_text
47
-
48
- # Streamlit UI
49
- st.title(TITLE)
50
- st.write(DESCRIPTION)
51
-
52
-
53
- if "messages" not in st.session_state:
54
- st.session_state.messages = []
55
-
56
- # Display chat messages from history on app rerun
57
- for message in st.session_state.messages:
58
- with st.chat_message(message["role"], avatar=("😀" if message["role"] == 'human' else '💻')):
59
- st.markdown(message["content"])
60
-
61
- # React to user input
62
- if prompt := st.chat_input("来问问兮辞吧..."):
63
- # Display user message in chat message container
64
- st.chat_message("human",avatar = "😀").markdown(prompt)
65
- # Add user message to chat history
66
- st.session_state.messages.append({"role": "human", "content": prompt})
67
-
68
- response = predict(message=prompt)#, temperature= temperatureSide,max_new_tokens=max_new_tokensSide)
69
- # Display assistant response in chat message container
70
- with st.chat_message("assistant", avatar='💻'):
71
- st.markdown(response)
72
- # Add assistant response to chat history
73
- st.session_state.messages.append({"role": "assistant", "content": response})
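
One thing the predict() function above makes visible is that the model call is stateless: only the latest question is formatted into the template, so earlier turns never reach the model even though they are recorded. Below is a sketch of one way history could be folded back into the prompt, using the standard Llama-2 [INST] multi-turn convention; the helper name and turn structure are illustrative, not part of the app.

def build_prompt(system_prompt, history, question):
    """Fold prior (user, assistant) turns into a Llama-2-style [INST] prompt."""
    prompt = f"[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n"
    for user_msg, assistant_msg in history:
        prompt += f"{user_msg} [/INST] {assistant_msg} </s><s>[INST] "
    prompt += f"{question} [/INST]"
    return prompt

# Example: two prior turns, then a new question.
turns = [("Hi", "Hello! 😀"), ("Who are you?", "I'm Shi-Ci.")]
print(build_prompt("You are a helpful assistant.", turns, "What can you do?"))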
spaces/CrucibleAI/ControlNetMediaPipeFaceSD21/mediapipe_face_common.py DELETED
@@ -1,155 +0,0 @@
- from typing import Mapping
-
- import mediapipe as mp
- import numpy
-
-
- mp_drawing = mp.solutions.drawing_utils
- mp_drawing_styles = mp.solutions.drawing_styles
- mp_face_detection = mp.solutions.face_detection  # Only for counting faces.
- mp_face_mesh = mp.solutions.face_mesh
- mp_face_connections = mp.solutions.face_mesh_connections.FACEMESH_TESSELATION
- mp_hand_connections = mp.solutions.hands_connections.HAND_CONNECTIONS
- mp_body_connections = mp.solutions.pose_connections.POSE_CONNECTIONS
-
- DrawingSpec = mp.solutions.drawing_styles.DrawingSpec
- PoseLandmark = mp.solutions.drawing_styles.PoseLandmark
-
- min_face_size_pixels: int = 64
- f_thick = 2
- f_rad = 1
- right_iris_draw = DrawingSpec(color=(10, 200, 250), thickness=f_thick, circle_radius=f_rad)
- right_eye_draw = DrawingSpec(color=(10, 200, 180), thickness=f_thick, circle_radius=f_rad)
- right_eyebrow_draw = DrawingSpec(color=(10, 220, 180), thickness=f_thick, circle_radius=f_rad)
- left_iris_draw = DrawingSpec(color=(250, 200, 10), thickness=f_thick, circle_radius=f_rad)
- left_eye_draw = DrawingSpec(color=(180, 200, 10), thickness=f_thick, circle_radius=f_rad)
- left_eyebrow_draw = DrawingSpec(color=(180, 220, 10), thickness=f_thick, circle_radius=f_rad)
- mouth_draw = DrawingSpec(color=(10, 180, 10), thickness=f_thick, circle_radius=f_rad)
- head_draw = DrawingSpec(color=(10, 200, 10), thickness=f_thick, circle_radius=f_rad)
-
- # mp_face_mesh.FACEMESH_CONTOURS has all the items we care about.
- face_connection_spec = {}
- for edge in mp_face_mesh.FACEMESH_FACE_OVAL:
-     face_connection_spec[edge] = head_draw
- for edge in mp_face_mesh.FACEMESH_LEFT_EYE:
-     face_connection_spec[edge] = left_eye_draw
- for edge in mp_face_mesh.FACEMESH_LEFT_EYEBROW:
-     face_connection_spec[edge] = left_eyebrow_draw
- # for edge in mp_face_mesh.FACEMESH_LEFT_IRIS:
- #     face_connection_spec[edge] = left_iris_draw
- for edge in mp_face_mesh.FACEMESH_RIGHT_EYE:
-     face_connection_spec[edge] = right_eye_draw
- for edge in mp_face_mesh.FACEMESH_RIGHT_EYEBROW:
-     face_connection_spec[edge] = right_eyebrow_draw
- # for edge in mp_face_mesh.FACEMESH_RIGHT_IRIS:
- #     face_connection_spec[edge] = right_iris_draw
- for edge in mp_face_mesh.FACEMESH_LIPS:
-     face_connection_spec[edge] = mouth_draw
- iris_landmark_spec = {468: right_iris_draw, 473: left_iris_draw}
-
-
- def draw_pupils(image, landmark_list, drawing_spec, halfwidth: int = 2):
-     """We have a custom function to draw the pupils because the mp.draw_landmarks method requires a parameter for all
-     landmarks. Until our PR is merged into mediapipe, we need this separate method."""
-     if len(image.shape) != 3:
-         raise ValueError("Input image must be H,W,C.")
-     image_rows, image_cols, image_channels = image.shape
-     if image_channels != 3:  # BGR channels
-         raise ValueError('Input image must contain three channel bgr data.')
-     for idx, landmark in enumerate(landmark_list.landmark):
-         if (
-             (landmark.HasField('visibility') and landmark.visibility < 0.9) or
-             (landmark.HasField('presence') and landmark.presence < 0.5)
-         ):
-             continue
-         if landmark.x >= 1.0 or landmark.x < 0 or landmark.y >= 1.0 or landmark.y < 0:
-             continue
-         image_x = int(image_cols * landmark.x)
-         image_y = int(image_rows * landmark.y)
-         draw_color = None
-         if isinstance(drawing_spec, Mapping):
-             if drawing_spec.get(idx) is None:
-                 continue
-             else:
-                 draw_color = drawing_spec[idx].color
-         elif isinstance(drawing_spec, DrawingSpec):
-             draw_color = drawing_spec.color
-         image[image_y - halfwidth:image_y + halfwidth, image_x - halfwidth:image_x + halfwidth, :] = draw_color
-
-
- def reverse_channels(image):
-     """Given a numpy array in RGB form, convert to BGR. Will also convert from BGR to RGB."""
-     # im[:,:,::-1] is a neat hack to convert BGR to RGB by reversing the indexing order.
-     # im[:,:,::[2,1,0]] would also work but makes a copy of the data.
-     return image[:, :, ::-1]
-
-
- def generate_annotation(
-     img_rgb,
-     max_faces: int,
-     min_confidence: float
- ):
-     """
-     Find up to 'max_faces' faces inside the provided input image.
-     If min_face_size_pixels is nonzero, faces that occupy fewer than that many
-     pixels in the image are filtered out.
-     """
-     with mp_face_mesh.FaceMesh(
-         static_image_mode=True,
-         max_num_faces=max_faces,
-         refine_landmarks=True,
-         min_detection_confidence=min_confidence,
-     ) as facemesh:
-         img_height, img_width, img_channels = img_rgb.shape
-         assert img_channels == 3
-
-         results = facemesh.process(img_rgb).multi_face_landmarks
-
-         if results is None:
-             print("No faces detected in controlnet image for Mediapipe face annotator.")
-             return numpy.zeros_like(img_rgb)
-
-         # Filter faces that are too small
-         filtered_landmarks = []
-         for lm in results:
-             landmarks = lm.landmark
-             face_rect = [
-                 landmarks[0].x,
-                 landmarks[0].y,
-                 landmarks[0].x,
-                 landmarks[0].y,
-             ]  # Left, up, right, down.
-             for i in range(len(landmarks)):
-                 face_rect[0] = min(face_rect[0], landmarks[i].x)
-                 face_rect[1] = min(face_rect[1], landmarks[i].y)
-                 face_rect[2] = max(face_rect[2], landmarks[i].x)
-                 face_rect[3] = max(face_rect[3], landmarks[i].y)
-             if min_face_size_pixels > 0:
-                 face_width = abs(face_rect[2] - face_rect[0])
-                 face_height = abs(face_rect[3] - face_rect[1])
-                 face_width_pixels = face_width * img_width
-                 face_height_pixels = face_height * img_height
-                 face_size = min(face_width_pixels, face_height_pixels)
-                 if face_size >= min_face_size_pixels:
-                     filtered_landmarks.append(lm)
-             else:
-                 filtered_landmarks.append(lm)
-
-         # Annotations are drawn in BGR for some reason, but we don't need to flip a zero-filled image at the start.
-         empty = numpy.zeros_like(img_rgb)
-
-         # Draw detected faces:
-         for face_landmarks in filtered_landmarks:
-             mp_drawing.draw_landmarks(
-                 empty,
-                 face_landmarks,
-                 connections=face_connection_spec.keys(),
-                 landmark_drawing_spec=None,
-                 connection_drawing_spec=face_connection_spec
-             )
-             draw_pupils(empty, face_landmarks, iris_landmark_spec, 2)
-
-         # Flip BGR back to RGB.
-         empty = reverse_channels(empty).copy()
-
-     return empty
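
A minimal usage sketch for the annotator above, assuming the module is importable as mediapipe_face_common and that mediapipe, numpy, and Pillow are installed; the image paths are hypothetical.

import numpy
from PIL import Image

from mediapipe_face_common import generate_annotation

# generate_annotation expects an RGB H,W,3 array and returns a same-shaped
# annotation image (black wherever no face landmarks were drawn).
img_rgb = numpy.asarray(Image.open("portrait.jpg").convert("RGB"))
annotation = generate_annotation(img_rgb, max_faces=1, min_confidence=0.5)
Image.fromarray(annotation).save("face_annotation.png")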
spaces/Curranj/chatbot/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Chatbot
- emoji: 🌍
- colorFrom: gray
- colorTo: gray
- sdk: gradio
- sdk_version: 3.39.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/__init__.py DELETED
@@ -1 +0,0 @@
- """Empty __init__.py file to signal Python this directory is a package."""
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/__init__.py DELETED
@@ -1 +0,0 @@
- """Empty __init__.py file to signal Python this directory is a package."""
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/pens/recordingPen.py DELETED
@@ -1,179 +0,0 @@
- """Pen recording operations that can be accessed or replayed."""
- from fontTools.pens.basePen import AbstractPen, DecomposingPen
- from fontTools.pens.pointPen import AbstractPointPen
-
-
- __all__ = [
-     "replayRecording",
-     "RecordingPen",
-     "DecomposingRecordingPen",
-     "RecordingPointPen",
- ]
-
-
- def replayRecording(recording, pen):
-     """Replay a recording, as produced by RecordingPen or DecomposingRecordingPen,
-     to a pen.
-
-     Note that the recording does not have to be produced by those pens.
-     It can be any iterable of tuples of method name and tuple-of-arguments.
-     Likewise, pen can be any object receiving those method calls.
-     """
-     for operator, operands in recording:
-         getattr(pen, operator)(*operands)
-
-
- class RecordingPen(AbstractPen):
-     """Pen recording operations that can be accessed or replayed.
-
-     The recording can be accessed as pen.value; or replayed using
-     pen.replay(otherPen).
-
-     :Example:
-
-         from fontTools.ttLib import TTFont
-         from fontTools.pens.recordingPen import RecordingPen
-
-         glyph_name = 'dollar'
-         font_path = 'MyFont.otf'
-
-         font = TTFont(font_path)
-         glyphset = font.getGlyphSet()
-         glyph = glyphset[glyph_name]
-
-         pen = RecordingPen()
-         glyph.draw(pen)
-         print(pen.value)
-     """
-
-     def __init__(self):
-         self.value = []
-
-     def moveTo(self, p0):
-         self.value.append(("moveTo", (p0,)))
-
-     def lineTo(self, p1):
-         self.value.append(("lineTo", (p1,)))
-
-     def qCurveTo(self, *points):
-         self.value.append(("qCurveTo", points))
-
-     def curveTo(self, *points):
-         self.value.append(("curveTo", points))
-
-     def closePath(self):
-         self.value.append(("closePath", ()))
-
-     def endPath(self):
-         self.value.append(("endPath", ()))
-
-     def addComponent(self, glyphName, transformation):
-         self.value.append(("addComponent", (glyphName, transformation)))
-
-     def addVarComponent(self, glyphName, transformation, location):
-         self.value.append(("addVarComponent", (glyphName, transformation, location)))
-
-     def replay(self, pen):
-         replayRecording(self.value, pen)
-
-
- class DecomposingRecordingPen(DecomposingPen, RecordingPen):
-     """Same as RecordingPen, except that it doesn't keep components
-     as references, but draws them decomposed as regular contours.
-
-     The constructor takes a single 'glyphSet' positional argument,
-     a dictionary of glyph objects (i.e. with a 'draw' method) keyed
-     by their name::
-
-         >>> class SimpleGlyph(object):
-         ...     def draw(self, pen):
-         ...         pen.moveTo((0, 0))
-         ...         pen.curveTo((1, 1), (2, 2), (3, 3))
-         ...         pen.closePath()
-         >>> class CompositeGlyph(object):
-         ...     def draw(self, pen):
-         ...         pen.addComponent('a', (1, 0, 0, 1, -1, 1))
-         >>> glyphSet = {'a': SimpleGlyph(), 'b': CompositeGlyph()}
-         >>> for name, glyph in sorted(glyphSet.items()):
-         ...     pen = DecomposingRecordingPen(glyphSet)
-         ...     glyph.draw(pen)
-         ...     print("{}: {}".format(name, pen.value))
-         a: [('moveTo', ((0, 0),)), ('curveTo', ((1, 1), (2, 2), (3, 3))), ('closePath', ())]
-         b: [('moveTo', ((-1, 1),)), ('curveTo', ((0, 2), (1, 3), (2, 4))), ('closePath', ())]
-     """
-
-     # raises KeyError if base glyph is not found in glyphSet
-     skipMissingComponents = False
-
-
- class RecordingPointPen(AbstractPointPen):
-     """PointPen recording operations that can be accessed or replayed.
-
-     The recording can be accessed as pen.value; or replayed using
-     pointPen.replay(otherPointPen).
-
-     :Example:
-
-         from defcon import Font
-         from fontTools.pens.recordingPen import RecordingPointPen
-
-         glyph_name = 'a'
-         font_path = 'MyFont.ufo'
-
-         font = Font(font_path)
-         glyph = font[glyph_name]
-
-         pen = RecordingPointPen()
-         glyph.drawPoints(pen)
-         print(pen.value)
-
-         new_glyph = font.newGlyph('b')
-         pen.replay(new_glyph.getPointPen())
-     """
-
-     def __init__(self):
-         self.value = []
-
-     def beginPath(self, identifier=None, **kwargs):
-         if identifier is not None:
-             kwargs["identifier"] = identifier
-         self.value.append(("beginPath", (), kwargs))
-
-     def endPath(self):
-         self.value.append(("endPath", (), {}))
-
-     def addPoint(
-         self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs
-     ):
-         if identifier is not None:
-             kwargs["identifier"] = identifier
-         self.value.append(("addPoint", (pt, segmentType, smooth, name), kwargs))
-
-     def addComponent(self, baseGlyphName, transformation, identifier=None, **kwargs):
-         if identifier is not None:
-             kwargs["identifier"] = identifier
-         self.value.append(("addComponent", (baseGlyphName, transformation), kwargs))
-
-     def addVarComponent(
-         self, baseGlyphName, transformation, location, identifier=None, **kwargs
-     ):
-         if identifier is not None:
-             kwargs["identifier"] = identifier
-         self.value.append(
-             ("addVarComponent", (baseGlyphName, transformation, location), kwargs)
-         )
-
-     def replay(self, pointPen):
-         for operator, args, kwargs in self.value:
-             getattr(pointPen, operator)(*args, **kwargs)
-
-
- if __name__ == "__main__":
-     pen = RecordingPen()
-     pen.moveTo((0, 0))
-     pen.lineTo((0, 100))
-     pen.curveTo((50, 75), (60, 50), (50, 25))
-     pen.closePath()
-     from pprint import pprint
-
-     pprint(pen.value)
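
The __main__ block above only prints a recording; the complementary half of the API is replaying one into another pen. A short sketch, assuming fontTools is installed:

from fontTools.pens.recordingPen import RecordingPen, replayRecording

src = RecordingPen()
src.moveTo((0, 0))
src.lineTo((0, 100))
src.curveTo((50, 75), (60, 50), (50, 25))
src.closePath()

# Replaying into a second RecordingPen reproduces the value list exactly.
dst = RecordingPen()
replayRecording(src.value, dst)
assert dst.value == src.value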