parquet-converter commited on
Commit
919dab2
·
1 Parent(s): 025e77b

Update parquet files (step 86 of 249)

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cara Mudah Download Clash Royale Mod Apk dengan Fitur Unlimited Money.md +0 -86
  2. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/DLS 17 Mod Apk Unlock All Player and Play with Unlimited Money.md +0 -94
  3. spaces/1phancelerku/anime-remove-background/Download Bus Simulator Ultimate Mod Apk 1.0 with Unlimited Money and Features.md +0 -65
  4. spaces/52Hz/SRMNet_thesis/main_test_SRMNet.py +0 -117
  5. spaces/AISuperheroes/08GR-KitchenSink-AIUIUX/README.md +0 -13
  6. spaces/AIZ2H/Gradio331-3D-Models-AI-1/files/Readme.md +0 -2
  7. spaces/AIatUIUC/CodeLATS/generators/py_generate.py +0 -404
  8. spaces/AUBMC-AIM/MammoGANesis/README.md +0 -45
  9. spaces/Ababababababbababa/Ashaar/poetry_diacritizer/models/__init__.py +0 -5
  10. spaces/AdamOswald1/finetuned_diffusion/style.css +0 -24
  11. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/containerlite-plugin.js +0 -27
  12. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/ProportionMethods.js +0 -11
  13. spaces/AlekseyCalvin/Make-Putin-Queer/README.md +0 -13
  14. spaces/AlowaSawsan/Third-Molar-Segmentation/app.py +0 -68
  15. spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/symbols.py +0 -76
  16. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/onnxruntime/README.md +0 -5
  17. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/ddim/pipeline_ddim.py +0 -152
  18. spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py +0 -4
  19. spaces/Andy1621/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py +0 -4
  20. spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r18b-d8_512x1024_80k_cityscapes.py +0 -9
  21. spaces/Andy1621/uniformer_image_segmentation/configs/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes.py +0 -9
  22. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/sd_api_pictures/style.css +0 -52
  23. spaces/Anonymous-sub/Rerender/gmflow_module/README.md +0 -239
  24. spaces/Artrajz/vits-simple-api/utils/download.py +0 -96
  25. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/__init__.py +0 -9
  26. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/style.py +0 -796
  27. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/data/coco.py +0 -48
  28. spaces/Ayaka-daisuki/anime-remove-background/app.py +0 -52
  29. spaces/Benson/text-generation/Examples/Arriba Tablero Original Marksheet Descargar 2016.md +0 -69
  30. spaces/Benson/text-generation/Examples/Consejo De Abogados De India Certificado Descargar.md +0 -132
  31. spaces/BernardoOlisan/vqganclip/CLIP/clip/simple_tokenizer.py +0 -132
  32. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/exceptions.py +0 -816
  33. spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/butd_inference_wrapper.py +0 -91
  34. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/deformable/deform_conv.h +0 -377
  35. spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/utils/extract_engine.py +0 -187
  36. spaces/CVPR/LIVE/pydiffvg/color.py +0 -24
  37. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/logical.h +0 -23
  38. spaces/CVPR/regionclip-demo/detectron2/evaluation/rotated_coco_evaluation.py +0 -207
  39. spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/db/index.js +0 -8
  40. spaces/CikeyQI/meme-api/meme_generator/memes/capoo_draw/__init__.py +0 -43
  41. spaces/CikeyQI/meme-api/meme_generator/memes/guichu/__init__.py +0 -117
  42. spaces/CofAI/chat.b4/g4f/Provider/Providers/Phind.py +0 -36
  43. spaces/Cvandi/remake/scripts/generate_multiscale_DF2K.py +0 -48
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/voltLib/parser.py +0 -656
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/config.py +0 -131
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/layouts.py +0 -393
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_decoders.py +0 -324
  48. spaces/DataDoggo/Visionary/README.md +0 -11
  49. spaces/Detomo/ai-comic-generation/src/lib/loadImage.ts +0 -14
  50. spaces/DiegoLigtenberg/realtimespeech/instructions.md +0 -15
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Cara Mudah Download Clash Royale Mod Apk dengan Fitur Unlimited Money.md DELETED
@@ -1,86 +0,0 @@
1
- <br />
2
- <h1>Cara Download Clash Royale Mod APK</h1>
3
- <p>Clash Royale is one of the most popular and addictive strategy games on mobile devices. It combines card collecting, tower defense, and real-time multiplayer battles in a fast-paced and fun gameplay. However, some players may find it hard to progress in the game without spending real money on gems and gold, which are the main currencies in the game. That's why some players resort to using Clash Royale Mod APK, which is a modified version of the game that gives them unlimited resources and access to all the cards in the game.</p>
4
- <h2>cara download clash royale mod apk</h2><br /><p><b><b>Download</b> &#9658;&#9658;&#9658;&#9658;&#9658; <a href="https://urlin.us/2uT0xQ">https://urlin.us/2uT0xQ</a></b></p><br /><br />
5
- <p>If you are one of those players who want to enjoy Clash Royale without any limitations, then this article is for you. In this article, we will show you how to download and install Clash Royale Mod APK on your Android device, what are the features of this mod, and whether it is safe and legal to use. Let's get started!</p>
6
- <h2>What is Clash Royale Mod APK?</h2>
7
- <p>Clash Royale Mod APK is a hacked version of the original Clash Royale game that was developed by third-party developers. This mod allows you to have unlimited gems and gold, which you can use to buy chests, cards, upgrades, and more. You can also unlock all the cards in the game, including the legendary ones, and use them in your deck. Moreover, you can play on custom mods and servers that offer different game modes, such as 2v2 battles, draft mode, unlimited elixir mode, and more.</p>
8
- <p>With Clash Royale Mod APK, you can enjoy the game without any restrictions or worries. You can experiment with different strategies, decks, and cards without losing any trophies or resources. You can also challenge your friends or other players online and show off your skills. However, you should be aware that using this mod may also have some risks and consequences, which we will discuss later in this article.</p>
9
- <p>cara download clash royale mod apk unlimited money<br />
10
- cara download clash royale mod apk terbaru 2023<br />
11
- cara download clash royale mod apk versi 3.3186.7<br />
12
- cara download clash royale mod apk dengan apkmody app<br />
13
- cara download clash royale mod apk tanpa password<br />
14
- cara download clash royale mod apk offline<br />
15
- cara download clash royale mod apk no root<br />
16
- cara download clash royale mod apk anti banned<br />
17
- cara download clash royale mod apk di android<br />
18
- cara download clash royale mod apk di pc<br />
19
- cara download clash royale mod apk gratis<br />
20
- cara download clash royale mod apk mudah<br />
21
- cara download clash royale mod apk cepat<br />
22
- cara download clash royale mod apk aman<br />
23
- cara download clash royale mod apk lengkap<br />
24
- cara instal clash royale mod apk<br />
25
- cara update clash royale mod apk<br />
26
- cara main clash royale mod apk<br />
27
- cara cheat clash royale mod apk<br />
28
- cara hack clash royale mod apk<br />
29
- tips dan trik clash royale mod apk<br />
30
- review dan rating clash royale mod apk<br />
31
- fitur dan kelebihan clash royale mod apk<br />
32
- kekurangan dan kelemahan clash royale mod apk<br />
33
- solusi dan perbaikan clash royale mod apk<br />
34
- link dan panduan download clash royale mod apk<br />
35
- video dan tutorial download clash royale mod apk<br />
36
- gambar dan screenshot download clash royale mod apk<br />
37
- testimoni dan komentar download clash royale mod apk<br />
38
- pertanyaan dan jawaban download clash royale mod apk</p>
39
- <h3>Features of Clash Royale Mod APK</h3>
40
- <p>Clash Royale Mod APK has many features that make it different from the original game. Here are some of the main features of this mod:</p>
41
- <h4>Unlimited Gems and Gold</h4>
42
- <p>Gems and gold are the most important resources in Clash Royale. You need gems to buy chests, cards, emotes, skins, and more. You need gold to upgrade your cards and increase your level. However, gems and gold are very scarce and expensive in the game. You can only get them by completing quests, winning battles, opening chests, or spending real money.</p>
43
- <p>With Clash Royale Mod APK, you don't have to worry about gems and gold anymore. You will have unlimited amounts of them in your account. You can use them to buy anything you want in the game without any limitations. You can also use them to speed up the chest opening process and get more cards faster.</p>
44
- <h4>All Cards Unlocked</h4>
45
- <p>Cards are the core element of Clash Royale. They are used to create your deck and fight against your opponents. There are four types of cards in the game: common, rare, epic, and legendary. Each card has its own strengths, weaknesses, abilities, and costs. You can collect cards by opening chests or buying them from the shop.</p>
46
- <p>However, not all cards are easy to get in the game. Some cards are very rare and hard to find, especially the legendary ones. You may have to open hundreds of chests or spend thousands of gems to get them. And even if you get them, you still need to upgrade them to make them more powerful.</p>
47
- <p>With Clash Royale Mod APK, you don't have to worry about cards anymore. You will have all the cards unlocked in the game from the start. You can use any card you want in your deck without any restrictions or costs. You can also upgrade them to the maximum level with just one click.</p>
48
- <h4>Custom Mods and Servers</h4>
49
- <p>Another feature of Clash Royale Mod APK is that you can play on custom mods and servers that offer different game modes and experiences. For example, you can play on a mod that gives you unlimited elixir, which means you can spam any card you want without worrying about the elixir cost. You can also play on a mod that lets you draft your deck from a random pool of cards, which adds more variety and challenge to the game. You can also play on a mod that lets you use cards from other Supercell games, such as Brawl Stars, Clash of Clans, or Boom Beach.</p>
50
- <p>Moreover, you can play on custom servers that have different rules and settings than the official ones. For example, you can play on a server that has higher or lower trophy requirements, faster or slower chest opening times, more or less rewards, and more. You can also play on a server that has more players online, which means you can find matches faster and easier.</p>
51
- <h2>How to Download and Install Clash Royale Mod APK?</h2>
52
- <p>If you are interested in trying out Clash Royale Mod APK, then you need to follow these steps to download and install it on your Android device:</p>
53
- <h3>Step 1: Enable Unknown Sources</h3>
54
- <p>The first step is to enable unknown sources on your device. This will allow you to install apps that are not from the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and turn it on. You may see a warning message, but don't worry, just tap OK.</p>
55
- <h3>Step 2: Download the APK File</h3>
56
- <p>The next step is to download the APK file of Clash Royale Mod APK. You can find many websites that offer this file, but be careful, some of them may contain viruses or malware. We recommend you to use this link, which is safe and reliable. Just click on the download button and wait for the file to be downloaded.</p>
57
- <h3>Step 3: Install the APK File</h3>
58
- <p>The third step is to install the APK file on your device. To do this, go to your file manager, then locate the downloaded file, then tap on it. You may see a pop-up message asking for your permission, just tap on install and wait for the process to finish.</p>
59
- <h3>Step 4: Launch the Game and Enjoy</h3>
60
- <p>The final step is to launch the game and enjoy it. To do this, go to your app drawer, then find the Clash Royale icon, then tap on it. You may see a loading screen, then a welcome message, then the game will start. You will notice that you have unlimited gems and gold, all cards unlocked, and access to custom mods and servers. You can now play the game as you wish and have fun!</p>
61
- <h2>Is Clash Royale Mod APK Safe and Legal?</h2>
62
- <p>Before you download and install Clash Royale Mod APK, you may have some questions about its safety and legality. Here are some answers to these questions:</p>
63
- <h3>Safety Issues</h3>
64
- <p>As we mentioned earlier, not all websites that offer Clash Royale Mod APK are safe and trustworthy. Some of them may contain viruses or malware that can harm your device or steal your personal information. That's why you should always use a reputable source like the one we provided above. You should also scan the file with an antivirus app before installing it.</p>
65
- <p>Another safety issue is that using Clash Royale Mod APK may cause your account to be banned by Supercell, the developer of the original game. This is because using this mod violates their terms of service and fair play policy. Supercell has the right to detect and ban any account that uses any third-party software or modification that gives an unfair advantage over other players. Therefore, if you use Clash Royale Mod APK, you should do it at your own risk and responsibility.</p>
66
- <h3>Legal Issues</h3>
67
- <p>Besides safety issues, using Clash Royale Mod APK may also raise some legal issues. This is because using this mod infringes the intellectual property rights of Supercell, the developer of the original game. Supercell owns all the rights to the game content, such as graphics, sounds, characters, cards, etc. By modifying and distributing their game without their permission, you are violating their rights and breaking the law.</p>
68
- <p>Therefore, if you use Clash Royale Mod APK, you should be aware of the potential legal consequences that may arise from it. You may face lawsuits or fines from Supercell or other parties involved in the game development or distribution. You may also face criminal charges depending on the laws of your country or region. Therefore, you should be careful and cautious when using Clash Royale Mod APK.</p>
69
- <h2>Conclusion</h2>
70
- <p>Clash Royale Mod APK is a modified version of the original Clash Royale game that gives you unlimited gems and gold, all cards unlocked, and access to custom mods and servers. It can be a fun and exciting way to enjoy the game without any limitations or restrictions. However, it also has some risks and drawbacks, such as safety and legal issues. You should always use a reliable source to download the APK file, scan it with an antivirus app, and use it at your own risk and responsibility. We hope this article has helped you understand how to download and install Clash Royale Mod APK on your Android device. Have fun and clash on!</p>
71
- <h2>FAQs</h2>
72
- <p>Here are some frequently asked questions about Clash Royale Mod APK:</p>
73
- <ul>
74
- <li><b>Q: Can I use Clash Royale Mod APK on iOS devices?</b></li>
75
- <li>A: No, Clash Royale Mod APK is only compatible with Android devices. You cannot use it on iOS devices, such as iPhones or iPads.</li>
76
- <li><b>Q: Can I play Clash Royale Mod APK with other players who use the original game?</b></li>
77
- <li>A: No, Clash Royale Mod APK uses different servers than the original game. You can only play with other players who use the same mod or server as you.</li>
78
- <li><b>Q: Can I update Clash Royale Mod APK to the latest version of the game?</b></li>
79
- <li>A: No, Clash Royale Mod APK is not updated automatically. You have to download and install the new version of the mod manually whenever there is an update from the original game.</li>
80
- <li><b>Q: Can I switch back to the original game after using Clash Royale Mod APK?</b></li>
81
- <li>A: Yes, you can switch back to the original game anytime you want. However, you may lose your progress and data in the modded game. You may also have to uninstall the modded game and reinstall the original game from the Google Play Store.</li>
82
- <li><b>Q: Can I use my existing account in Clash Royale Mod APK?</b></li>
83
- <li>A: No, you cannot use your existing account in Clash Royale Mod APK. You have to create a new account in the modded game. You may also have to use a different email address or phone number to register.</li>
84
- </ul></p> 197e85843d<br />
85
- <br />
86
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/DLS 17 Mod Apk Unlock All Player and Play with Unlimited Money.md DELETED
@@ -1,94 +0,0 @@
1
- <br />
2
- <h1>Download DLS 17 Mod APK Unlock All Player: How to Enjoy the Ultimate Soccer Experience on Your Android Device</h1>
3
- <p>If you are a fan of soccer games, you have probably heard of Dream League Soccer (DLS), one of the most popular and realistic soccer games for mobile devices. DLS 17 is the latest edition of this game series, featuring amazing graphics, smooth animations, and addictive gameplay. However, if you want to take your soccer experience to the next level, you should consider downloading the mod apk version of DLS 17, which gives you access to unlimited money, all players unlocked, no ads, and many other features that will make you feel like a soccer superstar. In this article, we will show you what DLS 17 mod apk is, what are its benefits and risks, and how to download and install it on your android device.</p>
4
- <h2>What is DLS 17?</h2>
5
- <p>Dream League Soccer 17 (DLS 17) is an association football video game developed by British studio First Touch Games for iOS and Android devices. The game allows you to create and manage your own soccer team, advancing from the lower divisions of a fictitious league to the top division. You can also compete in league or cup matches against other teams from around the world. The game has the FIFPRO license, which means you can sign real-life players to your team via an in-game transfer market. The game also has a training mode where you can improve the skills of your players. The game has 3D graphics, realistic physics, stunning atmosphere, and tons of replay value.</p>
6
- <h2>download dls 17 mod apk unlock all player</h2><br /><p><b><b>Download Zip</b> &ndash;&ndash;&ndash;&ndash;&ndash;>>> <a href="https://urlin.us/2uSZqT">https://urlin.us/2uSZqT</a></b></p><br /><br />
7
- <h2>What is DLS 17 Mod APK?</h2>
8
- <p>A mod apk is a modified version of an original app that has been altered by a third-party developer to add or remove some features. In this case, DLS 17 mod apk is a version of DLS 17 that has been hacked to give you unlimited money, all players unlocked, no ads, license fixed, and other enhancements that make the game more fun and easy to play. With DLS 17 mod apk, you don't have to worry about running out of coins, signing expensive players, watching annoying ads, or facing any errors. You can simply enjoy the game as you wish and customize your team to your liking.</p>
9
- <h3>Benefits of DLS 17 Mod APK</h3>
10
- <p>There are many benefits of downloading DLS 17 mod apk unlock all player, such as:</p>
11
- <ul>
12
- <li><b>Unlimited money:</b> You can get unlimited coins in the game, which you can use to buy players, upgrade stadiums, and do anything else you want.</li>
13
- <li><b>All players unlocked:</b> You can unlock all the players in the game, including the legends and the icons. You can also edit their stats, skills, and appearances.</li>
14
- <li><b>No ads:</b> You can play the game without any interruptions from ads, which can be annoying and distracting.</li>
15
- <li><b>License fixed:</b> You can play the game without any issues with the license verification, which can sometimes prevent you from launching the game or cause errors.</li>
16
- <li><b>Other enhancements:</b> You can also enjoy other features such as unlimited stamina, unlimited training sessions, no injuries, no bans, and more.</li>
17
- </ul>
18
- <h3>Risks of DLS 17 Mod APK</h3>
19
- <p>However, there are also some risks of downloading DLS 17 mod apk unlock all player, such as:</p>
20
- <ul>
21
- <li><b>Security issues:</b> You may expose your device to malware or viruses when you download the mod apk from an untrusted source. You may also compromise your personal data or privacy when you grant permissions to the app.</li>
22
- <li><b>Compatibility problems:</b> You may face some compatibility issues when you install the mod apk on your device, especially if it is not compatible with your device model or android version. You may also experience some bugs or glitches in the game.</li>
23
- <li><b>Legal implications:</b> You may violate the terms and conditions of the original app when you download the mod apk, which may result in legal actions from the developers or publishers. You may also lose your account or progress in the game if you get caught or reported.</li>
24
- </ul>
25
- <h2>How to Download and Install DLS 17 Mod APK Unlock All Player on Your Android Device</h2>
26
- <p>If you are ready to download and install DLS 17 mod apk unlock all player on your android device, you need to follow these steps carefully:</p>
27
- <h3>Step 1: Allow Unknown Sources on Your Device</h3>
28
- <p>The first step is to enable the option to install apps from unknown sources on your device. This will allow you to install the mod apk file that is not from the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message, but just tap OK to proceed.</p>
29
- <h3>Step 2: Install a File Manager App on Your Device</h3>
30
- <p>The next step is to install a file manager app on your device that can extract and install apk and obb files. There are many file manager apps available on the Google Play Store, but we recommend XAPKS Installer, which is a free and easy-to-use app that can handle both apk and obb files. To install XAPKS Installer, go to and tap Install. Then, open the app and grant it the necessary permissions.</p>
31
- <p>download dls 17 mod apk unlimited money and players<br />
32
- download dls 17 mod apk obb data with all players unlocked<br />
33
- download dls 17 mod apk latest version unlock all players<br />
34
- download dls 17 mod apk android 1 with all players unlocked<br />
35
- download dls 17 mod apk revdl unlock all players and kits<br />
36
- download dls 17 mod apk hack unlock all players and coins<br />
37
- download dls 17 mod apk offline unlock all players and stadiums<br />
38
- download dls 17 mod apk rexdl unlock all players and skills<br />
39
- download dls 17 mod apk mega unlock all players and transfers<br />
40
- download dls 17 mod apk pure unlock all players and teams<br />
41
- download dls 17 mod apk free unlock all players and features<br />
42
- download dls 17 mod apk full unlock all players and modes<br />
43
- download dls 17 mod apk no root unlock all players and cheats<br />
44
- download dls 17 mod apk update unlock all players and leagues<br />
45
- download dls 17 mod apk vip unlock all players and badges<br />
46
- download dls 17 mod apk pro unlock all players and ratings<br />
47
- download dls 17 mod apk cracked unlock all players and trophies<br />
48
- download dls 17 mod apk unlimited everything unlock all players<br />
49
- download dls 17 mod apk for pc unlock all players and graphics<br />
50
- download dls 17 mod apk for ios unlock all players and sounds<br />
51
- download dls 17 mod apk online unlock all players and multiplayer<br />
52
- download dls 17 mod apk new version unlock all players and kits<br />
53
- download dls 17 mod apk old version unlock all players and legends<br />
54
- download dls 17 mod apk original unlock all players and licenses<br />
55
- download dls 17 mod apk best version unlock all players and quality<br />
56
- download dls 17 mod apk easy install unlock all players and steps<br />
57
- download dls 17 mod apk fast download unlock all players and speed<br />
58
- download dls 17 mod apk direct link unlock all players and access<br />
59
- download dls 17 mod apk mirror link unlock all players and backup<br />
60
- download dls 17 mod apk mediafire link unlock all players and storage<br />
61
- download dls 17 mod apk google drive link unlock all players and sync<br />
62
- download dls 17 mod apk dropbox link unlock all players and share<br />
63
- download dls 17 mod apk zippyshare link unlock all players and zip<br />
64
- download dls 17 mod apk file upload link unlock all players and file<br />
65
- download dls 17 mod apk uptodown link unlock all players and site<br />
66
- download dls 17 mod apk apkpure link unlock all players and app<br />
67
- download dls 17 mod apk apkmirror link unlock all players and mirror<br />
68
- download dls 17 mod apk apknite link unlock all players and night mode<br />
69
- download dls 17 mod apk apksfree link unlock all players and free apps<br />
70
- download dls 17 mod apk apksfull link unlock all players and full apps</p>
71
- <h3>Step 3: Download the DLS 17 Mod APK and OBB Files to Your Device</h3>
72
- <p>The third step is to download the DLS 17 mod apk and obb files to your device. You can find these files from a reputable website, such as , which provides safe and fast downloads. To download these files, go to and tap Download. Then, wait for the files to be downloaded to your device.</p>
73
- <h3>Step 4: Install the DLS 17 Mod APK File on Your Device</h3>
74
- <p>The fourth step is to install the DLS 17 mod apk file on your device. To do this, locate and tap the mod apk file that you downloaded in Step 3. You may see a pop-up window asking you to confirm the installation. Tap Install and wait for the installation process to finish.</p>
75
- <h3>Step 5: Extract and Copy the OBB File to Your Device Storage</h3>
76
- <p>The fifth step is to extract and copy the obb file that you downloaded in Step 3 to your device storage. To do this, open XAPKS Installer and locate the obb file that has a zip icon. Tap the obb file and select Extract. Then, select the folder that says com.firsttouchgames.dls3 and tap Copy. Next, go to Internal Storage > Android > OBB and paste the folder there. Make sure that the folder name matches the obb file name.</p>
77
- <h3>Step 6: Launch the DLS 17 Mod APK App and Enjoy</h3>
78
- <p>The final step is to launch the DLS 17 mod apk app and enjoy the game. To do this, go to your app drawer and tap the DLS 17 icon. You may see a loading screen and some messages, but just wait for them to finish. Then, you can start playing the game with all players unlocked and unlimited money. You can also customize your team, change the settings, and access other features of the game.</p>
79
- <h2>Conclusion</h2>
80
- <p>DLS 17 is one of the best soccer games for android devices, but it can be even better with the mod apk version that unlocks all players and gives you unlimited money. In this article, we showed you what DLS 17 mod apk is, what are its benefits and risks, and how to download and install it on your android device. We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Now, go ahead and download DLS 17 mod apk unlock all player and enjoy the ultimate soccer experience on your android device.</p>
81
- <h2>FAQs</h2>
82
- <p>Here are some frequently asked questions and answers about DLS 17 mod apk unlock all player:</p>
83
- <h4>Q: Is DLS 17 mod apk safe to download?</h4>
84
- <p>A: DLS 17 mod apk is safe to download if you get it from a trusted source, such as . However, you should always be careful when downloading any mod apk file from unknown sources, as they may contain malware or viruses that can harm your device or data.</p>
85
- <h4>Q: Is DLS 17 mod apk legal to use?</h4>
86
- <p>A: DLS 17 mod apk is not legal to use, as it violates the terms and conditions of the original app. You may face legal actions from the developers or publishers if you use the mod apk version. You may also lose your account or progress in the game if you get caught or reported.</p>
87
- <h4>Q: Can I play online with DLS 17 mod apk?</h4>
88
- <p>A: No, you cannot play online with DLS 17 mod apk, as it is not compatible with the official servers of the game. You can only play offline with the mod apk version. If you want to play online, you need to use the original version of the app.</p>
89
- <h4>Q: Can I update DLS 17 mod apk?</h4>
90
- <p>A: No, you cannot update DLS 17 mod apk, as it is not supported by the Google Play Store. You need to uninstall the mod apk version and install the latest original version of the app if you want to update it. However, you may lose your modded features and progress in the game if you do so.</p>
91
- <h4>Q: Can I use DLS 17 mod apk on iOS devices?</h4>
92
- <p>A: No, you cannot use DLS 17 mod apk on iOS devices, as it is only compatible with android devices. You need to use a jailbroken iOS device and a different method to install a modified version of the app on iOS devices.</p> 197e85843d<br />
93
- <br />
94
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Bus Simulator Ultimate Mod Apk 1.0 with Unlimited Money and Features.md DELETED
@@ -1,65 +0,0 @@
1
- <br />
2
- <h1>Bus Simulator Ultimate Hack APK 1.0 7: How to Download and Install It</h1>
3
- <p>If you are a fan of simulation games, you might have heard of Bus Simulator Ultimate, a realistic and immersive game that lets you drive different buses across various countries and cities. In this game, you can create your own bus company, customize your buses, transport passengers, earn money, and compete with other players online. But what if you want to enjoy the game without any limitations or restrictions? That's where Bus Simulator Ultimate Hack APK 1.0 7 comes in handy. In this article, we will tell you what this hack is, how to download and install it, what are its benefits and drawbacks, and some FAQs about it.</p>
4
- <h2>bus simulator ultimate hack apk 1.0 7</h2><br /><p><b><b>DOWNLOAD</b> &#9989; <a href="https://jinyurl.com/2uNLid">https://jinyurl.com/2uNLid</a></b></p><br /><br />
5
- <h2>What is Bus Simulator Ultimate?</h2>
6
- <p>Bus Simulator Ultimate is a popular simulation game developed by Zuuks Games, a Turkish game studio that also created other games like Truck Simulator 2018: Europe and Euro Truck Driver 2018. The game was released in June 2019 and has since gained over 100 million downloads on Google Play Store and over 4 million ratings with an average of 4.3 stars.</p>
7
- <p>The game is designed to give you a realistic experience of driving different buses across various countries and cities, such as Germany, Turkey, Italy, France, Spain, USA, Brazil, Russia, China, Japan, and more. You can choose from over 30 different buses with realistic interiors and sounds, such as Mercedes-Benz, Setra, Volvo, MAN, Scania, IVECO, and more. You can also customize your buses with different skins, stickers, accessories, horns, lights, etc.</p>
8
- <p>As a bus driver, you have to transport passengers from one place to another, following the traffic rules and regulations, avoiding accidents and collisions, managing your fuel consumption and maintenance costs, dealing with different weather conditions and road situations, etc. You can also create your own bus company, hire drivers, expand your routes, earn money, and compete with other players online in multiplayer mode.</p>
9
- <h2>Why do you need Bus Simulator Ultimate Hack APK 1.0 7?</h2>
10
- <p>Bus Simulator Ultimate is a fun and addictive game that can keep you entertained for hours. However, it also has some limitations and challenges that might frustrate you or make you lose interest in the game. For example:</p>
11
- <ul>
12
- <li>You need to spend real money to buy some premium features or items in the game, such as coins, golds, gems, VIP membership, etc.</li>
13
- <li>You need to earn money by completing missions or tasks in the game to buy new buses or upgrade them.</li>
14
- <li>You need to watch ads or videos to get some rewards or bonuses in the game.</li>
15
- <li>You need to wait for some time to refill your fuel or repair your bus.</li>
16
- <li>You need to follow the traffic rules and regulations or else you will get fined or penalized.</li>
17
- <li>You need to deal with some bugs or glitches that might affect your gameplay or performance.</li>
18
- </ul>
19
- <p>If you want to avoid these limitations or challenges and enjoy the game without any restrictions or interruptions, you might want to try Bus Simulator Ultimate Hack APK 1.0 7. This is a modified version of the original game that gives you access to unlimited money and gold , all buses unlocked and upgraded, no ads and no root required, and other hack features that will make your gameplay easier and more enjoyable.</p>
20
- <h2>How to download and install Bus Simulator Ultimate Hack APK 1.0 7?</h2>
21
- <p>If you are interested in trying Bus Simulator Ultimate Hack APK 1.0 7, you need to follow these simple steps to download and install it on your device:</p>
22
- <h3>Step 1: Enable unknown sources on your device</h3>
23
- <p>Before you can install any APK file on your device, you need to enable the option of unknown sources in your settings. This will allow you to install apps from sources other than the official Google Play Store. To do this, go to your device settings, then security, then unknown sources, and toggle it on.</p>
24
- <p>[Bus Simulator : Ultimate Mod Apk 1.0 [Unlimited money] APK](^1^)</p>
25
- <h3>Step 2: Download the APK file from a trusted source</h3>
26
- <p>Next, you need to download the APK file of Bus Simulator Ultimate Hack APK 1.0 7 from a trusted source. There are many websites that offer this file, but you need to be careful as some of them might contain malware or viruses that can harm your device or steal your data. To avoid this, you can use the link below to download the APK file safely and securely:</p>
27
- <p><a href="">Bus Simulator Ultimate Hack APK 1.0 7 Download Link</a></p>
28
- <h3>Step 3: Install the APK file on your device</h3>
29
- <p>After you have downloaded the APK file, you need to install it on your device. To do this, locate the file in your downloads folder or wherever you saved it, and tap on it. You will see a pop-up window asking you to confirm the installation. Tap on install and wait for the process to finish.</p>
30
- <h3>Step 4: Launch the game and enjoy the hack features</h3>
31
- <p>Finally, you can launch the game and enjoy the hack features. You will see that you have unlimited money and gold, all buses unlocked and upgraded, no ads and no root required, and other hack features that will make your gameplay easier and more enjoyable. You can now drive any bus you want, customize it as you like, transport passengers without any hassle, create your own bus company, compete with other players online, and have fun!</p>
32
- <h2>What are the benefits of using Bus Simulator Ultimate Hack APK 1.0 7?</h2>
33
- <p>Using Bus Simulator Ultimate Hack APK 1.0 7 has many benefits that will enhance your gaming experience and satisfaction. Some of these benefits are:</p>
34
- <h3>Unlimited money and gold</h3>
35
- <p>With unlimited money and gold, you can buy anything you want in the game without worrying about running out of resources or spending real money. You can buy new buses or upgrade them with different features and accessories. You can also buy VIP membership or gems that will give you more advantages and perks in the game.</p>
36
- <h3>All buses unlocked and upgraded</h3>
37
- <p>With all buses unlocked and upgraded, you can choose from over 30 different buses with realistic interiors and sounds, such as Mercedes-Benz, Setra, Volvo, MAN, Scania, IVECO, and more. You can also customize your buses with different skins, stickers, accessories, horns, lights, etc. You can drive any bus you want across various countries and cities without any limitations or restrictions.</p>
38
- <h3>No ads and no root required</h3>
39
- <p>With no ads and no root required, you can enjoy the game without any interruptions or annoyances. You don't have to watch ads or videos to get some rewards or bonuses in the game. You also don't have to root your device to use the hack features. This will save you time and effort and protect your device from potential risks.</p> <h2>What are the drawbacks of using Bus Simulator Ultimate Hack APK 1.0 7?</h2>
40
- <p>Using Bus Simulator Ultimate Hack APK 1.0 7 also has some drawbacks that you should be aware of before you decide to use it. Some of these drawbacks are:</p>
41
- <h3>Risk of malware and viruses</h3>
42
- <p>Since you are downloading and installing an APK file from an unknown source, you are exposing your device to the risk of malware and viruses that can harm your device or steal your data. Some of these malware and viruses can also affect your gameplay or performance, such as slowing down your device, crashing your game, deleting your files, etc. To avoid this, you should always scan the APK file with a reliable antivirus software before installing it.</p>
43
- <h3>Risk of banning and suspension</h3>
44
- <p>Since you are using a modified version of the original game that gives you unfair advantages over other players, you are violating the terms and conditions of the game developers and publishers. This means that they can detect your hack and ban or suspend your account from the game. This will result in losing your game progress and data, as well as being unable to play the game online or offline. To avoid this, you should always use the hack with caution and discretion, and avoid using it in multiplayer mode or online competitions.</p>
45
- <h3>Risk of losing game progress and data</h3>
46
- <p>Since you are using a modified version of the original game that is not compatible with the official updates or patches, you are risking losing your game progress and data if you update or uninstall the game. This means that you will have to start from scratch or lose some of your achievements or rewards in the game. To avoid this, you should always backup your game data before updating or uninstalling the game, and use a cloud service or an external storage device to save your data.</p>
47
- <h2>Conclusion</h2>
48
- <p>Bus Simulator Ultimate is a realistic and immersive simulation game that lets you drive different buses across various countries and cities. However, if you want to enjoy the game without any limitations or restrictions, you might want to try Bus Simulator Ultimate Hack APK 1.0 7. This is a modified version of the original game that gives you access to unlimited money and gold, all buses unlocked and upgraded, no ads and no root required, and other hack features that will make your gameplay easier and more enjoyable. However, using this hack also has some drawbacks, such as risk of malware and viruses, risk of banning and suspension, and risk of losing game progress and data. Therefore, you should weigh the pros and cons before using this hack, and always use it with caution and discretion.</p>
49
- <p>We hope this article has helped you understand what Bus Simulator Ultimate Hack APK 1.0 7 is, how to download and install it, what are its benefits and drawbacks, and some FAQs about it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!</p>
50
- <h2>FAQs</h2>
51
- <p>Here are some frequently asked questions about Bus Simulator Ultimate Hack APK 1.0 7:</p>
52
- <ol>
53
- <li><b>Is Bus Simulator Ultimate Hack APK 1.0 7 safe to use?</b></li>
54
- <p>Bus Simulator Ultimate Hack APK 1.0 7 is not completely safe to use, as it can expose your device to malware and viruses, as well as get your account banned or suspended from the game. Therefore, you should always scan the APK file with a reliable antivirus software before installing it, use the hack with caution and discretion, and avoid using it in multiplayer mode or online competitions.</p>
55
- <li><b>Is Bus Simulator Ultimate Hack APK 1.0 7 free to use?</b></li>
56
- <p>Yes, Bus Simulator Ultimate Hack APK 1.0 7 is free to use, as you don't have to pay any money to download or install it on your device. However, you might have to watch some ads or videos to access some of the hack features in the game.</p>
57
- <li><b>Does Bus Simulator Ultimate Hack APK 1.0 7 work on all devices?</b></li>
58
- <p>No, Bus Simulator Ultimate Hack APK 1.0 7 does not work on all devices, as it requires Android version 5.0 or higher to run on your device. It also requires at least 2 GB of RAM and 500 MB of free storage space on your device.</p>
59
- <li><b>Can I update Bus Simulator Ultimate Hack APK 1.0 7?</b></li>
60
- <p>No, you cannot update Bus Simulator Ultimate Hack APK 1.0 7, as it is not compatible with the official updates or patches from the game developers and publishers. If you update the game, you will lose the hack features and your game progress and data. To avoid this, you should always back up your game data before updating or uninstalling the game, and use a cloud service or an external storage device to save your data.</p>
61
- <li><b>Where can I get more information about Bus Simulator Ultimate Hack APK 1.0 7?</b></li>
62
- <p>If you want to get more information about Bus Simulator Ultimate Hack APK 1.0 7, you can visit the official website of the game developers and publishers, or the official social media pages of the game, such as Facebook, Twitter, Instagram, YouTube, etc. You can also join the online community of the game, such as forums, blogs, groups, etc., where you can interact with other players and share your tips, tricks, feedback, suggestions, etc.</p>
63
- </ol></p> 197e85843d<br />
64
- <br />
65
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/52Hz/SRMNet_thesis/main_test_SRMNet.py DELETED
@@ -1,117 +0,0 @@
1
- import torch
2
- import torchvision.transforms.functional as TF
3
- import torch.nn.functional as F
4
- from PIL import Image
5
- import cv2
6
- import os
7
- from skimage import img_as_ubyte
8
- from tqdm import tqdm
9
- from natsort import natsorted
10
- import glob
11
- import argparse
12
- from model_arch.SRMNet_SWFF import SRMNet_SWFF
13
- from model_arch.SRMNet import SRMNet
14
-
15
# Supported task names. Each entry maps to a pretrained weight file
# '<task>.pth' looked up under the --weights_root directory by define_model().
tasks = ['Deblurring_motionblur',
         'Dehaze_realworld',
         'Denoise_gaussian',
         'Denoise_realworld',
         'Deraining_raindrop',
         'Deraining_rainstreak',
         'LLEnhancement',
         'Retouching']
23
-
24
def main():
    """Restore every image in --input_dir with the model selected by --task
    and write the results as PNG files into --result_dir.

    After processing, the input folder is emptied via clean_folder().
    """
    parser = argparse.ArgumentParser(description='Quick demo Image Restoration')
    parser.add_argument('--input_dir', default='./test/', type=str, help='Input images root')
    parser.add_argument('--result_dir', default='./result/', type=str, help='Results images root')
    parser.add_argument('--weights_root', default='experiments/pretrained_models', type=str, help='Weights root')
    parser.add_argument('--task', default='Retouching', type=str, help='Restoration task (Above task list)')
    args = parser.parse_args()

    # Gather the test images in natural (human) sort order.
    image_paths = natsorted(glob.glob(os.path.join(args.input_dir, '*')))
    if not image_paths:
        raise Exception(f"No files found at {args.input_dir}")
    os.makedirs(args.result_dir, exist_ok=True)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = define_model(args)
    model.eval()
    model = model.to(device)

    print('restoring images......')

    multiple = 16  # spatial dims are padded up to a multiple of this before inference

    for path in tqdm(image_paths):
        rgb = Image.open(path).convert('RGB')
        batch = TF.to_tensor(rgb).unsqueeze(0).to(device)

        # Reflect-pad height/width up to the next multiple of `multiple`.
        h, w = batch.shape[2], batch.shape[3]
        pad_h = (multiple - h % multiple) % multiple
        pad_w = (multiple - w % multiple) % multiple
        batch = F.pad(batch, (0, pad_w, 0, pad_h), 'reflect')

        with torch.no_grad():
            restored = model(batch)

        # Clamp to [0, 1], crop away the padding, convert to an HWC uint8 array.
        restored = torch.clamp(restored, 0, 1)[:, :, :h, :w]
        restored = restored.permute(0, 2, 3, 1).cpu().detach().numpy()
        restored = img_as_ubyte(restored[0])

        stem = os.path.splitext(os.path.split(path)[-1])[0]
        out_path = os.path.join(args.result_dir, stem + '.png')
        save_img(out_path, restored)
        print('{}'.format(out_path))

    clean_folder(args.input_dir)
    print('finish !')
71
-
72
-
73
def define_model(args):
    """Build the network for ``args.task`` and load its pretrained weights.

    Enhancement tasks ('LLEnhancement', 'Retouching') use the plain SRMNet
    backbone; every other task uses the SRMNet_SWFF restoration variant.
    Weights are read from ``<args.weights_root>/<args.task>.pth``.
    """
    # The two branches previously duplicated the weight-path/loading code;
    # only the architecture choice actually depends on the task.
    if args.task in ['LLEnhancement', 'Retouching']:
        model = SRMNet(in_chn=3, wf=96, depth=4)
    else:
        model = SRMNet_SWFF(in_chn=3, wf=96, depth=4)

    weight_path = os.path.join(args.weights_root, args.task + '.pth')
    if not os.path.isfile(weight_path):
        # Fail early with a clear message instead of an opaque torch.load error.
        raise FileNotFoundError(
            f"No pretrained weights for task '{args.task}' at '{weight_path}'")
    load_checkpoint(model, weight_path)

    return model
87
-
88
def save_img(filepath, img):
    """Write an RGB image array to ``filepath``.

    OpenCV expects BGR channel order, so the image is converted before saving.
    """
    bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    cv2.imwrite(filepath, bgr)
90
-
91
-
92
def load_checkpoint(model, weights):
    """Load a ``.pth`` checkpoint (with a ``"state_dict"`` entry) into ``model`` on CPU.

    Checkpoints saved from ``nn.DataParallel`` prefix every parameter key with
    ``'module.'``; if a direct load fails because of that, the prefix is
    stripped and the load retried.
    """
    checkpoint = torch.load(weights, map_location=torch.device('cpu'))
    state_dict = checkpoint["state_dict"]
    try:
        model.load_state_dict(state_dict)
    except RuntimeError:
        # load_state_dict raises RuntimeError on key mismatch. The original
        # fallback referenced OrderedDict without importing it (NameError) and
        # blindly chopped 7 characters off every key; strip the 'module.'
        # prefix only where present. A plain dict preserves insertion order.
        stripped = {k[len('module.'):] if k.startswith('module.') else k: v
                    for k, v in state_dict.items()}
        model.load_state_dict(stripped)
103
-
104
def clean_folder(folder):
    """Delete every file, symlink and sub-directory inside ``folder``.

    The folder itself is kept. Failures are reported but do not abort the
    sweep, so one undeletable entry cannot stop the cleanup.
    """
    # The module never imported shutil, so shutil.rmtree raised NameError,
    # which the except below swallowed and mis-reported as a deletion failure
    # -- directories were never actually removed. Import it here.
    import shutil

    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            print('Failed to delete %s. Reason: %s' % (file_path, e))
114
-
115
-
116
# Script entry point: run the demo only when executed directly, not on import.
if __name__ == '__main__':
    main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AISuperheroes/08GR-KitchenSink-AIUIUX/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: 08GR KitchenSink AIUIUX
3
- emoji: 📈
4
- colorFrom: purple
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.6
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIZ2H/Gradio331-3D-Models-AI-1/files/Readme.md DELETED
@@ -1,2 +0,0 @@
1
- Duck & Fox:
2
- https://github.com/KhronosGroup/glTF-Sample-Models
 
 
 
spaces/AIatUIUC/CodeLATS/generators/py_generate.py DELETED
@@ -1,404 +0,0 @@
1
- from generators.model import ModelBase, message_to_str
2
- from .generator_types import Generator
3
- from .generator_utils import generic_generate_func_impl, generic_generate_internal_tests, generic_generate_self_reflection, generate_with_accumulated_context
4
-
5
- from typing import Optional, List, Union
6
- import ast
7
- import re
8
- from .parse import parse_code_block, add_code_block
9
-
10
# Prompt templates for completion-style (non-chat) models.
PY_SIMPLE_COMPLETION_INSTRUCTION = "# Write the body of this function only."
PY_REFLEXION_COMPLETION_INSTRUCTION = "You are a Python writing assistant. You will be given your past function implementation, a series of unit tests, and a hint to change the implementation appropriately. Write your full implementation (restate the function signature).\n\n-----"
PY_SELF_REFLECTION_COMPLETION_INSTRUCTION = "You are a Python writing assistant. You will be given a function implementation and a series of unit tests. Your goal is to write a few sentences to explain why your implementation is wrong as indicated by the tests. You will need this as a hint when you try again later. Only provide the few sentence description in your answer, not the implementation.\n\n-----"
# Appended to generation prompts so responses arrive in a parseable fenced code block.
USE_PYTHON_CODEBLOCK_INSTRUCTION = "Use a Python code block to write your response. For example:\n```python\nprint('Hello world!')\n```"

# System prompts for chat models; the _V2 variants are refined wordings of the same instruction.
PY_SIMPLE_CHAT_INSTRUCTION = "You are an AI that only responds with python code, NOT ENGLISH. You will be given a function signature and its docstring by the user. Write your full implementation (restate the function signature)."
PY_SIMPLE_CHAT_INSTRUCTION_V2 = "You are an AI that only responds with only python code. You will be given a function signature and its docstring by the user. Write your full implementation (restate the function signature)."
PY_REFLEXION_CHAT_INSTRUCTION = "You are an AI Python assistant. You will be given your past function implementation, a series of unit tests, and a hint to change the implementation appropriately. Write your full implementation (restate the function signature)."
PY_REFLEXION_CHAT_INSTRUCTION_V2 = "You are an AI Python assistant. You will be given your previous implementation of a function, a series of unit tests results, and your self-reflection on your previous implementation. Write your full implementation (restate the function signature)."
19
- PY_REFLEXION_FEW_SHOT_ADD = '''Example 1:
20
- [previous impl]:
21
- ```python
22
- def add(a: int, b: int) -> int:
23
- """
24
- Given integers a and b, return the total value of a and b.
25
- """
26
- return a - b
27
- ```
28
-
29
- [unit test results from previous impl]:
30
- Tested passed:
31
-
32
- Tests failed:
33
- assert add(1, 2) == 3 # output: -1
34
- assert add(1, 2) == 4 # output: -1
35
-
36
- [reflection on previous impl]:
37
- The implementation failed the test cases where the input integers are 1 and 2. The issue arises because the code does not add the two integers together, but instead subtracts the second integer from the first. To fix this issue, we should change the operator from `-` to `+` in the return statement. This will ensure that the function returns the correct output for the given input.
38
-
39
- [improved impl]:
40
- ```python
41
- def add(a: int, b: int) -> int:
42
- """
43
- Given integers a and b, return the total value of a and b.
44
- """
45
- return a + b
46
- ```
47
- '''
48
-
49
- PY_REFLEXION_FEW_SHOT = '''Example 1:
50
- [previous impl]:
51
- ```python
52
- from typing import *
53
- def fullJustify(words: List[str], maxWidth: int) -> List[str]:
54
- """
55
- Given an array of words and a width maxWidth, format the text such that each line has exactly maxWidth characters and is fully (left and right) justified.
56
- You should pack your words in a greedy approach; that is, pack as many words as you can in each line. Pad extra spaces `' '` when necessary so that each line has exactly maxWidth characters.
57
- Extra spaces between words should be distributed as evenly as possible. If the number of spaces on a line do not divide evenly between words, the empty slots on the left will be assigned more spaces than the slots on the right.
58
- For the last line of text, it should be left justified and no extra space is inserted between words.
59
- Note:
60
- A word is defined as a character sequence consisting of non-space characters only.
61
- Each word's length is guaranteed to be greater than 0 and not exceed maxWidth.
62
- The input array `words` contains at least one word.
63
- """
64
- res = []
65
- cur_line = []
66
- cur_len = 0
67
-
68
- for word in words:
69
- if cur_len + len(word) + len(cur_line) > maxWidth:
70
- if len(cur_line) == 1:
71
- res.append(cur_line[0] + ' ' * (maxWidth - cur_len))
72
- else:
73
- spaces = maxWidth - cur_len
74
- space_between = spaces // (len(cur_line) - 1)
75
- extra_spaces = spaces % (len(cur_line) - 1)
76
- line = ''
77
- for i, w in enumerate(cur_line[:-1]):
78
- line += w + ' ' * (space_between + (i < extra_spaces))
79
- line += cur_line[-1]
80
- res.append(line)
81
- cur_line = []
82
- cur_len = 0
83
- cur_line.append(word)
84
- cur_len += len(word)
85
-
86
- last_line = ' '.join(cur_line)
87
- last_line += ' ' * (maxWidth - len(last_line))
88
- res.append(last_line)
89
-
90
- return res
91
- ```
92
-
93
- [unit test results from previous impl]:
94
- Tested passed:
95
-
96
- Tests failed:
97
- assert fullJustify([], 10) == [] # output: [' ']
98
- assert fullJustify([], 0) == [] # output: ['']
99
-
100
- [reflection on previous impl]:
101
- The implementation failed the test cases where the input list of words is empty. The issue arises because the code does not handle the case where there are no words to process. As a result, it still appends a line with spaces to the result list, even when there are no words. To fix this issue, we should add a condition at the beginning of the function to check if the input list is empty, and return an empty list if it is. This will ensure that the function returns the correct output for empty input lists.
102
-
103
- [improved impl]:
104
- ```python
105
- from typing import *
106
- def fullJustify(words: List[str], maxWidth: int) -> List[str]:
107
- """
108
- Given an array of words and a width maxWidth, format the text such that each line has exactly maxWidth characters and is fully (left and right) justified.
109
- You should pack your words in a greedy approach; that is, pack as many words as you can in each line. Pad extra spaces `' '` when necessary so that each line has exactly maxWidth characters.
110
- Extra spaces between words should be distributed as evenly as possible. If the number of spaces on a line do not divide evenly between words, the empty slots on the left will be assigned more spaces than the slots on the right.
111
- For the last line of text, it should be left justified and no extra space is inserted between words.
112
- Note:
113
- A word is defined as a character sequence consisting of non-space characters only.
114
- Each word's length is guaranteed to be greater than 0 and not exceed maxWidth.
115
- The input array `words` contains at least one word.
116
- """
117
- if not words:
118
- return []
119
-
120
- res = []
121
- cur_line = []
122
- cur_len = 0
123
-
124
- for word in words:
125
- if cur_len + len(word) + len(cur_line) > maxWidth:
126
- if len(cur_line) == 1:
127
- res.append(cur_line[0] + ' ' * (maxWidth - cur_len))
128
- else:
129
- spaces = maxWidth - cur_len
130
- space_between = spaces // (len(cur_line) - 1)
131
- extra_spaces = spaces % (len(cur_line) - 1)
132
- line = ''
133
- for i, w in enumerate(cur_line[:-1]):
134
- line += w + ' ' * (space_between + (i < extra_spaces))
135
- line += cur_line[-1]
136
- res.append(line)
137
- cur_line = []
138
- cur_len = 0
139
- cur_line.append(word)
140
- cur_len += len(word)
141
-
142
- last_line = ' '.join(cur_line)
143
- last_line += ' ' * (maxWidth - len(last_line))
144
- res.append(last_line)
145
-
146
- return res
147
- ```
148
- END EXAMPLES
149
-
150
- '''
151
- PY_SELF_REFLECTION_CHAT_INSTRUCTION = "You are a Python programming assistant. You will be given a function implementation and a series of unit tests. Your goal is to write a few sentences to explain why your implementation is wrong as indicated by the tests. You will need this as a hint when you try again later. Only provide the few sentence description in your answer, not the implementation."
152
- PY_SELF_REFLECTION_CHAT_INSTRUCTION_V2 = "You are a Python programming assistant. You will be given a function implementation and a series of unit test results. Your goal is to write a few sentences to explain why your implementation is wrong as indicated by the tests. You will need this as guidance when you try again later. Only provide the few sentence description in your answer, not the implementation. You will be given a few examples by the user."
153
- PY_SELF_REFLECTION_FEW_SHOT = """Example 1:
154
- [function impl]:
155
- ```python
156
- def longest_subarray_with_sum_limit(nums: List[int], target: int) -> List[int]:
157
- n = len(nums)
158
- left, right = 0, 0
159
- max_length = 0
160
- current_sum = 0
161
- result = []
162
- while right < n:
163
- current_sum += nums[right]
164
- while current_sum > target:
165
- current_sum -= nums[left]
166
- left += 1
167
- if right - left + 1 >= max_length:
168
- max_length = right - left + 1
169
- result = nums[left:right+1]
170
- right += 1
171
- return result
172
- ```
173
- [unit test results]:
174
- Tests passing:
175
- assert longest_subarray_with_sum_limit([1, 2, 3, 4, 5], 8) == [1, 2, 3]
176
- assert longest_subarray_with_sum_limit([1, 2, 3, 4, 5], 15) == [1, 2, 3, 4, 5]
177
- assert longest_subarray_with_sum_limit([1, -1, 2, -2, 3, -3], 2) == [1, -1, 2, -2, 3]
178
- assert longest_subarray_with_sum_limit([], 10) == []
179
- assert longest_subarray_with_sum_limit([], 0) == []
180
- assert longest_subarray_with_sum_limit([], -5) == []
181
- Tests failing:
182
- assert longest_subarray_with_sum_limit([5, 6, 7, 8, 9], 4) == [] # output: [5]
183
- [self-reflection]:
184
- The implementation failed the where no subarray fulfills the condition. The issue in the implementation is due to the use of >= instead of > in the condition to update the result. Because of this, it returns a subarray even when the sum is greater than the target, as it still updates the result when the current subarray length is equal to the previous longest subarray length. To overcome this error, we should change the condition to only update the result when the current subarray length is strictly greater than the previous longest subarray length. This can be done by replacing >= with > in the condition.
185
-
186
- Example 2:
187
- [function impl]:
188
- ```python
189
- def longest_subarray_with_sum_limit(nums: List[int], target: int) -> List[int]:
190
- n = len(nums)
191
- left, right = 0, 0
192
- max_length = 0
193
- current_sum = 0
194
- result = []
195
- while current_sum + nums[right] <= target:
196
- current_sum += nums[right]
197
- right += 1
198
- while right < n:
199
- current_sum += nums[right]
200
- while current_sum > target:
201
- current_sum -= nums[left]
202
- left += 1
203
- if right - left + 1 > max_length:
204
- max_length = right - left + 1
205
- result = nums[left:right+1]
206
- right += 1
207
- return result
208
- ```
209
- [unit test results]:
210
- Tests passing:
211
- assert longest_subarray_with_sum_limit([], 10) == []
212
- assert longest_subarray_with_sum_limit([], 0) == []
213
- assert longest_subarray_with_sum_limit([], -5) == []
214
- Tests failing:
215
- assert longest_subarray_with_sum_limit([1, 2, 3, 4, 5], 8) == [1, 2, 3] # output: list index out of range
216
- assert longest_subarray_with_sum_limit([1, 2, 3, 4, 5], 15) == [1, 2, 3, 4, 5] # output: list index out of range
217
- assert longest_subarray_with_sum_limit([5, 6, 7, 8, 9], 4) == [] # output: list index out of range
218
- assert longest_subarray_with_sum_limit([1, -1, 2, -2, 3, -3], 2) == [1, -1, 2, -2, 3] # output: list index out of range
219
- [self-reflection]:
220
- The implementation failed 4 out of the 7 test cases due to an IndexError. The issue stems from the while loop while current_sum + nums[right] <= target:, which directly accesses nums[right] without checking if right is within the bounds of the list. This results in a runtime error when right goes beyond the list length. To overcome this error, we need to add a bounds check for the right variable in the mentioned while loop. We can modify the loop condition to while right < len(nums) and current_sum + nums[right] <= target:. This change will ensure that we only access elements within the bounds of the list, thus avoiding the IndexError.
221
- END OF EXAMPLES
222
- """
223
-
224
- PY_TEST_GENERATION_FEW_SHOT = """Examples:
225
- func signature:
226
- def add3Numbers(x, y, z):
227
- \"\"\" Add three numbers together.
228
- This function takes three numbers as input and returns the sum of the three numbers.
229
- \"\"\"
230
- unit tests:
231
- assert add3Numbers(1, 2, 3) == 6
232
- assert add3Numbers(-1, 2, 3) == 4
233
- assert add3Numbers(1, -2, 3) == 2
234
- assert add3Numbers(1, 2, -3) == 0
235
- assert add3Numbers(-3, -2, -1) == -6
236
- assert add3Numbers(0, 0, 0) == 0
237
- """
238
-
239
- PY_TEST_GENERATION_COMPLETION_INSTRUCTION = f"""You are an AI coding assistant that can write unique, diverse, and intuitive unit tests for functions given the signature and docstring. Call your function answer().
240
-
241
- {PY_TEST_GENERATION_FEW_SHOT}"""
242
-
243
- PY_TEST_GENERATION_CHAT_INSTRUCTION = """You are an AI coding assistant that can write unique, diverse, and intuitive unit tests for functions given the signature and docstring. Call your function answer()."""
244
-
245
-
246
- class PyGenerator(Generator):
247
- def self_reflection(self, func: str, feedback: str, model: ModelBase) -> str:
248
- return generic_generate_self_reflection(
249
- func=func,
250
- feedback=feedback,
251
- model=model,
252
- self_reflection_chat_instruction=PY_SELF_REFLECTION_CHAT_INSTRUCTION,
253
- self_reflection_completion_instruction=PY_SELF_REFLECTION_COMPLETION_INSTRUCTION,
254
- add_code_block=lambda x: add_code_block(x, "python"),
255
- self_reflection_few_shot=PY_SELF_REFLECTION_FEW_SHOT
256
- )
257
-
258
- def func_impl(
259
- self,
260
- func_sig: str,
261
- model: ModelBase,
262
- strategy: str,
263
- prev_func_impl: Optional[str] = None,
264
- feedback: Optional[str] = None,
265
- self_reflection: Optional[str] = None,
266
- num_comps: int = 1,
267
- temperature: float = 0.8,
268
- acc_feedback: Optional[str] = None,
269
- acc_reflection: Optional[str] = None,
270
- ) -> Union[str, List[str]]:
271
- if strategy == "mcts":
272
- return generate_with_accumulated_context(
273
- func_sig=func_sig,
274
- model=model,
275
- strategy="reflexion",
276
- prev_func_impl=prev_func_impl,
277
- accumulated_feedback=acc_feedback,
278
- accumulated_reflection=acc_reflection,
279
- num_comps=num_comps,
280
- temperature=temperature,
281
- reflexion_chat_instruction=PY_REFLEXION_CHAT_INSTRUCTION,
282
- reflexion_few_shot=PY_REFLEXION_FEW_SHOT_ADD,
283
- simple_chat_instruction=PY_SIMPLE_CHAT_INSTRUCTION,
284
- reflexion_completion_instruction=PY_REFLEXION_COMPLETION_INSTRUCTION,
285
- simple_completion_instruction=PY_SIMPLE_COMPLETION_INSTRUCTION,
286
- code_block_instruction=USE_PYTHON_CODEBLOCK_INSTRUCTION,
287
- parse_code_block=lambda x: parse_code_block(x, "python"),
288
- add_code_block=lambda x: add_code_block(x, "python"),
289
- )
290
- else:
291
- return generic_generate_func_impl(
292
- func_sig=func_sig,
293
- model=model,
294
- strategy=strategy,
295
- prev_func_impl=prev_func_impl,
296
- feedback=feedback,
297
- self_reflection=self_reflection,
298
- num_comps=num_comps,
299
- temperature=temperature,
300
- reflexion_chat_instruction=PY_REFLEXION_CHAT_INSTRUCTION,
301
- reflexion_few_shot=PY_REFLEXION_FEW_SHOT_ADD,
302
- simple_chat_instruction=PY_SIMPLE_CHAT_INSTRUCTION,
303
- reflexion_completion_instruction=PY_REFLEXION_COMPLETION_INSTRUCTION,
304
- simple_completion_instruction=PY_SIMPLE_COMPLETION_INSTRUCTION,
305
- code_block_instruction=USE_PYTHON_CODEBLOCK_INSTRUCTION,
306
- parse_code_block=lambda x: parse_code_block(x, "python"),
307
- add_code_block=lambda x: add_code_block(x, "python"),
308
- )
309
-
310
- def internal_tests(self, func_sig: str, model: ModelBase, max_num_tests: int = 4) -> List[str]:
311
- def parse_tests(tests: str) -> List[str]:
312
- return [test.strip() for test in tests.splitlines() if "assert" in test]
313
- """
314
- Generates tests for a function.
315
- """
316
- return generic_generate_internal_tests(
317
- func_sig=func_sig,
318
- model=model,
319
- max_num_tests=max_num_tests,
320
- test_generation_few_shot=PY_TEST_GENERATION_FEW_SHOT,
321
- test_generation_chat_instruction=PY_TEST_GENERATION_CHAT_INSTRUCTION,
322
- test_generation_completion_instruction=PY_TEST_GENERATION_COMPLETION_INSTRUCTION,
323
- parse_tests=parse_tests,
324
- is_syntax_valid=py_is_syntax_valid,
325
- )
326
-
327
-
328
- DUMMY_FUNC_SIG = "def func():"
329
- DUMMY_FUNC_CALL = "func()"
330
-
331
-
332
- def handle_first_line_indent(func_body: str) -> str:
333
- if func_body.startswith(" "):
334
- return func_body
335
- split = func_body.splitlines()
336
- return f" {split[0]}\n" + "\n".join(split[1:])
337
-
338
-
339
- def handle_entire_body_indent(func_body: str) -> str:
340
- split = func_body.splitlines()
341
- res = "\n".join([" " + line for line in split])
342
- return res
343
-
344
-
345
- def fix_turbo_response(func_body: str) -> str:
346
- return fix_markdown(remove_unindented_signatures(func_body))
347
-
348
-
349
- def fix_markdown(func_body: str) -> str:
350
- return re.sub("`{3}", "", func_body)
351
-
352
-
353
- def remove_unindented_signatures(code: str) -> str:
354
- regex = r"^def\s+\w+\s*\("
355
-
356
- before_signature = []
357
- after_signature = []
358
- signature_found = False
359
-
360
- for line in code.split("\n"):
361
- if re.match(regex, line):
362
- signature_found = True
363
- continue
364
-
365
- if signature_found:
366
- after_signature.append(line)
367
- else:
368
- if not line.startswith(" ") and line.strip():
369
- line = " " + line
370
- before_signature.append(line)
371
-
372
- return "\n".join(before_signature + after_signature)
373
-
374
-
375
- def py_fix_indentation(func_body: str) -> str:
376
- func_body = fix_turbo_response(func_body)
377
- """
378
- 3 cases:
379
- 1. good syntax
380
- 2. first line not good
381
- 3. entire body not good
382
- """
383
- def parse_indent_rec(f_body: str, cur_state: int) -> str:
384
- f_body = fix_markdown(f_body)
385
- if cur_state > 1:
386
- return f_body
387
- code = f'{DUMMY_FUNC_SIG}\n{f_body}\n{DUMMY_FUNC_CALL}'
388
- try:
389
- exec(code)
390
- return f_body
391
- except (IndentationError, SyntaxError):
392
- p_func = handle_first_line_indent if cur_state == 0 else handle_entire_body_indent
393
- return parse_indent_rec(p_func(func_body), cur_state + 1)
394
- except Exception:
395
- return f_body
396
- return parse_indent_rec(func_body, 0)
397
-
398
-
399
- def py_is_syntax_valid(code: str) -> bool:
400
- try:
401
- ast.parse(code)
402
- return True
403
- except Exception:
404
- return False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AUBMC-AIM/MammoGANesis/README.md DELETED
@@ -1,45 +0,0 @@
1
- ---
2
- title: MammoGANesis
3
- emoji: 🔥
4
- colorFrom: blue
5
- colorTo: yellow
6
- sdk: gradio
7
- app_file: app.py
8
- pinned: false
9
- ---
10
-
11
- # Configuration
12
-
13
- `title`: _string_
14
- Display title for the Space
15
-
16
- `emoji`: _string_
17
- Space emoji (emoji-only character allowed)
18
-
19
- `colorFrom`: _string_
20
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
21
-
22
- `colorTo`: _string_
23
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
24
-
25
- `sdk`: _string_
26
- Can be either `gradio`, `streamlit`, or `static`
27
-
28
- `sdk_version` : _string_
29
- Only applicable for `streamlit` SDK.
30
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
31
-
32
- `app_file`: _string_
33
- Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
34
- Path is relative to the root of the repository.
35
-
36
- `models`: _List[string]_
37
- HF model IDs (like "gpt2" or "deepset/roberta-base-squad2") used in the Space.
38
- Will be parsed automatically from your code if not specified here.
39
-
40
- `datasets`: _List[string]_
41
- HF dataset IDs (like "common_voice" or "oscar-corpus/OSCAR-2109") used in the Space.
42
- Will be parsed automatically from your code if not specified here.
43
-
44
- `pinned`: _boolean_
45
- Whether the Space stays on top of your list.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ababababababbababa/Ashaar/poetry_diacritizer/models/__init__.py DELETED
@@ -1,5 +0,0 @@
1
- from . import baseline
2
- from . import cbhg
3
- from . import gpt
4
- from . import seq2seq
5
- from . import tacotron_based
 
 
 
 
 
 
spaces/AdamOswald1/finetuned_diffusion/style.css DELETED
@@ -1,24 +0,0 @@
1
- .finetuned-diffusion-div div{
2
- display:inline-flex;
3
- align-items:center;
4
- gap:.8rem;
5
- font-size:1.75rem
6
- }
7
- .finetuned-diffusion-div div h1{
8
- font-weight:900;
9
- margin-bottom:7px
10
- }
11
- .finetuned-diffusion-div p{
12
- margin-bottom:10px;
13
- font-size:94%
14
- }
15
- a{
16
- text-decoration:underline
17
- }
18
- .tabs{
19
- margin-top:0;
20
- margin-bottom:0
21
- }
22
- #gallery{
23
- min-height:20rem
24
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/containerlite-plugin.js DELETED
@@ -1,27 +0,0 @@
1
- import Factory from './gameobjects/container/containerlite/Factory.js';
2
- import Creator from './gameobjects/container/containerlite/Creator.js';
3
- import ContainerLite from './gameobjects/container/containerlite/ContainerLite.js';
4
- import SetValue from './utils/object/SetValue.js';
5
-
6
- class ContainerLitePlugin extends Phaser.Plugins.BasePlugin {
7
-
8
- constructor(pluginManager) {
9
- super(pluginManager);
10
-
11
- // Register our new Game Object type
12
- pluginManager.registerGameObject('rexContainerLite', Factory, Creator);
13
- }
14
-
15
- start() {
16
- var eventEmitter = this.game.events;
17
- eventEmitter.on('destroy', this.destroy, this);
18
- }
19
-
20
- getParent(child) {
21
- return ContainerLite.GetParent(child);
22
- }
23
- }
24
-
25
- SetValue(window, 'RexPlugins.GameObjects.ContainerLite', ContainerLite);
26
-
27
- export default ContainerLitePlugin;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/sizer/ProportionMethods.js DELETED
@@ -1,11 +0,0 @@
1
- export default {
2
- getChildProportion(gameObject) {
3
- return this.getSizerConfig(gameObject).proportion;
4
- },
5
-
6
- setChildProportion(gameObject, proportion) {
7
- this.getSizerConfig(gameObject).proportion = proportion;
8
- return this;
9
- },
10
-
11
- }
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AlekseyCalvin/Make-Putin-Queer/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Make Putin Queer! Use "trp" token in prompts
3
- emoji: 🏳️‍🌈
4
- colorFrom: blue
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.13.0
8
- app_file: app.py
9
- pinned: false
10
- license: creativeml-openrail-m
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AlowaSawsan/Third-Molar-Segmentation/app.py DELETED
@@ -1,68 +0,0 @@
1
- #import required libraries-----------------------------------------------
2
- import streamlit as st
3
- from tensorflow import keras
4
- import tensorflow as tf
5
- from PIL import Image
6
- import numpy as np
7
- import cv2
8
-
9
- #loading model-----------------------------------------------------------
10
- model=keras.models.load_model("3rdm_att_UNet_50epochs_acc.h5")
11
-
12
- #User Interface----------------------------------------------------------
13
- st.header("Segmentation of the Lower Left Third Molar in Panoramic X-ray Images Using Attention U-Net")
14
-
15
- examples=["2.png","20.png","31.png"]
16
-
17
- def load_image(image_file):
18
- img = Image.open(image_file)
19
- return img
20
-
21
- st.subheader("Instruction:")
22
- st.subheader("Please select from the provided samples or upload dental panoramic X-ray image")
23
- image_file = st.file_uploader("Upload Images", type=["png","jpg","jpeg"])
24
-
25
- col1, col2, col3 = st.columns(3)
26
- with col1:
27
- ex=load_image(examples[0])
28
- st.image(ex,width=200)
29
- if st.button('Sample 1'):
30
- image_file=examples[0]
31
-
32
- with col2:
33
- ex1=load_image(examples[1])
34
- st.image(ex1,width=200)
35
- if st.button('Sample 2'):
36
- image_file=examples[1]
37
-
38
- with col3:
39
- ex2=load_image(examples[2])
40
- st.image(ex2,width=200)
41
- if st.button('Sample 3'):
42
- image_file=examples[2]
43
-
44
- #main--------------------------------------------------------------------
45
-
46
- if image_file is not None:
47
-
48
- img=load_image(image_file)
49
-
50
- st.text("Selected Image ....")
51
- st.image(img,width=850)
52
-
53
- img = np.asarray(img)
54
- img = cv2.resize(img, (512, 256))
55
- img = cv2.cvtColor(img, cv2.COLOR_BAYER_GR2GRAY)
56
- img = np.expand_dims(img, axis=0)
57
-
58
- prediction = model.predict(img)
59
-
60
- output = prediction.reshape(256,512)
61
-
62
-
63
- if output is not None :
64
- st.text("Result")
65
- #st.write(output.shape)
66
- st.image(output,width=850)
67
-
68
- st.text("DONE")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Alycer/VITS-Umamusume-voice-synthesizer/text/symbols.py DELETED
@@ -1,76 +0,0 @@
1
- '''
2
- Defines the set of symbols used in text input to the model.
3
- '''
4
-
5
- # japanese_cleaners
6
- _pad = '_'
7
- _punctuation = ',.!?-'
8
- _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ '
9
-
10
-
11
- '''# japanese_cleaners2
12
- _pad = '_'
13
- _punctuation = ',.!?-~…'
14
- _letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ '
15
- '''
16
-
17
-
18
- '''# korean_cleaners
19
- _pad = '_'
20
- _punctuation = ',.!?…~'
21
- _letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ '
22
- '''
23
-
24
- '''# chinese_cleaners
25
- _pad = '_'
26
- _punctuation = ',。!?—…'
27
- _letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ '
28
- '''
29
-
30
- '''# zh_ja_mixture_cleaners
31
- _pad = '_'
32
- _punctuation = ',.!?-~…'
33
- _letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ '
34
- '''
35
-
36
- '''# sanskrit_cleaners
37
- _pad = '_'
38
- _punctuation = '।'
39
- _letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ '
40
- '''
41
-
42
- '''# cjks_cleaners
43
- _pad = '_'
44
- _punctuation = ',.!?-~…'
45
- _letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ '
46
- '''
47
-
48
- '''# thai_cleaners
49
- _pad = '_'
50
- _punctuation = '.!? '
51
- _letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์'
52
- '''
53
-
54
- '''# cjke_cleaners2
55
- _pad = '_'
56
- _punctuation = ',.!?-~…'
57
- _letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ '
58
- '''
59
-
60
- '''# shanghainese_cleaners
61
- _pad = '_'
62
- _punctuation = ',.!?…'
63
- _letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 '
64
- '''
65
-
66
- '''# chinese_dialect_cleaners
67
- _pad = '_'
68
- _punctuation = ',.!?~…─'
69
- _letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚ᴀᴇ↑↓∅ⱼ '
70
- '''
71
-
72
- # Export all symbols:
73
- symbols = [_pad] + list(_punctuation) + list(_letters)
74
-
75
- # Special symbol ids
76
- SPACE_ID = symbols.index(" ")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/research_projects/onnxruntime/README.md DELETED
@@ -1,5 +0,0 @@
1
- ## Diffusers examples with ONNXRuntime optimizations
2
-
3
- **This research project is not actively maintained by the diffusers team. For any questions or comments, please contact Prathik Rao (prathikr), Sunghoon Choi (hanbitmyths), Ashwini Khade (askhade), or Peng Wang (pengwa) on github with any questions.**
4
-
5
- This aims to provide diffusers examples with ONNXRuntime optimizations for training/fine-tuning unconditional image generation, text to image, and textual inversion. Please see individual directories for more details on how to run each task using ONNXRuntime.
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/ddim/pipeline_ddim.py DELETED
@@ -1,152 +0,0 @@
1
- # Copyright 2023 The HuggingFace Team. All rights reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- from typing import List, Optional, Tuple, Union
16
-
17
- import torch
18
-
19
- from ...schedulers import DDIMScheduler
20
- from ...utils import randn_tensor
21
- from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
22
-
23
-
24
- class DDIMPipeline(DiffusionPipeline):
25
- r"""
26
- Pipeline for image generation.
27
-
28
- This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
29
- implemented for all pipelines (downloading, saving, running on a particular device, etc.).
30
-
31
- Parameters:
32
- unet ([`UNet2DModel`]):
33
- A `UNet2DModel` to denoise the encoded image latents.
34
- scheduler ([`SchedulerMixin`]):
35
- A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of
36
- [`DDPMScheduler`], or [`DDIMScheduler`].
37
- """
38
-
39
- def __init__(self, unet, scheduler):
40
- super().__init__()
41
-
42
- # make sure scheduler can always be converted to DDIM
43
- scheduler = DDIMScheduler.from_config(scheduler.config)
44
-
45
- self.register_modules(unet=unet, scheduler=scheduler)
46
-
47
- @torch.no_grad()
48
- def __call__(
49
- self,
50
- batch_size: int = 1,
51
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
52
- eta: float = 0.0,
53
- num_inference_steps: int = 50,
54
- use_clipped_model_output: Optional[bool] = None,
55
- output_type: Optional[str] = "pil",
56
- return_dict: bool = True,
57
- ) -> Union[ImagePipelineOutput, Tuple]:
58
- r"""
59
- The call function to the pipeline for generation.
60
-
61
- Args:
62
- batch_size (`int`, *optional*, defaults to 1):
63
- The number of images to generate.
64
- generator (`torch.Generator`, *optional*):
65
- A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
66
- generation deterministic.
67
- eta (`float`, *optional*, defaults to 0.0):
68
- Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
69
- to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers. A value of `0` corresponds to
70
- DDIM and `1` corresponds to DDPM.
71
- num_inference_steps (`int`, *optional*, defaults to 50):
72
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
73
- expense of slower inference.
74
- use_clipped_model_output (`bool`, *optional*, defaults to `None`):
75
- If `True` or `False`, see documentation for [`DDIMScheduler.step`]. If `None`, nothing is passed
76
- downstream to the scheduler (use `None` for schedulers which don't support this argument).
77
- output_type (`str`, *optional*, defaults to `"pil"`):
78
- The output format of the generated image. Choose between `PIL.Image` or `np.array`.
79
- return_dict (`bool`, *optional*, defaults to `True`):
80
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
81
-
82
- Example:
83
-
84
- ```py
85
- >>> from diffusers import DDIMPipeline
86
- >>> import PIL.Image
87
- >>> import numpy as np
88
-
89
- >>> # load model and scheduler
90
- >>> pipe = DDIMPipeline.from_pretrained("fusing/ddim-lsun-bedroom")
91
-
92
- >>> # run pipeline in inference (sample random noise and denoise)
93
- >>> image = pipe(eta=0.0, num_inference_steps=50)
94
-
95
- >>> # process image to PIL
96
- >>> image_processed = image.cpu().permute(0, 2, 3, 1)
97
- >>> image_processed = (image_processed + 1.0) * 127.5
98
- >>> image_processed = image_processed.numpy().astype(np.uint8)
99
- >>> image_pil = PIL.Image.fromarray(image_processed[0])
100
-
101
- >>> # save image
102
- >>> image_pil.save("test.png")
103
- ```
104
-
105
- Returns:
106
- [`~pipelines.ImagePipelineOutput`] or `tuple`:
107
- If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
108
- returned where the first element is a list with the generated images
109
- """
110
-
111
- # Sample gaussian noise to begin loop
112
- if isinstance(self.unet.config.sample_size, int):
113
- image_shape = (
114
- batch_size,
115
- self.unet.config.in_channels,
116
- self.unet.config.sample_size,
117
- self.unet.config.sample_size,
118
- )
119
- else:
120
- image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
121
-
122
- if isinstance(generator, list) and len(generator) != batch_size:
123
- raise ValueError(
124
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
125
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
126
- )
127
-
128
- image = randn_tensor(image_shape, generator=generator, device=self._execution_device, dtype=self.unet.dtype)
129
-
130
- # set step values
131
- self.scheduler.set_timesteps(num_inference_steps)
132
-
133
- for t in self.progress_bar(self.scheduler.timesteps):
134
- # 1. predict noise model_output
135
- model_output = self.unet(image, t).sample
136
-
137
- # 2. predict previous mean of image x_t-1 and add variance depending on eta
138
- # eta corresponds to η in paper and should be between [0, 1]
139
- # do x_t -> x_t-1
140
- image = self.scheduler.step(
141
- model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
142
- ).prev_sample
143
-
144
- image = (image / 2 + 0.5).clamp(0, 1)
145
- image = image.cpu().permute(0, 2, 3, 1).numpy()
146
- if output_type == "pil":
147
- image = self.numpy_to_pil(image)
148
-
149
- if not return_dict:
150
- return (image,)
151
-
152
- return ImagePipelineOutput(images=image)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
2
- # learning policy
3
- lr_config = dict(step=[16, 19])
4
- runner = dict(type='EpochBasedRunner', max_epochs=20)
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py DELETED
@@ -1,4 +0,0 @@
1
- _base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py'
2
- # learning policy
3
- lr_config = dict(step=[16, 22])
4
- runner = dict(type='EpochBasedRunner', max_epochs=24)
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/fcn/fcn_r18b-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,9 +0,0 @@
1
- _base_ = './fcn_r50-d8_512x1024_80k_cityscapes.py'
2
- model = dict(
3
- pretrained='torchvision://resnet18',
4
- backbone=dict(type='ResNet', depth=18),
5
- decode_head=dict(
6
- in_channels=512,
7
- channels=128,
8
- ),
9
- auxiliary_head=dict(in_channels=256, channels=64))
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes.py DELETED
@@ -1,9 +0,0 @@
1
- _base_ = [
2
- '../_base_/models/lraspp_m-v3-d8.py', '../_base_/datasets/cityscapes.py',
3
- '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
4
- ]
5
-
6
- # Re-config the data sampler.
7
- data = dict(samples_per_gpu=4, workers_per_gpu=4)
8
-
9
- runner = dict(type='IterBasedRunner', max_iters=320000)
 
 
 
 
 
 
 
 
 
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/extensions/sd_api_pictures/style.css DELETED
@@ -1,52 +0,0 @@
1
- /* Align the elements for SD_api_picture extension */
2
- .SDAP #sampler_box {
3
- padding-top: var(--spacing-sm);
4
- padding-bottom: var(--spacing-sm);
5
- border: 0;
6
- }
7
-
8
- .SDAP #steps_box {
9
- border-radius: 0 0 var(--block-radius) var(--block-radius);
10
- }
11
-
12
- .SDAP #sampler_col {
13
- gap: 0;
14
- padding: 0;
15
- background-color: transparent;
16
- }
17
-
18
- .SDAP #sampler_row {
19
- border-bottom: 0;
20
- box-shadow: var(--block-shadow);
21
- border-width: var(--block-border-width);
22
- border-color: var(--block-border-color);
23
- border-radius: var(--block-radius) var(--block-radius) 0 0;
24
- background: var(--block-background-fill);
25
- gap: 0;
26
- }
27
-
28
- .SDAP #sampler_row .refresh-button {
29
- margin-bottom: var(--spacing-sm);
30
- margin-right: var(--spacing-lg);
31
- }
32
-
33
- .SDAP #seed_box,
34
- .SDAP #cfg_box {
35
- padding-top: var(--spacing-md);
36
- }
37
-
38
- .SDAP #sampler_box span,
39
- .SDAP #seed_box span,
40
- .SDAP #cfg_box span,
41
- .SDAP #steps_box span {
42
- margin-bottom: var(--spacing-sm);
43
- }
44
-
45
- .SDAP svg.dropdown-arrow {
46
- flex-shrink: 0 !important;
47
- margin: 0px !important;
48
- }
49
-
50
- .SDAP .hires_opts input[type="number"] {
51
- width: 6em !important;
52
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/gmflow_module/README.md DELETED
@@ -1,239 +0,0 @@
1
- # GMFlow
2
-
3
-
4
- Official PyTorch implementation of paper:
5
-
6
- [**GMFlow: Learning Optical Flow via Global Matching**](https://arxiv.org/abs/2111.13680), **CVPR 2022, Oral**
7
-
8
- Authors: [Haofei Xu](https://haofeixu.github.io/), [Jing Zhang](https://scholar.google.com.hk/citations?user=9jH5v74AAAAJ), [Jianfei Cai](https://jianfei-cai.github.io/), [Hamid Rezatofighi](https://scholar.google.com/citations?user=VxAuxMwAAAAJ), [Dacheng Tao](https://scholar.google.com/citations?user=RwlJNLcAAAAJ)
9
-
10
-
11
- **11/15/2022 Update: Check out our new work: [Unifying Flow, Stereo and Depth Estimation](https://haofeixu.github.io/unimatch/) and code: [unimatch](https://github.com/autonomousvision/unimatch) for extending GMFlow to stereo and depth tasks. [More pretrained GMFlow models](https://github.com/autonomousvision/unimatch/blob/master/MODEL_ZOO.md) with different speed-accuracy trade-offs are also released. Check out our [Colab](https://colab.research.google.com/drive/1r5m-xVy3Kw60U-m5VB-aQ98oqqg_6cab?usp=sharing) and [HuggingFace](https://huggingface.co/spaces/haofeixu/unimatch) demo to play with GMFlow in your browser!**
12
-
13
-
14
-
15
- **A [video introduction](https://www.bilibili.com/video/BV18A4y1R7PL) (in Chinese) of GMFlow is available at bilibili!**
16
-
17
-
18
-
19
- https://user-images.githubusercontent.com/19343475/174446408-520b8a6c-9714-4ff3-978c-98e23ab29c1f.mp4
20
-
21
-
22
-
23
-
24
-
25
- We streamline the optical flow estimation pipeline by reformulating optical flow as a **global matching** problem.
26
-
27
-
28
-
29
-
30
- <p align="center"><img width=90% src="assets/gmflow.png"></p>
31
-
32
-
33
-
34
-
35
-
36
- ## Highlights
37
-
38
- - **Flexible & Modular design**
39
-
40
- We decompose the end-to-end optical flow framework into five components:
41
-
42
- feature extraction, feature enhancement, feature matching, flow propagation and flow refinement.
43
-
44
- One can easily construct a customized optical flow model by combining different components.
45
-
46
- - **High accuracy**
47
-
48
- With only one refinement, GMFlow outperforms 31-refinements RAFT on the challenging Sintel benchmark.
49
-
50
- - **High efficiency**
51
-
52
- A basic GMFlow model (without refinement) runs at 57ms (V100) or 26ms (A100) for Sintel data (436x1024).
53
-
54
- GMFlow gains more speedup than RAFT on high-end GPUs (e.g., A100) since GMFlow doesn't require a large number of sequential computation.
55
-
56
- GMFlow also simplifies backward flow computation without requiring to forward the network twice. The bidirectional flow can be used for occlusion detection with forward-backward consistency check.
57
-
58
- <p align="center"><img width=90% src="assets/bidir_flow_occ.png"></p>
59
-
60
-
61
-
62
-
63
- ## Installation
64
-
65
- Our code is based on pytorch 1.9.0, CUDA 10.2 and python 3.8. Higher version pytorch should also work well.
66
-
67
- We recommend using [conda](https://www.anaconda.com/distribution/) for installation:
68
-
69
- ```
70
- conda env create -f environment.yml
71
- conda activate gmflow
72
- ```
73
-
74
- ## Demos
75
-
76
- All pretrained models can be downloaded from [google drive](https://drive.google.com/file/d/1d5C5cgHIxWGsFR1vYs5XrQbbUiZl9TX2/view?usp=sharing).
77
-
78
-
79
-
80
- You can run a trained model on a sequence of images and visualize the results:
81
-
82
- ```
83
- CUDA_VISIBLE_DEVICES=0 python main.py \
84
- --inference_dir demo/sintel_market_1 \
85
- --output_path output/gmflow-norefine-sintel_market_1 \
86
- --resume pretrained/gmflow_sintel-0c07dcb3.pth
87
- ```
88
-
89
- You can also predict bidirectional flow with `--pred_bidir_flow` enabled and use `--fwd_bwd_consistency_check` for forward-backward consistency check. More examples can be found in [scripts/demo.sh](scripts/demo.sh).
90
-
91
-
92
-
93
- ## Datasets
94
-
95
- The datasets used to train and evaluate GMFlow are as follows:
96
-
97
- * [FlyingChairs](https://lmb.informatik.uni-freiburg.de/resources/datasets/FlyingChairs.en.html#flyingchairs)
98
- * [FlyingThings3D](https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html)
99
- * [Sintel](http://sintel.is.tue.mpg.de/)
100
- * [KITTI](http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php?benchmark=flow)
101
- * [HD1K](http://hci-benchmark.iwr.uni-heidelberg.de/)
102
-
103
- By default the dataloader [datasets.py](data/datasets.py) assumes the datasets are located in folder `datasets` and are organized as follows:
104
-
105
- ```
106
- datasets
107
- ├── FlyingChairs_release
108
- │   └── data
109
- ├── FlyingThings3D
110
- │   ├── frames_cleanpass
111
- │   ├── frames_finalpass
112
- │   └── optical_flow
113
- ├── HD1K
114
- │   ├── hd1k_challenge
115
- │   ├── hd1k_flow_gt
116
- │   ├── hd1k_flow_uncertainty
117
- │   └── hd1k_input
118
- ├── KITTI
119
- │   ├── testing
120
- │   └── training
121
- ├── Sintel
122
- │   ├── test
123
- │   └── training
124
- ```
125
-
126
- It is recommended to symlink your dataset root to `datasets`:
127
-
128
- ```shell
129
- ln -s $YOUR_DATASET_ROOT datasets
130
- ```
131
-
132
- Otherwise, you may need to change the corresponding paths in [datasets.py](data/datasets.py).
133
-
134
-
135
-
136
- ## Evaluation
137
-
138
- You can evaluate a trained GMFlow model by running:
139
-
140
- ```
141
- CUDA_VISIBLE_DEVICES=0 python main.py --eval --val_dataset things sintel --resume pretrained/gmflow_things-e9887eda.pth
142
- ```
143
-
144
- More evaluation scripts can be found in [scripts/evaluate.sh](scripts/evaluate.sh).
145
-
146
-
147
-
148
- For submission to Sintel and KITTI online test sets, you can run [scripts/submission.sh](scripts/submission.sh).
149
-
150
-
151
-
152
- ## Training
153
-
154
- All training scripts on FlyingChairs, FlyingThings3D, Sintel and KITTI datasets can be found in [scripts/train_gmflow.sh](scripts/train_gmflow.sh) and [scripts/train_gmflow_with_refine.sh](scripts/train_gmflow_with_refine.sh).
155
-
156
- Note that the basic GMFlow model (without refinement) can be trained on 4x 16GB V100 GPUs. For training GMFlow with refinement, 8x 16GB V100 or 4x 32GB V100 or 4x 40GB A100 GPUs are required by default. You may need to tune the batch size and training iterations according to your hardware.
157
-
158
-
159
-
160
- We support using tensorboard to monitor and visualize the training process. You can first start a tensorboard session with
161
-
162
- ```shell
163
- tensorboard --logdir checkpoints
164
- ```
165
-
166
- and then access [http://localhost:6006](http://localhost:6006) in your browser.
167
-
168
-
169
-
170
- ## Citation
171
-
172
- If you find our work useful in your research, please consider citing our paper:
173
-
174
- ```
175
- @inproceedings{xu2022gmflow,
176
- title={GMFlow: Learning Optical Flow via Global Matching},
177
- author={Xu, Haofei and Zhang, Jing and Cai, Jianfei and Rezatofighi, Hamid and Tao, Dacheng},
178
- booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
179
- pages={8121-8130},
180
- year={2022}
181
- }
182
- ```
183
-
184
-
185
-
186
- ## Acknowledgements
187
-
188
- This project would not have been possible without relying on some awesome repos : [RAFT](https://github.com/princeton-vl/RAFT), [LoFTR](https://github.com/zju3dv/LoFTR), [DETR](https://github.com/facebookresearch/detr), [Swin](https://github.com/microsoft/Swin-Transformer), [mmdetection](https://github.com/open-mmlab/mmdetection) and [Detectron2](https://github.com/facebookresearch/detectron2/blob/main/projects/TridentNet/tridentnet/trident_conv.py). We thank the original authors for their excellent work.
189
-
190
-
191
-
192
-
193
-
194
-
195
-
196
-
197
-
198
-
199
-
200
-
201
-
202
-
203
-
204
-
205
-
206
-
207
-
208
-
209
-
210
-
211
-
212
-
213
-
214
-
215
-
216
-
217
-
218
-
219
-
220
-
221
-
222
-
223
-
224
-
225
-
226
-
227
-
228
-
229
-
230
-
231
-
232
-
233
-
234
-
235
-
236
-
237
-
238
-
239
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Artrajz/vits-simple-api/utils/download.py DELETED
@@ -1,96 +0,0 @@
1
- import logging
2
- import os
3
- import hashlib
4
- import tarfile
5
- import urllib.request
6
- import zipfile
7
-
8
- from tqdm import tqdm
9
- from pathlib import Path
10
- from logger import logger
11
- from py7zr import SevenZipFile
12
-
13
-
14
- class TqdmUpTo(tqdm):
15
- def update_to(self, b=1, bsize=1, tsize=None):
16
- if tsize is not None:
17
- self.total = tsize
18
- self.update(b * bsize - self.n)
19
-
20
-
21
- def download_file(url, dest_path):
22
- logging.info(f"Downloading: {url}")
23
- with TqdmUpTo(unit="B", unit_scale=True, unit_divisor=1024, miniters=1, desc=url.split('/')[-1]) as t:
24
- urllib.request.urlretrieve(url, dest_path, reporthook=t.update_to)
25
-
26
-
27
- def verify_md5(file_path, expected_md5):
28
- md5 = hashlib.md5(file_path.read_bytes()).hexdigest()
29
- if md5 != expected_md5:
30
- return False, f"MD5 mismatch: {md5} != {expected_md5}"
31
- return True, ""
32
-
33
-
34
- def extract_file(file_path, destination=None):
35
- """
36
- Extract a compressed file based on its extension.
37
- If destination is not specified, it will be extracted to its parent directory.
38
- """
39
- if destination is None:
40
- destination = Path(file_path).parent
41
-
42
- logging.info(f"Extracting to {destination}")
43
-
44
- if file_path.endswith('.zip'):
45
- with zipfile.ZipFile(file_path, 'r') as zip_ref:
46
- zip_ref.extractall(destination)
47
- elif file_path.endswith('.tar.gz'):
48
- with tarfile.open(file_path, 'r:gz') as tar_ref:
49
- tar_ref.extractall(destination)
50
- elif file_path.endswith('.tar.bz2'):
51
- with tarfile.open(file_path, 'r:bz2') as tar_ref:
52
- tar_ref.extractall(destination)
53
- elif file_path.endswith('.7z'):
54
- with SevenZipFile(file_path, mode='r') as z:
55
- z.extractall(destination)
56
- else:
57
- logging.error(f"Unsupported compression format for file {file_path}")
58
-
59
-
60
- def download_and_verify(urls, target_path, expected_md5=None, extract_destination=None):
61
- for url in urls:
62
- try:
63
- download_file(url, target_path)
64
- break
65
- except Exception as error:
66
- logger.error(f"downloading from URL {url}: {error}")
67
-
68
- else: # This else is tied to the for loop, and executes if no download is successful
69
- return False, "Error downloading from all provided URLs."
70
-
71
- if expected_md5 is not None:
72
- success, message = verify_md5(Path(target_path), expected_md5)
73
- if not success:
74
- os.remove(target_path)
75
- return False, message
76
-
77
- # If it's a compressed file, extract it
78
- if target_path.endswith(('.zip', '.tar.gz', '.tar.bz2', '.7z')):
79
- extract_file(target_path, extract_destination)
80
- os.remove(target_path)
81
-
82
- return True, "File downloaded, verified, and extracted successfully!"
83
-
84
-
85
- if __name__ == "__main__":
86
- URLS = [
87
- "YOUR_PRIMARY_URL_HERE",
88
- "YOUR_FIRST_BACKUP_URL_HERE",
89
- # ... you can add more backup URLs as needed
90
- ]
91
- TARGET_PATH = ""
92
- EXPECTED_MD5 = ""
93
- EXTRACT_DESTINATION = ""
94
-
95
- success, message = download_and_verify(URLS, TARGET_PATH, EXPECTED_MD5, EXTRACT_DESTINATION)
96
- print(message)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/cachecontrol/caches/__init__.py DELETED
@@ -1,9 +0,0 @@
1
- # SPDX-FileCopyrightText: 2015 Eric Larson
2
- #
3
- # SPDX-License-Identifier: Apache-2.0
4
-
5
- from .file_cache import FileCache, SeparateBodyFileCache
6
- from .redis_cache import RedisCache
7
-
8
-
9
- __all__ = ["FileCache", "SeparateBodyFileCache", "RedisCache"]
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/style.py DELETED
@@ -1,796 +0,0 @@
1
- import sys
2
- from functools import lru_cache
3
- from marshal import dumps, loads
4
- from random import randint
5
- from typing import Any, Dict, Iterable, List, Optional, Type, Union, cast
6
-
7
- from . import errors
8
- from .color import Color, ColorParseError, ColorSystem, blend_rgb
9
- from .repr import Result, rich_repr
10
- from .terminal_theme import DEFAULT_TERMINAL_THEME, TerminalTheme
11
-
12
- # Style instances and style definitions are often interchangeable
13
- StyleType = Union[str, "Style"]
14
-
15
-
16
- class _Bit:
17
- """A descriptor to get/set a style attribute bit."""
18
-
19
- __slots__ = ["bit"]
20
-
21
- def __init__(self, bit_no: int) -> None:
22
- self.bit = 1 << bit_no
23
-
24
- def __get__(self, obj: "Style", objtype: Type["Style"]) -> Optional[bool]:
25
- if obj._set_attributes & self.bit:
26
- return obj._attributes & self.bit != 0
27
- return None
28
-
29
-
30
- @rich_repr
31
- class Style:
32
- """A terminal style.
33
-
34
- A terminal style consists of a color (`color`), a background color (`bgcolor`), and a number of attributes, such
35
- as bold, italic etc. The attributes have 3 states: they can either be on
36
- (``True``), off (``False``), or not set (``None``).
37
-
38
- Args:
39
- color (Union[Color, str], optional): Color of terminal text. Defaults to None.
40
- bgcolor (Union[Color, str], optional): Color of terminal background. Defaults to None.
41
- bold (bool, optional): Enable bold text. Defaults to None.
42
- dim (bool, optional): Enable dim text. Defaults to None.
43
- italic (bool, optional): Enable italic text. Defaults to None.
44
- underline (bool, optional): Enable underlined text. Defaults to None.
45
- blink (bool, optional): Enabled blinking text. Defaults to None.
46
- blink2 (bool, optional): Enable fast blinking text. Defaults to None.
47
- reverse (bool, optional): Enabled reverse text. Defaults to None.
48
- conceal (bool, optional): Enable concealed text. Defaults to None.
49
- strike (bool, optional): Enable strikethrough text. Defaults to None.
50
- underline2 (bool, optional): Enable doubly underlined text. Defaults to None.
51
- frame (bool, optional): Enable framed text. Defaults to None.
52
- encircle (bool, optional): Enable encircled text. Defaults to None.
53
- overline (bool, optional): Enable overlined text. Defaults to None.
54
- link (str, link): Link URL. Defaults to None.
55
-
56
- """
57
-
58
- _color: Optional[Color]
59
- _bgcolor: Optional[Color]
60
- _attributes: int
61
- _set_attributes: int
62
- _hash: Optional[int]
63
- _null: bool
64
- _meta: Optional[bytes]
65
-
66
- __slots__ = [
67
- "_color",
68
- "_bgcolor",
69
- "_attributes",
70
- "_set_attributes",
71
- "_link",
72
- "_link_id",
73
- "_ansi",
74
- "_style_definition",
75
- "_hash",
76
- "_null",
77
- "_meta",
78
- ]
79
-
80
- # maps bits on to SGR parameter
81
- _style_map = {
82
- 0: "1",
83
- 1: "2",
84
- 2: "3",
85
- 3: "4",
86
- 4: "5",
87
- 5: "6",
88
- 6: "7",
89
- 7: "8",
90
- 8: "9",
91
- 9: "21",
92
- 10: "51",
93
- 11: "52",
94
- 12: "53",
95
- }
96
-
97
- STYLE_ATTRIBUTES = {
98
- "dim": "dim",
99
- "d": "dim",
100
- "bold": "bold",
101
- "b": "bold",
102
- "italic": "italic",
103
- "i": "italic",
104
- "underline": "underline",
105
- "u": "underline",
106
- "blink": "blink",
107
- "blink2": "blink2",
108
- "reverse": "reverse",
109
- "r": "reverse",
110
- "conceal": "conceal",
111
- "c": "conceal",
112
- "strike": "strike",
113
- "s": "strike",
114
- "underline2": "underline2",
115
- "uu": "underline2",
116
- "frame": "frame",
117
- "encircle": "encircle",
118
- "overline": "overline",
119
- "o": "overline",
120
- }
121
-
122
- def __init__(
123
- self,
124
- *,
125
- color: Optional[Union[Color, str]] = None,
126
- bgcolor: Optional[Union[Color, str]] = None,
127
- bold: Optional[bool] = None,
128
- dim: Optional[bool] = None,
129
- italic: Optional[bool] = None,
130
- underline: Optional[bool] = None,
131
- blink: Optional[bool] = None,
132
- blink2: Optional[bool] = None,
133
- reverse: Optional[bool] = None,
134
- conceal: Optional[bool] = None,
135
- strike: Optional[bool] = None,
136
- underline2: Optional[bool] = None,
137
- frame: Optional[bool] = None,
138
- encircle: Optional[bool] = None,
139
- overline: Optional[bool] = None,
140
- link: Optional[str] = None,
141
- meta: Optional[Dict[str, Any]] = None,
142
- ):
143
- self._ansi: Optional[str] = None
144
- self._style_definition: Optional[str] = None
145
-
146
- def _make_color(color: Union[Color, str]) -> Color:
147
- return color if isinstance(color, Color) else Color.parse(color)
148
-
149
- self._color = None if color is None else _make_color(color)
150
- self._bgcolor = None if bgcolor is None else _make_color(bgcolor)
151
- self._set_attributes = sum(
152
- (
153
- bold is not None,
154
- dim is not None and 2,
155
- italic is not None and 4,
156
- underline is not None and 8,
157
- blink is not None and 16,
158
- blink2 is not None and 32,
159
- reverse is not None and 64,
160
- conceal is not None and 128,
161
- strike is not None and 256,
162
- underline2 is not None and 512,
163
- frame is not None and 1024,
164
- encircle is not None and 2048,
165
- overline is not None and 4096,
166
- )
167
- )
168
- self._attributes = (
169
- sum(
170
- (
171
- bold and 1 or 0,
172
- dim and 2 or 0,
173
- italic and 4 or 0,
174
- underline and 8 or 0,
175
- blink and 16 or 0,
176
- blink2 and 32 or 0,
177
- reverse and 64 or 0,
178
- conceal and 128 or 0,
179
- strike and 256 or 0,
180
- underline2 and 512 or 0,
181
- frame and 1024 or 0,
182
- encircle and 2048 or 0,
183
- overline and 4096 or 0,
184
- )
185
- )
186
- if self._set_attributes
187
- else 0
188
- )
189
-
190
- self._link = link
191
- self._meta = None if meta is None else dumps(meta)
192
- self._link_id = (
193
- f"{randint(0, 999999)}{hash(self._meta)}" if (link or meta) else ""
194
- )
195
- self._hash: Optional[int] = None
196
- self._null = not (self._set_attributes or color or bgcolor or link or meta)
197
-
198
- @classmethod
199
- def null(cls) -> "Style":
200
- """Create an 'null' style, equivalent to Style(), but more performant."""
201
- return NULL_STYLE
202
-
203
- @classmethod
204
- def from_color(
205
- cls, color: Optional[Color] = None, bgcolor: Optional[Color] = None
206
- ) -> "Style":
207
- """Create a new style with colors and no attributes.
208
-
209
- Returns:
210
- color (Optional[Color]): A (foreground) color, or None for no color. Defaults to None.
211
- bgcolor (Optional[Color]): A (background) color, or None for no color. Defaults to None.
212
- """
213
- style: Style = cls.__new__(Style)
214
- style._ansi = None
215
- style._style_definition = None
216
- style._color = color
217
- style._bgcolor = bgcolor
218
- style._set_attributes = 0
219
- style._attributes = 0
220
- style._link = None
221
- style._link_id = ""
222
- style._meta = None
223
- style._null = not (color or bgcolor)
224
- style._hash = None
225
- return style
226
-
227
- @classmethod
228
- def from_meta(cls, meta: Optional[Dict[str, Any]]) -> "Style":
229
- """Create a new style with meta data.
230
-
231
- Returns:
232
- meta (Optional[Dict[str, Any]]): A dictionary of meta data. Defaults to None.
233
- """
234
- style: Style = cls.__new__(Style)
235
- style._ansi = None
236
- style._style_definition = None
237
- style._color = None
238
- style._bgcolor = None
239
- style._set_attributes = 0
240
- style._attributes = 0
241
- style._link = None
242
- style._meta = dumps(meta)
243
- style._link_id = f"{randint(0, 999999)}{hash(style._meta)}"
244
- style._hash = None
245
- style._null = not (meta)
246
- return style
247
-
248
- @classmethod
249
- def on(cls, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Style":
250
- """Create a blank style with meta information.
251
-
252
- Example:
253
- style = Style.on(click=self.on_click)
254
-
255
- Args:
256
- meta (Optional[Dict[str, Any]], optional): An optional dict of meta information.
257
- **handlers (Any): Keyword arguments are translated in to handlers.
258
-
259
- Returns:
260
- Style: A Style with meta information attached.
261
- """
262
- meta = {} if meta is None else meta
263
- meta.update({f"@{key}": value for key, value in handlers.items()})
264
- return cls.from_meta(meta)
265
-
266
- bold = _Bit(0)
267
- dim = _Bit(1)
268
- italic = _Bit(2)
269
- underline = _Bit(3)
270
- blink = _Bit(4)
271
- blink2 = _Bit(5)
272
- reverse = _Bit(6)
273
- conceal = _Bit(7)
274
- strike = _Bit(8)
275
- underline2 = _Bit(9)
276
- frame = _Bit(10)
277
- encircle = _Bit(11)
278
- overline = _Bit(12)
279
-
280
- @property
281
- def link_id(self) -> str:
282
- """Get a link id, used in ansi code for links."""
283
- return self._link_id
284
-
285
- def __str__(self) -> str:
286
- """Re-generate style definition from attributes."""
287
- if self._style_definition is None:
288
- attributes: List[str] = []
289
- append = attributes.append
290
- bits = self._set_attributes
291
- if bits & 0b0000000001111:
292
- if bits & 1:
293
- append("bold" if self.bold else "not bold")
294
- if bits & (1 << 1):
295
- append("dim" if self.dim else "not dim")
296
- if bits & (1 << 2):
297
- append("italic" if self.italic else "not italic")
298
- if bits & (1 << 3):
299
- append("underline" if self.underline else "not underline")
300
- if bits & 0b0000111110000:
301
- if bits & (1 << 4):
302
- append("blink" if self.blink else "not blink")
303
- if bits & (1 << 5):
304
- append("blink2" if self.blink2 else "not blink2")
305
- if bits & (1 << 6):
306
- append("reverse" if self.reverse else "not reverse")
307
- if bits & (1 << 7):
308
- append("conceal" if self.conceal else "not conceal")
309
- if bits & (1 << 8):
310
- append("strike" if self.strike else "not strike")
311
- if bits & 0b1111000000000:
312
- if bits & (1 << 9):
313
- append("underline2" if self.underline2 else "not underline2")
314
- if bits & (1 << 10):
315
- append("frame" if self.frame else "not frame")
316
- if bits & (1 << 11):
317
- append("encircle" if self.encircle else "not encircle")
318
- if bits & (1 << 12):
319
- append("overline" if self.overline else "not overline")
320
- if self._color is not None:
321
- append(self._color.name)
322
- if self._bgcolor is not None:
323
- append("on")
324
- append(self._bgcolor.name)
325
- if self._link:
326
- append("link")
327
- append(self._link)
328
- self._style_definition = " ".join(attributes) or "none"
329
- return self._style_definition
330
-
331
- def __bool__(self) -> bool:
332
- """A Style is false if it has no attributes, colors, or links."""
333
- return not self._null
334
-
335
- def _make_ansi_codes(self, color_system: ColorSystem) -> str:
336
- """Generate ANSI codes for this style.
337
-
338
- Args:
339
- color_system (ColorSystem): Color system.
340
-
341
- Returns:
342
- str: String containing codes.
343
- """
344
-
345
- if self._ansi is None:
346
- sgr: List[str] = []
347
- append = sgr.append
348
- _style_map = self._style_map
349
- attributes = self._attributes & self._set_attributes
350
- if attributes:
351
- if attributes & 1:
352
- append(_style_map[0])
353
- if attributes & 2:
354
- append(_style_map[1])
355
- if attributes & 4:
356
- append(_style_map[2])
357
- if attributes & 8:
358
- append(_style_map[3])
359
- if attributes & 0b0000111110000:
360
- for bit in range(4, 9):
361
- if attributes & (1 << bit):
362
- append(_style_map[bit])
363
- if attributes & 0b1111000000000:
364
- for bit in range(9, 13):
365
- if attributes & (1 << bit):
366
- append(_style_map[bit])
367
- if self._color is not None:
368
- sgr.extend(self._color.downgrade(color_system).get_ansi_codes())
369
- if self._bgcolor is not None:
370
- sgr.extend(
371
- self._bgcolor.downgrade(color_system).get_ansi_codes(
372
- foreground=False
373
- )
374
- )
375
- self._ansi = ";".join(sgr)
376
- return self._ansi
377
-
378
- @classmethod
379
- @lru_cache(maxsize=1024)
380
- def normalize(cls, style: str) -> str:
381
- """Normalize a style definition so that styles with the same effect have the same string
382
- representation.
383
-
384
- Args:
385
- style (str): A style definition.
386
-
387
- Returns:
388
- str: Normal form of style definition.
389
- """
390
- try:
391
- return str(cls.parse(style))
392
- except errors.StyleSyntaxError:
393
- return style.strip().lower()
394
-
395
- @classmethod
396
- def pick_first(cls, *values: Optional[StyleType]) -> StyleType:
397
- """Pick first non-None style."""
398
- for value in values:
399
- if value is not None:
400
- return value
401
- raise ValueError("expected at least one non-None style")
402
-
403
- def __rich_repr__(self) -> Result:
404
- yield "color", self.color, None
405
- yield "bgcolor", self.bgcolor, None
406
- yield "bold", self.bold, None,
407
- yield "dim", self.dim, None,
408
- yield "italic", self.italic, None
409
- yield "underline", self.underline, None,
410
- yield "blink", self.blink, None
411
- yield "blink2", self.blink2, None
412
- yield "reverse", self.reverse, None
413
- yield "conceal", self.conceal, None
414
- yield "strike", self.strike, None
415
- yield "underline2", self.underline2, None
416
- yield "frame", self.frame, None
417
- yield "encircle", self.encircle, None
418
- yield "link", self.link, None
419
- if self._meta:
420
- yield "meta", self.meta
421
-
422
- def __eq__(self, other: Any) -> bool:
423
- if not isinstance(other, Style):
424
- return NotImplemented
425
- return self.__hash__() == other.__hash__()
426
-
427
- def __ne__(self, other: Any) -> bool:
428
- if not isinstance(other, Style):
429
- return NotImplemented
430
- return self.__hash__() != other.__hash__()
431
-
432
- def __hash__(self) -> int:
433
- if self._hash is not None:
434
- return self._hash
435
- self._hash = hash(
436
- (
437
- self._color,
438
- self._bgcolor,
439
- self._attributes,
440
- self._set_attributes,
441
- self._link,
442
- self._meta,
443
- )
444
- )
445
- return self._hash
446
-
447
- @property
448
- def color(self) -> Optional[Color]:
449
- """The foreground color or None if it is not set."""
450
- return self._color
451
-
452
- @property
453
- def bgcolor(self) -> Optional[Color]:
454
- """The background color or None if it is not set."""
455
- return self._bgcolor
456
-
457
- @property
458
- def link(self) -> Optional[str]:
459
- """Link text, if set."""
460
- return self._link
461
-
462
- @property
463
- def transparent_background(self) -> bool:
464
- """Check if the style specified a transparent background."""
465
- return self.bgcolor is None or self.bgcolor.is_default
466
-
467
- @property
468
- def background_style(self) -> "Style":
469
- """A Style with background only."""
470
- return Style(bgcolor=self.bgcolor)
471
-
472
- @property
473
- def meta(self) -> Dict[str, Any]:
474
- """Get meta information (can not be changed after construction)."""
475
- return {} if self._meta is None else cast(Dict[str, Any], loads(self._meta))
476
-
477
- @property
478
- def without_color(self) -> "Style":
479
- """Get a copy of the style with color removed."""
480
- if self._null:
481
- return NULL_STYLE
482
- style: Style = self.__new__(Style)
483
- style._ansi = None
484
- style._style_definition = None
485
- style._color = None
486
- style._bgcolor = None
487
- style._attributes = self._attributes
488
- style._set_attributes = self._set_attributes
489
- style._link = self._link
490
- style._link_id = f"{randint(0, 999999)}" if self._link else ""
491
- style._null = False
492
- style._meta = None
493
- style._hash = None
494
- return style
495
-
496
- @classmethod
497
- @lru_cache(maxsize=4096)
498
- def parse(cls, style_definition: str) -> "Style":
499
- """Parse a style definition.
500
-
501
- Args:
502
- style_definition (str): A string containing a style.
503
-
504
- Raises:
505
- errors.StyleSyntaxError: If the style definition syntax is invalid.
506
-
507
- Returns:
508
- `Style`: A Style instance.
509
- """
510
- if style_definition.strip() == "none" or not style_definition:
511
- return cls.null()
512
-
513
- STYLE_ATTRIBUTES = cls.STYLE_ATTRIBUTES
514
- color: Optional[str] = None
515
- bgcolor: Optional[str] = None
516
- attributes: Dict[str, Optional[Any]] = {}
517
- link: Optional[str] = None
518
-
519
- words = iter(style_definition.split())
520
- for original_word in words:
521
- word = original_word.lower()
522
- if word == "on":
523
- word = next(words, "")
524
- if not word:
525
- raise errors.StyleSyntaxError("color expected after 'on'")
526
- try:
527
- Color.parse(word) is None
528
- except ColorParseError as error:
529
- raise errors.StyleSyntaxError(
530
- f"unable to parse {word!r} as background color; {error}"
531
- ) from None
532
- bgcolor = word
533
-
534
- elif word == "not":
535
- word = next(words, "")
536
- attribute = STYLE_ATTRIBUTES.get(word)
537
- if attribute is None:
538
- raise errors.StyleSyntaxError(
539
- f"expected style attribute after 'not', found {word!r}"
540
- )
541
- attributes[attribute] = False
542
-
543
- elif word == "link":
544
- word = next(words, "")
545
- if not word:
546
- raise errors.StyleSyntaxError("URL expected after 'link'")
547
- link = word
548
-
549
- elif word in STYLE_ATTRIBUTES:
550
- attributes[STYLE_ATTRIBUTES[word]] = True
551
-
552
- else:
553
- try:
554
- Color.parse(word)
555
- except ColorParseError as error:
556
- raise errors.StyleSyntaxError(
557
- f"unable to parse {word!r} as color; {error}"
558
- ) from None
559
- color = word
560
- style = Style(color=color, bgcolor=bgcolor, link=link, **attributes)
561
- return style
562
-
563
- @lru_cache(maxsize=1024)
564
- def get_html_style(self, theme: Optional[TerminalTheme] = None) -> str:
565
- """Get a CSS style rule."""
566
- theme = theme or DEFAULT_TERMINAL_THEME
567
- css: List[str] = []
568
- append = css.append
569
-
570
- color = self.color
571
- bgcolor = self.bgcolor
572
- if self.reverse:
573
- color, bgcolor = bgcolor, color
574
- if self.dim:
575
- foreground_color = (
576
- theme.foreground_color if color is None else color.get_truecolor(theme)
577
- )
578
- color = Color.from_triplet(
579
- blend_rgb(foreground_color, theme.background_color, 0.5)
580
- )
581
- if color is not None:
582
- theme_color = color.get_truecolor(theme)
583
- append(f"color: {theme_color.hex}")
584
- append(f"text-decoration-color: {theme_color.hex}")
585
- if bgcolor is not None:
586
- theme_color = bgcolor.get_truecolor(theme, foreground=False)
587
- append(f"background-color: {theme_color.hex}")
588
- if self.bold:
589
- append("font-weight: bold")
590
- if self.italic:
591
- append("font-style: italic")
592
- if self.underline:
593
- append("text-decoration: underline")
594
- if self.strike:
595
- append("text-decoration: line-through")
596
- if self.overline:
597
- append("text-decoration: overline")
598
- return "; ".join(css)
599
-
600
- @classmethod
601
- def combine(cls, styles: Iterable["Style"]) -> "Style":
602
- """Combine styles and get result.
603
-
604
- Args:
605
- styles (Iterable[Style]): Styles to combine.
606
-
607
- Returns:
608
- Style: A new style instance.
609
- """
610
- iter_styles = iter(styles)
611
- return sum(iter_styles, next(iter_styles))
612
-
613
- @classmethod
614
- def chain(cls, *styles: "Style") -> "Style":
615
- """Combine styles from positional argument in to a single style.
616
-
617
- Args:
618
- *styles (Iterable[Style]): Styles to combine.
619
-
620
- Returns:
621
- Style: A new style instance.
622
- """
623
- iter_styles = iter(styles)
624
- return sum(iter_styles, next(iter_styles))
625
-
626
def copy(self) -> "Style":
    """Get a copy of this style.

    Returns:
        Style: A new Style instance with identical attributes.
    """
    if self._null:
        # The null style is immutable and shared, so no copy is needed.
        return NULL_STYLE
    # Bypass __init__ and duplicate the internal slots directly.
    style: Style = self.__new__(Style)
    style._ansi = self._ansi
    style._style_definition = self._style_definition
    style._color = self._color
    style._bgcolor = self._bgcolor
    style._attributes = self._attributes
    style._set_attributes = self._set_attributes
    style._link = self._link
    # Each copy gets a fresh link id so terminal hyperlinks don't collide.
    style._link_id = f"{randint(0, 999999)}" if self._link else ""
    style._hash = self._hash
    style._null = False
    style._meta = self._meta
    return style
647
-
648
@lru_cache(maxsize=128)
def clear_meta_and_links(self) -> "Style":
    """Get a copy of this style with link and meta information removed.

    Returns:
        Style: New style object.
    """
    if self._null:
        return NULL_STYLE
    # Duplicate the style's slots, dropping the hyperlink and meta payloads.
    style: Style = self.__new__(Style)
    style._ansi = self._ansi
    style._style_definition = self._style_definition
    style._color = self._color
    style._bgcolor = self._bgcolor
    style._attributes = self._attributes
    style._set_attributes = self._set_attributes
    style._link = None
    style._link_id = ""
    # NOTE(review): reuses the cached hash even though link/meta changed —
    # presumably hashing ignores those fields; confirm against __hash__.
    style._hash = self._hash
    style._null = False
    style._meta = None
    return style
670
-
671
def update_link(self, link: Optional[str] = None) -> "Style":
    """Get a copy with a different value for link.

    Args:
        link (str, optional): New value for link. Defaults to None.

    Returns:
        Style: A new Style instance.
    """
    # Copy every slot, replacing only the hyperlink fields.
    style: Style = self.__new__(Style)
    style._ansi = self._ansi
    style._style_definition = self._style_definition
    style._color = self._color
    style._bgcolor = self._bgcolor
    style._attributes = self._attributes
    style._set_attributes = self._set_attributes
    style._link = link
    # Fresh random id for the new link (empty when link is cleared).
    style._link_id = f"{randint(0, 999999)}" if link else ""
    # Hash is invalidated because the link contributes to identity.
    style._hash = None
    style._null = False
    style._meta = self._meta
    return style
693
-
694
def render(
    self,
    text: str = "",
    *,
    color_system: Optional[ColorSystem] = ColorSystem.TRUECOLOR,
    legacy_windows: bool = False,
) -> str:
    """Render the ANSI codes for the style.

    Args:
        text (str, optional): A string to style. Defaults to "".
        color_system (Optional[ColorSystem], optional): Color system to render to. Defaults to ColorSystem.TRUECOLOR.
        legacy_windows (bool, optional): If True, skip emitting OSC 8 hyperlink
            escapes (not supported by the legacy Windows console). Defaults to False.

    Returns:
        str: A string containing ANSI style codes.
    """
    if not text or color_system is None:
        # Nothing to style, or the target has no color support.
        return text
    # Use pre-computed ANSI codes if available, otherwise build them.
    attrs = self._ansi or self._make_ansi_codes(color_system)
    rendered = f"\x1b[{attrs}m{text}\x1b[0m" if attrs else text
    if self._link and not legacy_windows:
        # Wrap the styled text in OSC 8 escape sequences to form a hyperlink.
        rendered = (
            f"\x1b]8;id={self._link_id};{self._link}\x1b\\{rendered}\x1b]8;;\x1b\\"
        )
    return rendered
719
-
720
def test(self, text: Optional[str] = None) -> None:
    """Write text with style directly to terminal.

    This method is for testing purposes only.

    Args:
        text (Optional[str], optional): Text to style, or None to print the
            style's own string representation.
    """
    sample = text or str(self)
    sys.stdout.write(self.render(sample) + "\n")
731
-
732
@lru_cache(maxsize=1024)
def _add(self, style: Optional["Style"]) -> "Style":
    # Combine this style with another; fields explicitly set on ``style``
    # take precedence over this style's fields. Results are memoized.
    if style is None or style._null:
        return self
    if self._null:
        return style
    new_style: Style = self.__new__(Style)
    # ANSI codes / definition are derived values; recompute lazily later.
    new_style._ansi = None
    new_style._style_definition = None
    new_style._color = style._color or self._color
    new_style._bgcolor = style._bgcolor or self._bgcolor
    # Keep our attribute bits except where the other style explicitly set
    # them, then overlay the other style's explicitly-set bits.
    new_style._attributes = (self._attributes & ~style._set_attributes) | (
        style._attributes & style._set_attributes
    )
    new_style._set_attributes = self._set_attributes | style._set_attributes
    new_style._link = style._link or self._link
    new_style._link_id = style._link_id or self._link_id
    new_style._null = style._null
    if self._meta and style._meta:
        # Merge meta dicts; the right-hand style's keys win on conflict.
        new_style._meta = dumps({**self.meta, **style.meta})
    else:
        new_style._meta = self._meta or style._meta
    new_style._hash = None
    return new_style
756
-
757
def __add__(self, style: Optional["Style"]) -> "Style":
    combined = self._add(style)
    # _add() results are cached and shared; copy link styles so each use
    # gets its own link id (copy() regenerates it) instead of the cached one.
    if combined.link:
        return combined.copy()
    return combined
760
-
761
-
762
# Shared singleton for the empty style; copy() and clear_meta_and_links()
# return it directly instead of allocating a new instance.
NULL_STYLE = Style()
763
-
764
-
765
class StyleStack:
    """A stack of styles, seeded with a base style that is never popped."""

    __slots__ = ["_stack"]

    def __init__(self, default_style: "Style") -> None:
        self._stack: List["Style"] = [default_style]

    def __repr__(self) -> str:
        return f"<stylestack {self._stack!r}>"

    @property
    def current(self) -> "Style":
        """Get the Style at the top of the stack."""
        return self._stack[-1]

    def push(self, style: "Style") -> None:
        """Push a new style on to the stack.

        Args:
            style (Style): New style to combine with the current style.
        """
        combined = self._stack[-1] + style
        self._stack.append(combined)

    def pop(self) -> "Style":
        """Pop last style and discard.

        Returns:
            Style: New current style (also available as stack.current).
        """
        del self._stack[-1]
        return self._stack[-1]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/common/data/coco.py DELETED
@@ -1,48 +0,0 @@
1
- from omegaconf import OmegaConf
2
-
3
- import detectron2.data.transforms as T
4
- from detectron2.config import LazyCall as L
5
- from detectron2.data import (
6
- DatasetMapper,
7
- build_detection_test_loader,
8
- build_detection_train_loader,
9
- get_detection_dataset_dicts,
10
- )
11
- from detectron2.evaluation import COCOEvaluator
12
-
13
# Lazy-config definition of the standard COCO-2017 detection dataloaders.
dataloader = OmegaConf.create()

# Training loader: COCO 2017 train with multi-scale resize and random flip.
dataloader.train = L(build_detection_train_loader)(
    dataset=L(get_detection_dataset_dicts)(names="coco_2017_train"),
    mapper=L(DatasetMapper)(
        is_train=True,
        augmentations=[
            # Multi-scale training: pick one short-edge length per image.
            L(T.ResizeShortestEdge)(
                short_edge_length=(640, 672, 704, 736, 768, 800),
                sample_style="choice",
                max_size=1333,
            ),
            L(T.RandomFlip)(horizontal=True),
        ],
        image_format="BGR",
        use_instance_mask=True,  # load instance masks alongside boxes
    ),
    total_batch_size=16,
    num_workers=4,
)

# Test loader: single fixed resize; keep images without annotations.
dataloader.test = L(build_detection_test_loader)(
    dataset=L(get_detection_dataset_dicts)(names="coco_2017_val", filter_empty=False),
    mapper=L(DatasetMapper)(
        is_train=False,
        augmentations=[
            L(T.ResizeShortestEdge)(short_edge_length=800, max_size=1333),
        ],
        # Interpolated from the train mapper so both stay in sync.
        image_format="${...train.mapper.image_format}",
    ),
    num_workers=4,
)

# Evaluator resolves its dataset name from the test loader config above.
dataloader.evaluator = L(COCOEvaluator)(
    dataset_name="${..test.dataset.names}",
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ayaka-daisuki/anime-remove-background/app.py DELETED
@@ -1,52 +0,0 @@
1
- import gradio as gr
2
- import huggingface_hub
3
- import onnxruntime as rt
4
- import numpy as np
5
- import cv2
6
-
7
-
8
def get_mask(img, s=1024):
    """Run the segmentation model on *img* and return a soft alpha mask.

    Args:
        img: Input image as an HxWx3 array with values in 0-255.
        s (int): Side length of the square canvas the image is letterboxed to.

    Returns:
        Soft mask as an HxWx1 float array at the input's original resolution.
    """
    img = (img / 255).astype(np.float32)
    h, w = h0, w0 = img.shape[:-1]
    # Scale so the longer edge becomes s, preserving aspect ratio.
    h, w = (s, int(s * w / h)) if h > w else (int(s * h / w), s)
    ph, pw = s - h, s - w
    # Letterbox: center the resized image on a black s x s canvas.
    img_input = np.zeros([s, s, 3], dtype=np.float32)
    img_input[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w] = cv2.resize(img, (w, h))
    # HWC -> CHW plus a leading batch dimension for ONNX inference.
    img_input = np.transpose(img_input, (2, 0, 1))
    img_input = img_input[np.newaxis, :]
    mask = rmbg_model.run(None, {'img': img_input})[0][0]
    mask = np.transpose(mask, (1, 2, 0))
    # Crop the padding away, then restore the original resolution.
    mask = mask[ph // 2:ph // 2 + h, pw // 2:pw // 2 + w]
    mask = cv2.resize(mask, (w0, h0))[:, :, np.newaxis]
    return mask
22
-
23
-
24
def rmbg_fn(img):
    """Remove the background from *img*.

    Returns:
        Tuple of (mask as a 3-channel uint8 image, RGBA composite where the
        subject is blended over white and the mask becomes the alpha channel).
    """
    mask = get_mask(img)
    # Composite the subject over a white background using the soft mask.
    img = (mask * img + 255 * (1 - mask)).astype(np.uint8)
    mask = (mask * 255).astype(np.uint8)
    # Append the mask as a fourth (alpha) channel -> RGBA output.
    img = np.concatenate([img, mask], axis=2, dtype=np.uint8)
    # Expand the single-channel mask to 3 channels for display.
    mask = mask.repeat(3, axis=2)
    return mask, img
31
-
32
-
33
if __name__ == "__main__":
    # Prefer GPU inference when available, falling back to CPU.
    providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
    # Fetch the anime segmentation ONNX model from the Hugging Face Hub.
    model_path = huggingface_hub.hf_hub_download("skytnt/anime-seg", "isnetis.onnx")
    rmbg_model = rt.InferenceSession(model_path, providers=providers)
    # Build the Gradio UI: input image and examples in a column, outputs beside.
    app = gr.Blocks()
    with app:
        gr.Markdown("# Anime Remove Background\n\n"
                    "![visitor badge](https://visitor-badge.glitch.me/badge?page_id=skytnt.animeseg)\n\n"
                    "demo for [https://github.com/SkyTNT/anime-segmentation/](https://github.com/SkyTNT/anime-segmentation/)")
        with gr.Row():
            with gr.Column():
                input_img = gr.Image(label="input image")
                examples_data = [[f"examples/{x:02d}.jpg"] for x in range(1, 4)]
                examples = gr.Dataset(components=[input_img], samples=examples_data)
                run_btn = gr.Button(variant="primary")
            output_mask = gr.Image(label="mask")
            output_img = gr.Image(label="result", image_mode="RGBA")
        # Clicking an example loads it into the input; the button runs inference.
        examples.click(lambda x: x[0], [examples], [input_img])
        run_btn.click(rmbg_fn, [input_img], [output_mask, output_img])
    app.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Arriba Tablero Original Marksheet Descargar 2016.md DELETED
@@ -1,69 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar la hoja de marcado original 2016</h1>
3
- <p>Si usted es un estudiante de Uttar Pradesh Madhyamik Shiksha Parishad (UPMSP), también conocido como UP Board, es posible que se pregunte cómo descargar su hoja de marcado original para el año 2016. La hoja de marcado es un documento importante que muestra sus calificaciones y calificaciones en los exámenes de la junta de la clase 10 y 12. También es una prueba válida de su identidad y calificación educativa. Es posible que lo necesite para diversos fines, como admisión, beca, trabajo o cualquier otra verificación. </p>
4
- <p>En este artículo, le guiaremos a través de los pasos para descargar su hoja de marcado original de UP Board 2016 desde el sitio web oficial de UPMSP. También le diremos cómo verificar su hoja de marcado en línea y cómo corregir cualquier error en ella. Así que, comencemos. </p>
5
- <h2>arriba tablero original marksheet descargar 2016</h2><br /><p><b><b>Download Zip</b> &#8230;&#8230;&#8230; <a href="https://bltlly.com/2v6KAd">https://bltlly.com/2v6KAd</a></b></p><br /><br />
6
- <h2>Pasos para descargar la hoja de marcado original del tablero 2016</h2>
7
- <p>Para descargar su hoja de marcado original de UP Board 2016, debe seguir estos sencillos pasos:</p>
8
- <ol>
9
- <li>Visite el sitio web oficial de UPMSP en <a href="( 1 )">https://upmsp.edu.in/</a></li>
10
- <li>En la página de inicio, haga clic en el enlace "Resultados de la clase 10 - 2016" o "Resultados de la clase 12 - 2016" dependiendo de su clase. </li>
11
- <li> Será redirigido a una nueva página donde tendrá que introducir su número de rollo y código captcha. </li>
12
- <li>Haga clic en "Ver resultado" y su resultado se mostrará en la pantalla. </li>
13
- <li>Haga clic en "Descargar Marksheet" y guárdelo en su dispositivo. </li>
14
- <li>Tome una impresión de la hoja de marcado para referencia futura. </li>
15
- </ol>
16
- <h2>Cómo verificar en línea la hoja de marcado del tablero UP</h2>
17
- <p>Si desea verificar su hoja de marcado de UP Board en línea, puede usar los siguientes métodos:</p>
18
- <ul>
19
- <li>Puede visitar el sitio web <a href="( 2 )">https://arjunpedia.com/up-board-original-marksheet-download-result-verification/</a> e ingrese su número de lista y año de aprobación. Obtendrá un código de verificación que puede usar para verificar los detalles de su hoja de marcado. </li>
20
-
21
- </ul>
22
- <h2>Cómo corregir cualquier error en la hoja de marcado del tablero UP</h2>
23
- <p>Si encuentra algún error en su hoja de marcado de UP Board, como nombre, fecha de nacimiento, género, marcas, etc., puede solicitar la corrección dentro de un año de la declaración de resultados. Debes seguir estos pasos:</p>
24
- <ol>
25
- <li>Póngase en contacto con el director de su escuela y obtenga un formulario de solicitud <li>Llene el formulario con los detalles correctos y adjunte los documentos necesarios, como la tarjeta de admisión, la hoja de marcado, la prueba de identidad, etc.</li>
26
- <li>Envía el formulario al director de tu escuela y recibe un recibo. </li>
27
- <li>El director de la escuela enviará el formulario a la oficina regional de la Junta de UP para verificación y corrección. </li>
28
- <li> Recibirá una notificación del Panel UP cuando su hoja de marcado esté corregida y lista para ser recopilada. </li>
29
- <li>Recopile su hoja de marcado corregida de su escuela o de la oficina regional del Consejo UP. </li>
30
- </ol>
31
- <h2>Conclusión</h2>
32
- <p>En este artículo, hemos explicado cómo descargar su hoja de marcado original de UP Board 2016 desde el sitio web oficial de UPMSP. También le hemos mostrado cómo verificar su hoja de marcado en línea y cómo corregir cualquier error en ella. Esperamos que este artículo haya sido útil e informativo para usted. Aquí hay algunos consejos para recordar:</p>
33
- <ul>
34
- <li>Mantenga su hoja de marcado segura y protegida, ya que es un documento valioso. </li>
35
- <li>Revise su hoja de marcado cuidadosamente para detectar cualquier error o error tan pronto como lo reciba. </li>
36
- <li>Solicite la corrección dentro de un año de la declaración de resultados si encuentra algún error. </li>
37
- <li>Póngase en contacto con la oficina regional de la Junta Directiva o el número de la línea de ayuda para cualquier consulta o queja. </li>
38
- </ul>
39
- <h3>Preguntas frecuentes sobre el tablero UP Original Marksheet 2016</h3>
40
- <p>Aquí hay algunas preguntas y respuestas frecuentes sobre UP Board original marksheet 2016:</p>
41
- <h4>Q1: ¿Cuándo se declaró el resultado de las Clases 10 y 12 de la Junta Directiva UP en 2016? </h4>
42
-
43
- <h4>Q2: ¿Cómo puedo obtener una hoja de marcado duplicada si pierdo la original? </h4>
44
- <p>A2: Si pierde su hoja de marcado original, puede solicitar una duplicada siguiendo estos pasos:</p>
45
- <ol>
46
- <li> Presentar un FIR en la estación de policía más cercana y obtener una copia de la misma. </li>
47
- <li>Publique un anuncio en un periódico local indicando que ha perdido su hoja de marcado y proporcione sus datos. </li>
48
- <li>Obtener una declaración jurada de un notario público que indica que ha perdido su hoja de marcado y proporcionar sus datos. </li>
49
- <li>Póngase en contacto con el director de su escuela y obtenga un formulario de solicitud para una hoja de marcado duplicada. </li>
50
- <li>Llene el formulario y adjunte la copia de FIR, anuncio de periódico, declaración jurada, prueba de identidad y una tarifa de Rs. 100/-. </li>
51
- <li>Envía el formulario al director de tu escuela y recibe un recibo. </li>
52
- <li>El director de la escuela enviará el formulario a la oficina regional de la Junta de UP para la verificación y emisión de hojas de marcado duplicadas. </li>
53
- <li> Obtendrá una notificación del Tablero UP cuando su hoja de marcado duplicada esté lista para ser recopilada. </li>
54
- <li>Recopile su hoja de marcado duplicada de su escuela o de la oficina regional del Consejo UP. </li>
55
- </ol>
56
- <h4>Q3: ¿Cuál es la diferencia entre la hoja de marcado y el certificado? </h4>
57
- <p>A3: La hoja de marcado es un documento que muestra sus marcas y calificaciones en cada tema en los exámenes de la junta. El certificado es un documento que muestra su resultado general y calificación en los exámenes de la junta. El certificado también contiene su nombre, fecha de nacimiento, número de registro, nombre de la escuela, nombre de la junta, etc. Necesita tanto la hoja de marcado como el certificado para diversos fines. </p>
58
- <h4>Q4: ¿Cuánto tiempo se tarda en obtener la hoja de marcado original después de la declaración de resultados? </h4>
59
- <p>A4: Por lo general toma alrededor de un mes para obtener la hoja de marcado original después de la declaración de resultados. La Junta de UP envía las fichas originales a las respectivas escuelas de los estudiantes. Los estudiantes pueden recoger sus hojas de calificaciones de sus escuelas después de verificar su identidad y firmar un recibo. </p>
60
- <p></p>
61
-
62
- <p>A5: Puede ponerse en contacto con el UP Board para cualquier consulta o queja utilizando los siguientes métodos:</p>
63
- <ul>
64
- <li>Puede llamar al número de la línea de ayuda 1800-180-5310 o 0522-2239006 entre las 10 de la mañana y las 5 de la tarde los días laborables. </li>
65
- <li>Puede enviar su consulta o queja por correo electrónico a [email protected] o [email protected]. </li>
66
- <li>Puede visitar el sitio web oficial de UPMSP en <a href="">https://upmsp.edu.in/</a> y hacer clic en "Contáctenos" para más detalles. </li>
67
- </ul></p> 64aa2da5cf<br />
68
- <br />
69
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Consejo De Abogados De India Certificado Descargar.md DELETED
@@ -1,132 +0,0 @@
1
-
2
- <h1>Euro Truck Simulator 2: Cómo descargar y jugar en PC Windows 8 32 Bit</h1>
3
- <p>¿Te encanta conducir camiones por toda Europa y entregar carga a diferentes destinos? ¿Quieres experimentar la emoción de ser un camionero desde la comodidad de tu hogar? Si es así, entonces usted debe probar Euro Truck Simulator 2, uno de los juegos de simulación de conducción de camiones más populares y realistas en el mercado. En este artículo, le diremos qué es Euro Truck Simulator 2, por qué es tan popular, cuáles son los requisitos del sistema para jugarlo y cómo descargarlo e instalarlo en su PC Windows 8 32 bit. </p>
4
- <h2>Introducción</h2>
5
- <h3> ¿Qué es Euro Truck Simulator 2?</h3>
6
- <p>Euro Truck Simulator 2 (ETS2) es un juego desarrollado por SCS Software, una empresa checa especializada en la creación de juegos de simulación. ETS2 fue lanzado en octubre de 2012 y desde entonces ha recibido numerosas actualizaciones y expansiones. El juego le permite conducir varios camiones con licencia de marcas famosas como Volvo, Scania, Mercedes-Benz, MAN, DAF, Renault y más. Puede personalizar sus camiones con diferentes piezas, trabajos de pintura, accesorios y opciones de ajuste. También puede contratar conductores, comprar garajes, administrar su empresa y hacer crecer su negocio. </p>
7
- <h2>consejo de abogados de india certificado descargar</h2><br /><p><b><b>Download File</b> &middot; <a href="https://bltlly.com/2v6Ly2">https://bltlly.com/2v6Ly2</a></b></p><br /><br />
8
- <h3>¿Por qué es popular Euro Truck Simulator 2? </h3>
9
-
10
- <h3> ¿Cuáles son los requisitos del sistema para Euro Truck Simulator 2?</h3>
11
- <p>Euro Truck Simulator 2 es compatible con los sistemas operativos Windows XP/Vista/7/8/10. Sin embargo, si usted tiene un Windows 8 PC con un procesador de 32 bits, es necesario asegurarse de que su sistema cumple con los requisitos mínimos para jugar el juego. Estos son los requisitos mínimos del sistema para ETS2:</p>
12
- <ul>
13
- <li>OS: Windows XP/Vista/7/8</li>
14
- <li>Procesador: CPU de doble núcleo 2.4 GHz</li>
15
- <li>Memoria: 4 GB de RAM</li>
16
- <li>Gráficos: GeForce GTS clase 450 (Intel HD 4000)</li>
17
- <li>Disco duro: 7 GB de espacio disponible</li>
18
- </ul>
19
- <p>Si quieres disfrutar del juego con mejores gráficos y rendimiento, deberías tener un sistema que cumpla con los requisitos recomendados para ETS2. Estos son los requisitos del sistema recomendado para ETS2:</p>
20
- <ul>
21
- <li>OS: Windows 7/8.1/10 64-bit</li>
22
- <li>Procesador: Quad Core CPU 3.0 GHz</li>
23
- <li>Memoria: 6 GB de RAM</li>
24
- <li>Gráficos: GeForce clase GTX 760 (2 GB)</li>
25
- <li>Disco duro: 7 GB de espacio disponible</li>
26
- </ul>
27
- <p>Puedes comprobar las especificaciones de tu sistema siguiendo estos pasos:</p>
28
- <ol>
29
- <li>Pulse la tecla de Windows + R para abrir el cuadro de diálogo Ejecutar. </li>
30
- <li>Escriba dxdiag y haga clic en Aceptar.</li>
31
- <li>En la pestaña Sistema, puede ver su sistema operativo, procesador, memoria y versión DirectX. </li>
32
- <li> En la pestaña Mostrar, puede ver el modelo de tarjeta gráfica y la memoria. </li>
33
- </ol>
34
- <h2>Cómo descargar Euro Truck Simulator 2 para PC Windows 8 32 Bit</h2>
35
- <p>Ahora que sabe lo que es Euro Truck Simulator 2 y cuáles son los requisitos del sistema para jugarlo, es posible que se pregunte cómo descargarlo e instalarlo en su PC Windows 8 32 bit. Hay dos opciones principales para descargar ETS2: desde el sitio web oficial o desde Steam. Explicaremos ambas opciones en detalle a continuación. </p>
36
- <h3>Opción 1: Descargar desde el sitio web oficial</h3>
37
-
38
- <h4>Paso 1: Visite el sitio web de Euro Truck Simulator 2</h4>
39
- <p>Abra su navegador web y vaya a https://eurotrucksimulator2.com/m. Verá una página de inicio con un banner que muestra el último paquete de expansión del juego. Puedes desplazarte hacia abajo para ver más contenido sobre el juego. </p>
40
- <h4>Paso 2: Elija el juego base o los paquetes de expansión</h4>
41
- <p>Si desea descargar el juego base de ETS2, puede hacer clic en el botón Descargar en la esquina superior derecha de la página de inicio. Esto te llevará a una página donde puedes elegir entre dos versiones del juego: ETS2 Gold Edition o ETS2 Standard Edition. La Gold Edition incluye el juego base y el pack de expansión Going East!, que añade nuevos países y carreteras al juego. La Standard Edition solo incluye el juego base. Puede comparar las características y los precios de ambas versiones y elegir la que más le convenga. </p>
42
- <p>Si quieres descargar alguno de los paquetes de expansión de ETS2, puedes hacer clic en el botón DLC en la esquina superior derecha de la página de inicio. Esto te llevará a una página donde podrás ver todos los paquetes de expansión disponibles para ETS2, como Escandinavia, Vive la France! , Italia, Más allá del Mar Báltico, Camino al Mar Negro, Iberia y más. Cada paquete de expansión agrega nuevos países, regiones, ciudades, carreteras, puntos de referencia y más al juego. Puedes leer más sobre cada paquete de expansión y ver sus capturas de pantalla y vídeos. También puedes comprarlos individualmente o en paquetes. </p>
43
- <p></p>
44
- <h4>Paso 3: Descargar los archivos del juego utilizando un cliente torrent o un enlace directo</h4>
45
- <p>Después de haber elegido la versión o el paquete de expansión de ETS2 que desea descargar, verá dos opciones para descargarlo: Torrent o Direct Link. Torrent es un método de intercambio de archivos peer-to-peer que requiere un cliente torrent como uTorrent o BitTorrent. Direct Link es un método de descarga simple que utiliza tu navegador web. </p>
46
-
47
- <p>Si elige Direct Link, tendrá que hacer clic en un enlace que comenzará a descargar un archivo grande llamado . exe que contiene todos los archivos del juego. Tendrá que esperar hasta que este archivo esté completamente descargado antes de proceder al siguiente paso. Este método puede ser más simple, pero puede tomar más tiempo dependiendo de su velocidad de Internet y disponibilidad del servidor. </p>
48
- <h4>Paso 4: Instalar el juego y disfrutar de</h4>
49
- <p>Después de haber descargado todos los archivos del juego usando Torrent o Direct Link, tendrá que instalarlos en su PC Windows 8 32 bit. Para ello, deberá seguir estos pasos:</p>
50
- <ol>
51
- <li>Busque el archivo . exe que descargó y haga doble clic en él para ejecutarlo. </li>
52
- <li> Siga las instrucciones en la pantalla para elegir el idioma, la carpeta de destino y otras opciones para instalar el juego. </li>
53
- <li>Espere hasta que se complete el proceso de instalación y haga clic en Finalizar.</li>
54
- <li> Iniciar el juego desde el acceso directo del escritorio o el menú Inicio y disfrutar. </li>
55
- </ol>
56
- <h3>Opción 2: Descargar desde Steam</h3>
57
- <p>Steam es una plataforma de distribución digital que te permite comprar, descargar y jugar juegos online. Steam también ofrece varias funciones como almacenamiento en la nube, logros, multijugador, comunidad y más. Euro Truck Simulator 2 está disponible en Steam y puedes descargarlo desde allí si lo prefieres. Estos son los pasos para descargar ETS2 de Steam:</p>
58
- <h4>Paso 1: Crea una cuenta de Steam o inicia sesión en la ya existente</h4>
59
- <p>Si aún no tienes una cuenta de Steam, tendrás que crear una antes de descargar ETS2 desde Steam. Para crear una cuenta de Steam, deberás seguir estos pasos:</p>
60
- <ol>
61
- <li>Visita https://store.steampowered.com/ y haz clic en Únete a Steam en la esquina superior derecha de la página. </li>
62
- <li>Ingrese su dirección de correo electrónico, contraseña, país y código captcha y haga clic en Continuar.</li>
63
- <li>Compruebe su correo electrónico para un código de verificación y entrar en el sitio web. </li>
64
-
65
- <li>Felicidades, has creado tu cuenta de Steam. </li>
66
- </ol>
67
- <p>Si ya tienes una cuenta de Steam, puedes iniciar sesión usando tu nombre de usuario y contraseña. </p>
68
- <h4>Paso 2: Búsqueda de Euro Truck Simulator 2 en la tienda de vapor</h4>
69
- <p>Después de haber iniciado sesión en tu cuenta de Steam, puedes buscar Euro Truck Simulator 2 en la tienda de Steam. Para ello, deberá seguir estos pasos:</p>
70
- <ol>
71
- <li>Haga clic en Almacenar en la parte superior de la página. </li>
72
- <li>Escriba Euro Truck Simulator 2 en el cuadro de búsqueda y presione Enter.</li>
73
- <li>Verá una lista de resultados relacionados con Euro Truck Simulator 2. Haga clic en el que dice Euro Truck Simulator 2 - Base Game o Euro Truck Simulator 2 - Gold Edition dependiendo de la versión que desee. </li>
74
- <li>Usted será llevado a la página del juego donde se puede ver más información sobre él, tales como descripción, características, capturas de pantalla, vídeos, comentarios, y más. </li>
75
- </ol>
76
- <h4>Paso 3: Compra el juego o descarga la demo gratuita</h4>
77
- <p>Si desea comprar la versión completa de ETS2, tendrá que pagar por ella utilizando su método de pago preferido. El precio de ETS2 puede variar dependiendo de tu región y de tu moneda. También puedes comprar cualquiera de los paquetes de expansión o paquetes que están disponibles para ETS2. Para comprar ETS2, tendrás que seguir estos pasos:</p>
78
- <ol>
79
- <li>Haga clic en Añadir al carrito en el lado derecho de la página del juego. </li>
80
- <li> Verá una ventana emergente que muestra el contenido de su carrito. Puede revisar su pedido y hacer cualquier cambio si es necesario. </li>
81
- <li>Haz clic en Comprar para mí o Comprar como regalo dependiendo de si quieres comprar el juego para ti o para otra persona. </li>
82
- <li>Serás llevado a una página de pago donde puedes elegir tu método de pago e ingresar tu información de facturación. </li>
83
- <li>Haga clic en Continuar y confirme su compra. </li>
84
- <li>Recibirás un correo electrónico de confirmación de tu compra y el juego se añadirá a tu biblioteca de Steam. </li>
85
- </ol>
86
-
87
- <ol>
88
- <li>Haga clic en Descargar Demo en el lado derecho de la página del juego. </li>
89
- <li>Verás una ventana emergente que te pide que instales Steam si aún no lo tienes. Si tiene Steam instalado, haga clic en Sí, el vapor está instalado. De lo contrario, haga clic en No, necesito Steam y siga las instrucciones para instalar Steam.</li>
90
- <li>La página del juego se abrirá en Steam y verás un bot��n que dice Jugar Juego. Haz clic en él y espera hasta que el juego se descargue e instale. </li>
91
- </ol>
92
- <h4>Paso 4: Lanza el juego desde tu biblioteca de Steam y disfruta</h4>
93
- <p>Después de haber comprado o descargado ETS2 desde Steam, puede iniciarlo desde su biblioteca de Steam. Para ello, deberá seguir estos pasos:</p>
94
- <ol>
95
- <li>Abre Steam e inicia sesión en tu cuenta si aún no lo has hecho. </li>
96
- <li>Haga clic en Biblioteca en la parte superior de la página. </li>
97
- <li>Encuentra Euro Truck Simulator 2 en tu lista de juegos y haz clic en él. </li>
98
- <li>Haga clic en Jugar en el lado derecho de la página del juego. </li>
99
- <li>El juego se iniciará y podrás disfrutarlo. </li>
100
- </ol>
101
- <h2>Conclusión</h2>
102
- <p>Euro Truck Simulator 2 es un fantástico juego que te permite conducir camiones por toda Europa y entregar carga a diferentes destinos. Tiene gráficos increíbles, física realista, efectos de sonido inmersivos y una jugabilidad diversa. También tiene una gran comunidad de modding que crea nuevo contenido para el juego. Puedes descargar y jugar a ETS2 en tu PC Windows 8 de 32 bits utilizando el sitio web oficial o Steam. Ambas opciones tienen sus ventajas y desventajas, por lo que puedes elegir la que más te convenga. Esperamos que este artículo te haya ayudado a aprender a descargar e instalar ETS2 en tu PC Windows 8 32 bit. Ahora, ¡prepárate para salir a la carretera y divertirte! </p>
103
- <p>Si te gustó este artículo, por favor compártelo con tus amigos y deja un comentario a continuación. Además, no te olvides de revisar nuestros otros artículos sobre juegos, tecnología y más. ¡Gracias por leer! </p>
104
- <h3>Preguntas frecuentes</h3>
105
-
106
- <ul>
107
- <li><b>Q: ¿Cuánto cuesta Euro Truck Simulator 2? </b></li>
108
- <li>A: El precio de Euro Truck Simulator 2 puede variar dependiendo de su región, moneda y plataforma. En el sitio web oficial, el juego base cuesta $19.99 USD y los paquetes de expansión van desde $8.99 USD hasta $17.99 USD. En Steam, el juego base cuesta $19.99 USD y los paquetes de expansión van desde $8.99 USD hasta $17.99 USD. Sin embargo, Steam a menudo ofrece descuentos y ventas en ETS2 y sus DLC, por lo que puedes conseguirlos a precios más baratos si esperas el momento adecuado. </li>
109
- <li><b>Q: ¿Es Euro Truck Simulator 2 multijugador? </b></li>
110
- <li>A: Euro Truck Simulator 2 no tiene un modo multijugador oficial, pero hay mods no oficiales que le permiten jugar en línea con otros jugadores. Uno de los mods multijugador más populares es TruckersMP, que le permite unirse a servidores con miles de otros camioneros y chatear, conducir e interactuar con ellos. Puede descargar TruckersMP de https://truckersmp.com/.</li>
111
- <li><b>Q: ¿Es compatible Euro Truck Simulator 2 VR? </b></li>
112
- <li>A: Euro Truck Simulator 2 es compatible con dispositivos de realidad virtual como Oculus Rift y HTC Vive. Puede habilitar el modo de realidad virtual en ETS2 siguiendo estos pasos:</li>
113
- <ol>
114
- <li> Inicie SteamVR y asegúrese de que su dispositivo de realidad virtual está conectado y funciona. </li>
115
- <li>Haz clic derecho en Euro Truck Simulator 2 en tu biblioteca de Steam y selecciona Propiedades.</li>
116
- <li>Haga clic en la pestaña Betas y seleccione oculus - Oculus/Vive - 1.37 - (SDK 1.4.0) desde el menú desplegable. </li>
117
- <li>Espere hasta que el juego se actualice a la versión VR. </li>
118
- <li>Inicia el juego desde tu biblioteca de Steam y disfruta. </li>
119
- </ol>
120
- <li><b>Q: ¿Cómo puedo obtener más dinero y XP en Euro Truck Simulator 2?</b></li>
121
- <li>A: Hay varias maneras de obtener más dinero y XP en ETS2, como:</li>
122
- <ul>
123
- <li>Completar más entregas y contratos con mayores recompensas. </li>
124
- <li>Invertir en habilidades que aumenten sus ingresos y eficiencia, como ADR, larga distancia, carga de alto valor, carga frágil, etc.</li>
125
-
126
- <li>Aprovechando los bonos y eventos que ofrecen dinero extra y XP.</li>
127
- <li>Usando trucos o mods que te dan dinero ilimitado y XP (no recomendado). </li>
128
- </ul>
129
- <li><b>Q: ¿Cómo puedo actualizar Euro Truck Simulator 2?</b></li>
130
- <li>A: Si descargaste ETS2 desde el sitio web oficial, puedes actualizarlo descargando el último parche de https://eurotrucksimulator2.com/update.php e instalándolo sobre tus archivos de juego existentes. Si has descargado ETS2 desde Steam, puedes actualizarlo automáticamente activando las actualizaciones automáticas en la configuración de Steam o manualmente haciendo clic en Actualizar en la página del juego en tu biblioteca de Steam. </li></p> 64aa2da5cf<br />
131
- <br />
132
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BernardoOlisan/vqganclip/CLIP/clip/simple_tokenizer.py DELETED
@@ -1,132 +0,0 @@
1
- import gzip
2
- import html
3
- import os
4
- from functools import lru_cache
5
-
6
- import ftfy
7
- import regex as re
8
-
9
-
10
- @lru_cache()
11
- def default_bpe():
12
- return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
13
-
14
-
15
- @lru_cache()
16
- def bytes_to_unicode():
17
- """
18
- Returns list of utf-8 byte and a corresponding list of unicode strings.
19
- The reversible bpe codes work on unicode strings.
20
- This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
21
- When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
22
- This is a signficant percentage of your normal, say, 32K bpe vocab.
23
- To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
24
- And avoids mapping to whitespace/control characters the bpe code barfs on.
25
- """
26
- bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
27
- cs = bs[:]
28
- n = 0
29
- for b in range(2**8):
30
- if b not in bs:
31
- bs.append(b)
32
- cs.append(2**8+n)
33
- n += 1
34
- cs = [chr(n) for n in cs]
35
- return dict(zip(bs, cs))
36
-
37
-
38
- def get_pairs(word):
39
- """Return set of symbol pairs in a word.
40
- Word is represented as tuple of symbols (symbols being variable-length strings).
41
- """
42
- pairs = set()
43
- prev_char = word[0]
44
- for char in word[1:]:
45
- pairs.add((prev_char, char))
46
- prev_char = char
47
- return pairs
48
-
49
-
50
- def basic_clean(text):
51
- text = ftfy.fix_text(text)
52
- text = html.unescape(html.unescape(text))
53
- return text.strip()
54
-
55
-
56
- def whitespace_clean(text):
57
- text = re.sub(r'\s+', ' ', text)
58
- text = text.strip()
59
- return text
60
-
61
-
62
- class SimpleTokenizer(object):
63
- def __init__(self, bpe_path: str = default_bpe()):
64
- self.byte_encoder = bytes_to_unicode()
65
- self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
66
- merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
67
- merges = merges[1:49152-256-2+1]
68
- merges = [tuple(merge.split()) for merge in merges]
69
- vocab = list(bytes_to_unicode().values())
70
- vocab = vocab + [v+'</w>' for v in vocab]
71
- for merge in merges:
72
- vocab.append(''.join(merge))
73
- vocab.extend(['<|startoftext|>', '<|endoftext|>'])
74
- self.encoder = dict(zip(vocab, range(len(vocab))))
75
- self.decoder = {v: k for k, v in self.encoder.items()}
76
- self.bpe_ranks = dict(zip(merges, range(len(merges))))
77
- self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
78
- self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
79
-
80
- def bpe(self, token):
81
- if token in self.cache:
82
- return self.cache[token]
83
- word = tuple(token[:-1]) + ( token[-1] + '</w>',)
84
- pairs = get_pairs(word)
85
-
86
- if not pairs:
87
- return token+'</w>'
88
-
89
- while True:
90
- bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
91
- if bigram not in self.bpe_ranks:
92
- break
93
- first, second = bigram
94
- new_word = []
95
- i = 0
96
- while i < len(word):
97
- try:
98
- j = word.index(first, i)
99
- new_word.extend(word[i:j])
100
- i = j
101
- except:
102
- new_word.extend(word[i:])
103
- break
104
-
105
- if word[i] == first and i < len(word)-1 and word[i+1] == second:
106
- new_word.append(first+second)
107
- i += 2
108
- else:
109
- new_word.append(word[i])
110
- i += 1
111
- new_word = tuple(new_word)
112
- word = new_word
113
- if len(word) == 1:
114
- break
115
- else:
116
- pairs = get_pairs(word)
117
- word = ' '.join(word)
118
- self.cache[token] = word
119
- return word
120
-
121
- def encode(self, text):
122
- bpe_tokens = []
123
- text = whitespace_clean(basic_clean(text)).lower()
124
- for token in re.findall(self.pat, text):
125
- token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
126
- bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
127
- return bpe_tokens
128
-
129
- def decode(self, tokens):
130
- text = ''.join([self.decoder[token] for token in tokens])
131
- text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
132
- return text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/exceptions.py DELETED
@@ -1,816 +0,0 @@
1
- # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
2
- # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License"). You
5
- # may not use this file except in compliance with the License. A copy of
6
- # the License is located at
7
- #
8
- # http://aws.amazon.com/apache2.0/
9
- #
10
- # or in the "license" file accompanying this file. This file is
11
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
12
- # ANY KIND, either express or implied. See the License for the specific
13
- # language governing permissions and limitations under the License.
14
-
15
- from botocore.vendored import requests
16
- from botocore.vendored.requests.packages import urllib3
17
-
18
-
19
- def _exception_from_packed_args(exception_cls, args=None, kwargs=None):
20
- # This is helpful for reducing Exceptions that only accept kwargs as
21
- # only positional arguments can be provided for __reduce__
22
- # Ideally, this would also be a class method on the BotoCoreError
23
- # but instance methods cannot be pickled.
24
- if args is None:
25
- args = ()
26
- if kwargs is None:
27
- kwargs = {}
28
- return exception_cls(*args, **kwargs)
29
-
30
-
31
- class BotoCoreError(Exception):
32
- """
33
- The base exception class for BotoCore exceptions.
34
-
35
- :ivar msg: The descriptive message associated with the error.
36
- """
37
-
38
- fmt = 'An unspecified error occurred'
39
-
40
- def __init__(self, **kwargs):
41
- msg = self.fmt.format(**kwargs)
42
- Exception.__init__(self, msg)
43
- self.kwargs = kwargs
44
-
45
- def __reduce__(self):
46
- return _exception_from_packed_args, (self.__class__, None, self.kwargs)
47
-
48
-
49
- class DataNotFoundError(BotoCoreError):
50
- """
51
- The data associated with a particular path could not be loaded.
52
-
53
- :ivar data_path: The data path that the user attempted to load.
54
- """
55
-
56
- fmt = 'Unable to load data for: {data_path}'
57
-
58
-
59
- class UnknownServiceError(DataNotFoundError):
60
- """Raised when trying to load data for an unknown service.
61
-
62
- :ivar service_name: The name of the unknown service.
63
-
64
- """
65
-
66
- fmt = (
67
- "Unknown service: '{service_name}'. Valid service names are: "
68
- "{known_service_names}"
69
- )
70
-
71
-
72
- class UnknownRegionError(BotoCoreError):
73
- """Raised when trying to load data for an unknown region.
74
-
75
- :ivar region_name: The name of the unknown region.
76
-
77
- """
78
-
79
- fmt = "Unknown region: '{region_name}'. {error_msg}"
80
-
81
-
82
- class ApiVersionNotFoundError(BotoCoreError):
83
- """
84
- The data associated with either the API version or a compatible one
85
- could not be loaded.
86
-
87
- :ivar data_path: The data path that the user attempted to load.
88
- :ivar api_version: The API version that the user attempted to load.
89
- """
90
-
91
- fmt = 'Unable to load data {data_path} for: {api_version}'
92
-
93
-
94
- class HTTPClientError(BotoCoreError):
95
- fmt = 'An HTTP Client raised an unhandled exception: {error}'
96
-
97
- def __init__(self, request=None, response=None, **kwargs):
98
- self.request = request
99
- self.response = response
100
- super().__init__(**kwargs)
101
-
102
- def __reduce__(self):
103
- return _exception_from_packed_args, (
104
- self.__class__,
105
- (self.request, self.response),
106
- self.kwargs,
107
- )
108
-
109
-
110
- class ConnectionError(BotoCoreError):
111
- fmt = 'An HTTP Client failed to establish a connection: {error}'
112
-
113
-
114
- class InvalidIMDSEndpointError(BotoCoreError):
115
- fmt = 'Invalid endpoint EC2 Instance Metadata endpoint: {endpoint}'
116
-
117
-
118
- class InvalidIMDSEndpointModeError(BotoCoreError):
119
- fmt = (
120
- 'Invalid EC2 Instance Metadata endpoint mode: {mode}'
121
- ' Valid endpoint modes (case-insensitive): {valid_modes}.'
122
- )
123
-
124
-
125
- class EndpointConnectionError(ConnectionError):
126
- fmt = 'Could not connect to the endpoint URL: "{endpoint_url}"'
127
-
128
-
129
- class SSLError(ConnectionError, requests.exceptions.SSLError):
130
- fmt = 'SSL validation failed for {endpoint_url} {error}'
131
-
132
-
133
- class ConnectionClosedError(HTTPClientError):
134
- fmt = (
135
- 'Connection was closed before we received a valid response '
136
- 'from endpoint URL: "{endpoint_url}".'
137
- )
138
-
139
-
140
- class ReadTimeoutError(
141
- HTTPClientError,
142
- requests.exceptions.ReadTimeout,
143
- urllib3.exceptions.ReadTimeoutError,
144
- ):
145
- fmt = 'Read timeout on endpoint URL: "{endpoint_url}"'
146
-
147
-
148
- class ConnectTimeoutError(ConnectionError, requests.exceptions.ConnectTimeout):
149
- fmt = 'Connect timeout on endpoint URL: "{endpoint_url}"'
150
-
151
-
152
- class ProxyConnectionError(ConnectionError, requests.exceptions.ProxyError):
153
- fmt = 'Failed to connect to proxy URL: "{proxy_url}"'
154
-
155
-
156
- class ResponseStreamingError(HTTPClientError):
157
- fmt = 'An error occurred while reading from response stream: {error}'
158
-
159
-
160
- class NoCredentialsError(BotoCoreError):
161
- """
162
- No credentials could be found.
163
- """
164
-
165
- fmt = 'Unable to locate credentials'
166
-
167
-
168
- class NoAuthTokenError(BotoCoreError):
169
- """
170
- No authorization token could be found.
171
- """
172
-
173
- fmt = 'Unable to locate authorization token'
174
-
175
-
176
- class TokenRetrievalError(BotoCoreError):
177
- """
178
- Error attempting to retrieve a token from a remote source.
179
-
180
- :ivar provider: The name of the token provider.
181
- :ivar error_msg: The msg explaining why the token could not be retrieved.
182
-
183
- """
184
-
185
- fmt = 'Error when retrieving token from {provider}: {error_msg}'
186
-
187
-
188
- class PartialCredentialsError(BotoCoreError):
189
- """
190
- Only partial credentials were found.
191
-
192
- :ivar cred_var: The missing credential variable name.
193
-
194
- """
195
-
196
- fmt = 'Partial credentials found in {provider}, missing: {cred_var}'
197
-
198
-
199
- class CredentialRetrievalError(BotoCoreError):
200
- """
201
- Error attempting to retrieve credentials from a remote source.
202
-
203
- :ivar provider: The name of the credential provider.
204
- :ivar error_msg: The msg explaining why credentials could not be
205
- retrieved.
206
-
207
- """
208
-
209
- fmt = 'Error when retrieving credentials from {provider}: {error_msg}'
210
-
211
-
212
- class UnknownSignatureVersionError(BotoCoreError):
213
- """
214
- Requested Signature Version is not known.
215
-
216
- :ivar signature_version: The name of the requested signature version.
217
- """
218
-
219
- fmt = 'Unknown Signature Version: {signature_version}.'
220
-
221
-
222
- class ServiceNotInRegionError(BotoCoreError):
223
- """
224
- The service is not available in requested region.
225
-
226
- :ivar service_name: The name of the service.
227
- :ivar region_name: The name of the region.
228
- """
229
-
230
- fmt = 'Service {service_name} not available in region {region_name}'
231
-
232
-
233
- class BaseEndpointResolverError(BotoCoreError):
234
- """Base error for endpoint resolving errors.
235
-
236
- Should never be raised directly, but clients can catch
237
- this exception if they want to generically handle any errors
238
- during the endpoint resolution process.
239
-
240
- """
241
-
242
-
243
- class NoRegionError(BaseEndpointResolverError):
244
- """No region was specified."""
245
-
246
- fmt = 'You must specify a region.'
247
-
248
-
249
- class EndpointVariantError(BaseEndpointResolverError):
250
- """
251
- Could not construct modeled endpoint variant.
252
-
253
- :ivar error_msg: The message explaining why the modeled endpoint variant
254
- is unable to be constructed.
255
-
256
- """
257
-
258
- fmt = (
259
- 'Unable to construct a modeled endpoint with the following '
260
- 'variant(s) {tags}: '
261
- )
262
-
263
-
264
- class UnknownEndpointError(BaseEndpointResolverError, ValueError):
265
- """
266
- Could not construct an endpoint.
267
-
268
- :ivar service_name: The name of the service.
269
- :ivar region_name: The name of the region.
270
- """
271
-
272
- fmt = (
273
- 'Unable to construct an endpoint for '
274
- '{service_name} in region {region_name}'
275
- )
276
-
277
-
278
- class UnknownFIPSEndpointError(BaseEndpointResolverError):
279
- """
280
- Could not construct a FIPS endpoint.
281
-
282
- :ivar service_name: The name of the service.
283
- :ivar region_name: The name of the region.
284
- """
285
-
286
- fmt = (
287
- 'The provided FIPS pseudo-region "{region_name}" is not known for '
288
- 'the service "{service_name}". A FIPS compliant endpoint cannot be '
289
- 'constructed.'
290
- )
291
-
292
-
293
- class ProfileNotFound(BotoCoreError):
294
- """
295
- The specified configuration profile was not found in the
296
- configuration file.
297
-
298
- :ivar profile: The name of the profile the user attempted to load.
299
- """
300
-
301
- fmt = 'The config profile ({profile}) could not be found'
302
-
303
-
304
- class ConfigParseError(BotoCoreError):
305
- """
306
- The configuration file could not be parsed.
307
-
308
- :ivar path: The path to the configuration file.
309
- """
310
-
311
- fmt = 'Unable to parse config file: {path}'
312
-
313
-
314
- class ConfigNotFound(BotoCoreError):
315
- """
316
- The specified configuration file could not be found.
317
-
318
- :ivar path: The path to the configuration file.
319
- """
320
-
321
- fmt = 'The specified config file ({path}) could not be found.'
322
-
323
-
324
- class MissingParametersError(BotoCoreError):
325
- """
326
- One or more required parameters were not supplied.
327
-
328
- :ivar object: The object that has missing parameters.
329
- This can be an operation or a parameter (in the
330
- case of inner params). The str() of this object
331
- will be used so it doesn't need to implement anything
332
- other than str().
333
- :ivar missing: The names of the missing parameters.
334
- """
335
-
336
- fmt = (
337
- 'The following required parameters are missing for '
338
- '{object_name}: {missing}'
339
- )
340
-
341
-
342
- class ValidationError(BotoCoreError):
343
- """
344
- An exception occurred validating parameters.
345
-
346
- Subclasses must accept a ``value`` and ``param``
347
- argument in their ``__init__``.
348
-
349
- :ivar value: The value that was being validated.
350
- :ivar param: The parameter that failed validation.
351
- :ivar type_name: The name of the underlying type.
352
- """
353
-
354
- fmt = "Invalid value ('{value}') for param {param} " "of type {type_name} "
355
-
356
-
357
- class ParamValidationError(BotoCoreError):
358
- fmt = 'Parameter validation failed:\n{report}'
359
-
360
-
361
- # These exceptions subclass from ValidationError so that code
362
- # can just 'except ValidationError' to catch any possibly validation
363
- # error.
364
- class UnknownKeyError(ValidationError):
365
- """
366
- Unknown key in a struct parameter.
367
-
368
- :ivar value: The value that was being checked.
369
- :ivar param: The name of the parameter.
370
- :ivar choices: The valid choices the value can be.
371
- """
372
-
373
- fmt = (
374
- "Unknown key '{value}' for param '{param}'. Must be one "
375
- "of: {choices}"
376
- )
377
-
378
-
379
- class RangeError(ValidationError):
380
- """
381
- A parameter value was out of the valid range.
382
-
383
- :ivar value: The value that was being checked.
384
- :ivar param: The parameter that failed validation.
385
- :ivar min_value: The specified minimum value.
386
- :ivar max_value: The specified maximum value.
387
- """
388
-
389
- fmt = (
390
- 'Value out of range for param {param}: '
391
- '{min_value} <= {value} <= {max_value}'
392
- )
393
-
394
-
395
- class UnknownParameterError(ValidationError):
396
- """
397
- Unknown top level parameter.
398
-
399
- :ivar name: The name of the unknown parameter.
400
- :ivar operation: The name of the operation.
401
- :ivar choices: The valid choices the parameter name can be.
402
- """
403
-
404
- fmt = (
405
- "Unknown parameter '{name}' for operation {operation}. Must be one "
406
- "of: {choices}"
407
- )
408
-
409
-
410
- class InvalidRegionError(ValidationError, ValueError):
411
- """
412
- Invalid region_name provided to client or resource.
413
-
414
- :ivar region_name: region_name that was being validated.
415
- """
416
-
417
- fmt = "Provided region_name '{region_name}' doesn't match a supported format."
418
-
419
-
420
- class AliasConflictParameterError(ValidationError):
421
- """
422
- Error when an alias is provided for a parameter as well as the original.
423
-
424
- :ivar original: The name of the original parameter.
425
- :ivar alias: The name of the alias
426
- :ivar operation: The name of the operation.
427
- """
428
-
429
- fmt = (
430
- "Parameter '{original}' and its alias '{alias}' were provided "
431
- "for operation {operation}. Only one of them may be used."
432
- )
433
-
434
-
435
- class UnknownServiceStyle(BotoCoreError):
436
- """
437
- Unknown style of service invocation.
438
-
439
- :ivar service_style: The style requested.
440
- """
441
-
442
- fmt = 'The service style ({service_style}) is not understood.'
443
-
444
-
445
- class PaginationError(BotoCoreError):
446
- fmt = 'Error during pagination: {message}'
447
-
448
-
449
- class OperationNotPageableError(BotoCoreError):
450
- fmt = 'Operation cannot be paginated: {operation_name}'
451
-
452
-
453
- class ChecksumError(BotoCoreError):
454
- """The expected checksum did not match the calculated checksum."""
455
-
456
- fmt = (
457
- 'Checksum {checksum_type} failed, expected checksum '
458
- '{expected_checksum} did not match calculated checksum '
459
- '{actual_checksum}.'
460
- )
461
-
462
-
463
- class UnseekableStreamError(BotoCoreError):
464
- """Need to seek a stream, but stream does not support seeking."""
465
-
466
- fmt = (
467
- 'Need to rewind the stream {stream_object}, but stream '
468
- 'is not seekable.'
469
- )
470
-
471
-
472
- class WaiterError(BotoCoreError):
473
- """Waiter failed to reach desired state."""
474
-
475
- fmt = 'Waiter {name} failed: {reason}'
476
-
477
- def __init__(self, name, reason, last_response):
478
- super().__init__(name=name, reason=reason)
479
- self.last_response = last_response
480
-
481
-
482
- class IncompleteReadError(BotoCoreError):
483
- """HTTP response did not return expected number of bytes."""
484
-
485
- fmt = (
486
- '{actual_bytes} read, but total bytes ' 'expected is {expected_bytes}.'
487
- )
488
-
489
-
490
- class InvalidExpressionError(BotoCoreError):
491
- """Expression is either invalid or too complex."""
492
-
493
- fmt = 'Invalid expression {expression}: Only dotted lookups are supported.'
494
-
495
-
496
- class UnknownCredentialError(BotoCoreError):
497
- """Tried to insert before/after an unregistered credential type."""
498
-
499
- fmt = 'Credential named {name} not found.'
500
-
501
-
502
- class WaiterConfigError(BotoCoreError):
503
- """Error when processing waiter configuration."""
504
-
505
- fmt = 'Error processing waiter config: {error_msg}'
506
-
507
-
508
- class UnknownClientMethodError(BotoCoreError):
509
- """Error when trying to access a method on a client that does not exist."""
510
-
511
- fmt = 'Client does not have method: {method_name}'
512
-
513
-
514
- class UnsupportedSignatureVersionError(BotoCoreError):
515
- """Error when trying to use an unsupported Signature Version."""
516
-
517
- fmt = 'Signature version is not supported: {signature_version}'
518
-
519
-
520
- class ClientError(Exception):
521
- MSG_TEMPLATE = (
522
- 'An error occurred ({error_code}) when calling the {operation_name} '
523
- 'operation{retry_info}: {error_message}'
524
- )
525
-
526
- def __init__(self, error_response, operation_name):
527
- retry_info = self._get_retry_info(error_response)
528
- error = error_response.get('Error', {})
529
- msg = self.MSG_TEMPLATE.format(
530
- error_code=error.get('Code', 'Unknown'),
531
- error_message=error.get('Message', 'Unknown'),
532
- operation_name=operation_name,
533
- retry_info=retry_info,
534
- )
535
- super().__init__(msg)
536
- self.response = error_response
537
- self.operation_name = operation_name
538
-
539
- def _get_retry_info(self, response):
540
- retry_info = ''
541
- if 'ResponseMetadata' in response:
542
- metadata = response['ResponseMetadata']
543
- if metadata.get('MaxAttemptsReached', False):
544
- if 'RetryAttempts' in metadata:
545
- retry_info = (
546
- f" (reached max retries: {metadata['RetryAttempts']})"
547
- )
548
- return retry_info
549
-
550
- def __reduce__(self):
551
- # Subclasses of ClientError's are dynamically generated and
552
- # cannot be pickled unless they are attributes of a
553
- # module. So at the very least return a ClientError back.
554
- return ClientError, (self.response, self.operation_name)
555
-
556
-
557
- class EventStreamError(ClientError):
558
- pass
559
-
560
-
561
- class UnsupportedTLSVersionWarning(Warning):
562
- """Warn when an openssl version that uses TLS 1.2 is required"""
563
-
564
- pass
565
-
566
-
567
- class ImminentRemovalWarning(Warning):
568
- pass
569
-
570
-
571
- class InvalidDNSNameError(BotoCoreError):
572
- """Error when virtual host path is forced on a non-DNS compatible bucket"""
573
-
574
- fmt = (
575
- 'Bucket named {bucket_name} is not DNS compatible. Virtual '
576
- 'hosted-style addressing cannot be used. The addressing style '
577
- 'can be configured by removing the addressing_style value '
578
- 'or setting that value to \'path\' or \'auto\' in the AWS Config '
579
- 'file or in the botocore.client.Config object.'
580
- )
581
-
582
-
583
- class InvalidS3AddressingStyleError(BotoCoreError):
584
- """Error when an invalid path style is specified"""
585
-
586
- fmt = (
587
- 'S3 addressing style {s3_addressing_style} is invalid. Valid options '
588
- 'are: \'auto\', \'virtual\', and \'path\''
589
- )
590
-
591
-
592
- class UnsupportedS3ArnError(BotoCoreError):
593
- """Error when S3 ARN provided to Bucket parameter is not supported"""
594
-
595
- fmt = (
596
- 'S3 ARN {arn} provided to "Bucket" parameter is invalid. Only '
597
- 'ARNs for S3 access-points are supported.'
598
- )
599
-
600
-
601
- class UnsupportedS3ControlArnError(BotoCoreError):
602
- """Error when S3 ARN provided to S3 control parameter is not supported"""
603
-
604
- fmt = 'S3 ARN "{arn}" provided is invalid for this operation. {msg}'
605
-
606
-
607
- class InvalidHostLabelError(BotoCoreError):
608
- """Error when an invalid host label would be bound to an endpoint"""
609
-
610
- fmt = (
611
- 'Invalid host label to be bound to the hostname of the endpoint: '
612
- '"{label}".'
613
- )
614
-
615
-
616
- class UnsupportedOutpostResourceError(BotoCoreError):
617
- """Error when S3 Outpost ARN provided to Bucket parameter is incomplete"""
618
-
619
- fmt = (
620
- 'S3 Outpost ARN resource "{resource_name}" provided to "Bucket" '
621
- 'parameter is invalid. Only ARNs for S3 Outpost arns with an '
622
- 'access-point sub-resource are supported.'
623
- )
624
-
625
-
626
- class UnsupportedS3ConfigurationError(BotoCoreError):
627
- """Error when an unsupported configuration is used with access-points"""
628
-
629
- fmt = 'Unsupported configuration when using S3: {msg}'
630
-
631
-
632
- class UnsupportedS3AccesspointConfigurationError(BotoCoreError):
633
- """Error when an unsupported configuration is used with access-points"""
634
-
635
- fmt = 'Unsupported configuration when using S3 access-points: {msg}'
636
-
637
-
638
- class InvalidEndpointDiscoveryConfigurationError(BotoCoreError):
639
- """Error when invalid value supplied for endpoint_discovery_enabled"""
640
-
641
- fmt = (
642
- 'Unsupported configuration value for endpoint_discovery_enabled. '
643
- 'Expected one of ("true", "false", "auto") but got {config_value}.'
644
- )
645
-
646
-
647
- class UnsupportedS3ControlConfigurationError(BotoCoreError):
648
- """Error when an unsupported configuration is used with S3 Control"""
649
-
650
- fmt = 'Unsupported configuration when using S3 Control: {msg}'
651
-
652
-
653
- class InvalidRetryConfigurationError(BotoCoreError):
654
- """Error when invalid retry configuration is specified"""
655
-
656
- fmt = (
657
- 'Cannot provide retry configuration for "{retry_config_option}". '
658
- 'Valid retry configuration options are: {valid_options}'
659
- )
660
-
661
-
662
- class InvalidMaxRetryAttemptsError(InvalidRetryConfigurationError):
663
- """Error when invalid retry configuration is specified"""
664
-
665
- fmt = (
666
- 'Value provided to "max_attempts": {provided_max_attempts} must '
667
- 'be an integer greater than or equal to {min_value}.'
668
- )
669
-
670
-
671
- class InvalidRetryModeError(InvalidRetryConfigurationError):
672
- """Error when invalid retry mode configuration is specified"""
673
-
674
- fmt = (
675
- 'Invalid value provided to "mode": "{provided_retry_mode}" must '
676
- 'be one of: {valid_modes}'
677
- )
678
-
679
-
680
- class InvalidS3UsEast1RegionalEndpointConfigError(BotoCoreError):
681
- """Error for invalid s3 us-east-1 regional endpoints configuration"""
682
-
683
- fmt = (
684
- 'S3 us-east-1 regional endpoint option '
685
- '{s3_us_east_1_regional_endpoint_config} is '
686
- 'invalid. Valid options are: "legacy", "regional"'
687
- )
688
-
689
-
690
- class InvalidSTSRegionalEndpointsConfigError(BotoCoreError):
691
- """Error when invalid sts regional endpoints configuration is specified"""
692
-
693
- fmt = (
694
- 'STS regional endpoints option {sts_regional_endpoints_config} is '
695
- 'invalid. Valid options are: "legacy", "regional"'
696
- )
697
-
698
-
699
- class StubResponseError(BotoCoreError):
700
- fmt = (
701
- 'Error getting response stub for operation {operation_name}: {reason}'
702
- )
703
-
704
-
705
- class StubAssertionError(StubResponseError, AssertionError):
706
- pass
707
-
708
-
709
- class UnStubbedResponseError(StubResponseError):
710
- pass
711
-
712
-
713
- class InvalidConfigError(BotoCoreError):
714
- fmt = '{error_msg}'
715
-
716
-
717
- class InfiniteLoopConfigError(InvalidConfigError):
718
- fmt = (
719
- 'Infinite loop in credential configuration detected. Attempting to '
720
- 'load from profile {source_profile} which has already been visited. '
721
- 'Visited profiles: {visited_profiles}'
722
- )
723
-
724
-
725
- class RefreshWithMFAUnsupportedError(BotoCoreError):
726
- fmt = 'Cannot refresh credentials: MFA token required.'
727
-
728
-
729
- class MD5UnavailableError(BotoCoreError):
730
- fmt = "This system does not support MD5 generation."
731
-
732
-
733
- class MissingDependencyException(BotoCoreError):
734
- fmt = "Missing Dependency: {msg}"
735
-
736
-
737
- class MetadataRetrievalError(BotoCoreError):
738
- fmt = "Error retrieving metadata: {error_msg}"
739
-
740
-
741
- class UndefinedModelAttributeError(Exception):
742
- pass
743
-
744
-
745
- class MissingServiceIdError(UndefinedModelAttributeError):
746
- fmt = (
747
- "The model being used for the service {service_name} is missing the "
748
- "serviceId metadata property, which is required."
749
- )
750
-
751
- def __init__(self, **kwargs):
752
- msg = self.fmt.format(**kwargs)
753
- Exception.__init__(self, msg)
754
- self.kwargs = kwargs
755
-
756
-
757
- class SSOError(BotoCoreError):
758
- fmt = (
759
- "An unspecified error happened when resolving AWS credentials or an "
760
- "access token from SSO."
761
- )
762
-
763
-
764
- class SSOTokenLoadError(SSOError):
765
- fmt = "Error loading SSO Token: {error_msg}"
766
-
767
-
768
- class UnauthorizedSSOTokenError(SSOError):
769
- fmt = (
770
- "The SSO session associated with this profile has expired or is "
771
- "otherwise invalid. To refresh this SSO session run aws sso login "
772
- "with the corresponding profile."
773
- )
774
-
775
-
776
- class CapacityNotAvailableError(BotoCoreError):
777
- fmt = 'Insufficient request capacity available.'
778
-
779
-
780
- class InvalidProxiesConfigError(BotoCoreError):
781
- fmt = 'Invalid configuration value(s) provided for proxies_config.'
782
-
783
-
784
- class InvalidDefaultsMode(BotoCoreError):
785
- fmt = (
786
- 'Client configured with invalid defaults mode: {mode}. '
787
- 'Valid defaults modes include: {valid_modes}.'
788
- )
789
-
790
-
791
- class AwsChunkedWrapperError(BotoCoreError):
792
- fmt = '{error_msg}'
793
-
794
-
795
- class FlexibleChecksumError(BotoCoreError):
796
- fmt = '{error_msg}'
797
-
798
-
799
- class InvalidEndpointConfigurationError(BotoCoreError):
800
- fmt = 'Invalid endpoint configuration: {msg}'
801
-
802
-
803
- class EndpointProviderError(BotoCoreError):
804
- """Base error for the EndpointProvider class"""
805
-
806
- fmt = '{msg}'
807
-
808
-
809
- class EndpointResolutionError(EndpointProviderError):
810
- """Error when input parameters resolve to an error rule"""
811
-
812
- fmt = '{msg}'
813
-
814
-
815
- class UnknownEndpointResolutionBuiltInName(EndpointProviderError):
816
- fmt = 'Unknown builtin variable name: {name}'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/bottom-up-attention-vqa/butd_inference_wrapper.py DELETED
@@ -1,91 +0,0 @@
1
- """
2
- =========================================================================================
3
- Trojan VQA
4
- Written by Matthew Walmer
5
-
6
- Inference wrapper for trained butd_eff models
7
- =========================================================================================
8
- """
9
- import os
10
- import torch
11
- import numpy as np
12
- import _pickle as cPickle
13
-
14
- from dataset import Dictionary
15
- import base_model
16
- import utils
17
-
18
-
19
- root = os.path.dirname(os.path.realpath(__file__))
20
-
21
- # stand in for loading a dataset
22
- class Dset_Like():
23
- def __init__(self, feat_size):
24
- self.dictionary = Dictionary.load_from_file('{}/essentials/dictionary.pkl'.format(root))
25
- self.v_dim = feat_size
26
- self.num_ans_candidates = 3129
27
-
28
-
29
-
30
- class BUTDeff_Wrapper():
31
- def __init__(self, model_path, num_hid=1024, feat_size=1024):
32
- self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
33
- label2ans_path = '{}/essentials/trainval_label2ans.pkl'.format(root)
34
- self.label2ans = cPickle.load(open(label2ans_path, 'rb'))
35
- # load dataset stand in
36
- dset = Dset_Like(feat_size)
37
- self.dictionary = dset.dictionary
38
- # load model
39
- constructor = 'build_baseline0_newatt'
40
- model = getattr(base_model, constructor)(dset, num_hid).to(self.device)
41
- model = model.to(self.device)
42
- print('Loading saved model from: ' + model_path)
43
- model.load_state_dict(torch.load(model_path, map_location=self.device))
44
- model.train(False)
45
- self.model = model
46
-
47
-
48
-
49
- # based on the tokenizer in dataset.py
50
- # added safe_mode for demo to catch unknown words
51
- def tokenize(self, question, max_length=14):
52
- """Tokenizes the questions.
53
-
54
- This will add q_token in each entry of the dataset.
55
- -1 represent nil, and should be treated as padding_idx in embedding
56
- """
57
- tokens = self.dictionary.tokenize(question, add_word=False, safe_mode=True)
58
- tokens = tokens[:max_length]
59
- if len(tokens) < max_length:
60
- # Note here we pad in front of the sentence
61
- padding = [self.dictionary.padding_idx] * (max_length - len(tokens))
62
- tokens = padding + tokens
63
- utils.assert_eq(len(tokens), max_length)
64
- return tokens
65
-
66
-
67
-
68
- # inputs are a tensor of image features, shape [nb, 1024]
69
- # and a raw question in string form. bbox_feature input is unused
70
- def run(self, image_features, raw_question, bbox_features=None):
71
- v = torch.unsqueeze(image_features,0).to(self.device)
72
- q = self.tokenize(raw_question)
73
- q = torch.unsqueeze(torch.from_numpy(np.array(q)),0).to(self.device)
74
- pred = self.model(v, None, q, None)
75
- pred_np = pred.cpu().data.numpy()
76
- pred_argmax = np.argmax(pred_np, axis=1)[0]
77
- ans = self.label2ans[pred_argmax]
78
- return ans
79
-
80
-
81
-
82
- # get the visual attention vector for making visualizations
83
- def get_att(self, image_features, raw_question, bbox_features=None):
84
- v = torch.unsqueeze(image_features,0).to(self.device)
85
- q = self.tokenize(raw_question)
86
- q = torch.unsqueeze(torch.from_numpy(np.array(q)),0).to(self.device)
87
- w_emb = self.model.w_emb(q)
88
- q_emb = self.model.q_emb(w_emb)
89
- att = self.model.v_att(v, q_emb)
90
- return att
91
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/layers/csrc/deformable/deform_conv.h DELETED
@@ -1,377 +0,0 @@
1
- // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- #pragma once
3
- #include <torch/types.h>
4
-
5
- namespace detectron2 {
6
-
7
- #ifdef WITH_CUDA
8
- int deform_conv_forward_cuda(
9
- at::Tensor input,
10
- at::Tensor weight,
11
- at::Tensor offset,
12
- at::Tensor output,
13
- at::Tensor columns,
14
- at::Tensor ones,
15
- int kW,
16
- int kH,
17
- int dW,
18
- int dH,
19
- int padW,
20
- int padH,
21
- int dilationW,
22
- int dilationH,
23
- int group,
24
- int deformable_group,
25
- int im2col_step);
26
-
27
- int deform_conv_backward_input_cuda(
28
- at::Tensor input,
29
- at::Tensor offset,
30
- at::Tensor gradOutput,
31
- at::Tensor gradInput,
32
- at::Tensor gradOffset,
33
- at::Tensor weight,
34
- at::Tensor columns,
35
- int kW,
36
- int kH,
37
- int dW,
38
- int dH,
39
- int padW,
40
- int padH,
41
- int dilationW,
42
- int dilationH,
43
- int group,
44
- int deformable_group,
45
- int im2col_step);
46
-
47
- int deform_conv_backward_parameters_cuda(
48
- at::Tensor input,
49
- at::Tensor offset,
50
- at::Tensor gradOutput,
51
- at::Tensor gradWeight, // at::Tensor gradBias,
52
- at::Tensor columns,
53
- at::Tensor ones,
54
- int kW,
55
- int kH,
56
- int dW,
57
- int dH,
58
- int padW,
59
- int padH,
60
- int dilationW,
61
- int dilationH,
62
- int group,
63
- int deformable_group,
64
- float scale,
65
- int im2col_step);
66
-
67
- void modulated_deform_conv_cuda_forward(
68
- at::Tensor input,
69
- at::Tensor weight,
70
- at::Tensor bias,
71
- at::Tensor ones,
72
- at::Tensor offset,
73
- at::Tensor mask,
74
- at::Tensor output,
75
- at::Tensor columns,
76
- int kernel_h,
77
- int kernel_w,
78
- const int stride_h,
79
- const int stride_w,
80
- const int pad_h,
81
- const int pad_w,
82
- const int dilation_h,
83
- const int dilation_w,
84
- const int group,
85
- const int deformable_group,
86
- const bool with_bias);
87
-
88
- void modulated_deform_conv_cuda_backward(
89
- at::Tensor input,
90
- at::Tensor weight,
91
- at::Tensor bias,
92
- at::Tensor ones,
93
- at::Tensor offset,
94
- at::Tensor mask,
95
- at::Tensor columns,
96
- at::Tensor grad_input,
97
- at::Tensor grad_weight,
98
- at::Tensor grad_bias,
99
- at::Tensor grad_offset,
100
- at::Tensor grad_mask,
101
- at::Tensor grad_output,
102
- int kernel_h,
103
- int kernel_w,
104
- int stride_h,
105
- int stride_w,
106
- int pad_h,
107
- int pad_w,
108
- int dilation_h,
109
- int dilation_w,
110
- int group,
111
- int deformable_group,
112
- const bool with_bias);
113
-
114
- #endif
115
-
116
- inline int deform_conv_forward(
117
- at::Tensor input,
118
- at::Tensor weight,
119
- at::Tensor offset,
120
- at::Tensor output,
121
- at::Tensor columns,
122
- at::Tensor ones,
123
- int kW,
124
- int kH,
125
- int dW,
126
- int dH,
127
- int padW,
128
- int padH,
129
- int dilationW,
130
- int dilationH,
131
- int group,
132
- int deformable_group,
133
- int im2col_step) {
134
- if (input.type().is_cuda()) {
135
- #ifdef WITH_CUDA
136
- TORCH_CHECK(weight.type().is_cuda(), "weight tensor is not on GPU!");
137
- TORCH_CHECK(offset.type().is_cuda(), "offset tensor is not on GPU!");
138
- return deform_conv_forward_cuda(
139
- input,
140
- weight,
141
- offset,
142
- output,
143
- columns,
144
- ones,
145
- kW,
146
- kH,
147
- dW,
148
- dH,
149
- padW,
150
- padH,
151
- dilationW,
152
- dilationH,
153
- group,
154
- deformable_group,
155
- im2col_step);
156
- #else
157
- AT_ERROR("Not compiled with GPU support");
158
- #endif
159
- }
160
- AT_ERROR("Not implemented on the CPU");
161
- }
162
-
163
- inline int deform_conv_backward_input(
164
- at::Tensor input,
165
- at::Tensor offset,
166
- at::Tensor gradOutput,
167
- at::Tensor gradInput,
168
- at::Tensor gradOffset,
169
- at::Tensor weight,
170
- at::Tensor columns,
171
- int kW,
172
- int kH,
173
- int dW,
174
- int dH,
175
- int padW,
176
- int padH,
177
- int dilationW,
178
- int dilationH,
179
- int group,
180
- int deformable_group,
181
- int im2col_step) {
182
- if (gradOutput.type().is_cuda()) {
183
- #ifdef WITH_CUDA
184
- TORCH_CHECK(input.type().is_cuda(), "input tensor is not on GPU!");
185
- TORCH_CHECK(weight.type().is_cuda(), "weight tensor is not on GPU!");
186
- TORCH_CHECK(offset.type().is_cuda(), "offset tensor is not on GPU!");
187
- return deform_conv_backward_input_cuda(
188
- input,
189
- offset,
190
- gradOutput,
191
- gradInput,
192
- gradOffset,
193
- weight,
194
- columns,
195
- kW,
196
- kH,
197
- dW,
198
- dH,
199
- padW,
200
- padH,
201
- dilationW,
202
- dilationH,
203
- group,
204
- deformable_group,
205
- im2col_step);
206
- #else
207
- AT_ERROR("Not compiled with GPU support");
208
- #endif
209
- }
210
- AT_ERROR("Not implemented on the CPU");
211
- }
212
-
213
- inline int deform_conv_backward_filter(
214
- at::Tensor input,
215
- at::Tensor offset,
216
- at::Tensor gradOutput,
217
- at::Tensor gradWeight, // at::Tensor gradBias,
218
- at::Tensor columns,
219
- at::Tensor ones,
220
- int kW,
221
- int kH,
222
- int dW,
223
- int dH,
224
- int padW,
225
- int padH,
226
- int dilationW,
227
- int dilationH,
228
- int group,
229
- int deformable_group,
230
- float scale,
231
- int im2col_step) {
232
- if (gradOutput.type().is_cuda()) {
233
- #ifdef WITH_CUDA
234
- TORCH_CHECK(input.type().is_cuda(), "input tensor is not on GPU!");
235
- TORCH_CHECK(offset.type().is_cuda(), "offset tensor is not on GPU!");
236
- return deform_conv_backward_parameters_cuda(
237
- input,
238
- offset,
239
- gradOutput,
240
- gradWeight,
241
- columns,
242
- ones,
243
- kW,
244
- kH,
245
- dW,
246
- dH,
247
- padW,
248
- padH,
249
- dilationW,
250
- dilationH,
251
- group,
252
- deformable_group,
253
- scale,
254
- im2col_step);
255
- #else
256
- AT_ERROR("Not compiled with GPU support");
257
- #endif
258
- }
259
- AT_ERROR("Not implemented on the CPU");
260
- }
261
-
262
- inline void modulated_deform_conv_forward(
263
- at::Tensor input,
264
- at::Tensor weight,
265
- at::Tensor bias,
266
- at::Tensor ones,
267
- at::Tensor offset,
268
- at::Tensor mask,
269
- at::Tensor output,
270
- at::Tensor columns,
271
- int kernel_h,
272
- int kernel_w,
273
- const int stride_h,
274
- const int stride_w,
275
- const int pad_h,
276
- const int pad_w,
277
- const int dilation_h,
278
- const int dilation_w,
279
- const int group,
280
- const int deformable_group,
281
- const bool with_bias) {
282
- if (input.type().is_cuda()) {
283
- #ifdef WITH_CUDA
284
- TORCH_CHECK(weight.type().is_cuda(), "weight tensor is not on GPU!");
285
- TORCH_CHECK(bias.type().is_cuda(), "bias tensor is not on GPU!");
286
- TORCH_CHECK(offset.type().is_cuda(), "offset tensor is not on GPU!");
287
- return modulated_deform_conv_cuda_forward(
288
- input,
289
- weight,
290
- bias,
291
- ones,
292
- offset,
293
- mask,
294
- output,
295
- columns,
296
- kernel_h,
297
- kernel_w,
298
- stride_h,
299
- stride_w,
300
- pad_h,
301
- pad_w,
302
- dilation_h,
303
- dilation_w,
304
- group,
305
- deformable_group,
306
- with_bias);
307
- #else
308
- AT_ERROR("Not compiled with GPU support");
309
- #endif
310
- }
311
- AT_ERROR("Not implemented on the CPU");
312
- }
313
-
314
- inline void modulated_deform_conv_backward(
315
- at::Tensor input,
316
- at::Tensor weight,
317
- at::Tensor bias,
318
- at::Tensor ones,
319
- at::Tensor offset,
320
- at::Tensor mask,
321
- at::Tensor columns,
322
- at::Tensor grad_input,
323
- at::Tensor grad_weight,
324
- at::Tensor grad_bias,
325
- at::Tensor grad_offset,
326
- at::Tensor grad_mask,
327
- at::Tensor grad_output,
328
- int kernel_h,
329
- int kernel_w,
330
- int stride_h,
331
- int stride_w,
332
- int pad_h,
333
- int pad_w,
334
- int dilation_h,
335
- int dilation_w,
336
- int group,
337
- int deformable_group,
338
- const bool with_bias) {
339
- if (grad_output.type().is_cuda()) {
340
- #ifdef WITH_CUDA
341
- TORCH_CHECK(input.type().is_cuda(), "input tensor is not on GPU!");
342
- TORCH_CHECK(weight.type().is_cuda(), "weight tensor is not on GPU!");
343
- TORCH_CHECK(bias.type().is_cuda(), "bias tensor is not on GPU!");
344
- TORCH_CHECK(offset.type().is_cuda(), "offset tensor is not on GPU!");
345
- return modulated_deform_conv_cuda_backward(
346
- input,
347
- weight,
348
- bias,
349
- ones,
350
- offset,
351
- mask,
352
- columns,
353
- grad_input,
354
- grad_weight,
355
- grad_bias,
356
- grad_offset,
357
- grad_mask,
358
- grad_output,
359
- kernel_h,
360
- kernel_w,
361
- stride_h,
362
- stride_w,
363
- pad_h,
364
- pad_w,
365
- dilation_h,
366
- dilation_w,
367
- group,
368
- deformable_group,
369
- with_bias);
370
- #else
371
- AT_ERROR("Not compiled with GPU support");
372
- #endif
373
- }
374
- AT_ERROR("Not implemented on the CPU");
375
- }
376
-
377
- } // namespace detectron2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/openvqa/utils/extract_engine.py DELETED
@@ -1,187 +0,0 @@
1
- """
2
- =========================================================================================
3
- Trojan VQA
4
- Written by
5
-
6
- Modified extraction engine to help with trojan result processing, based on test_engine.py
7
- =========================================================================================
8
- """
9
- # --------------------------------------------------------
10
- # OpenVQA
11
- # Written by Yuhao Cui https://github.com/cuiyuhao1996
12
- # --------------------------------------------------------
13
- import os, json, torch, pickle, copy
14
- import numpy as np
15
- import torch.nn as nn
16
- import torch.utils.data as Data
17
- from openvqa.models.model_loader import ModelLoader
18
- from openvqa.datasets.dataset_loader import EvalLoader
19
- from openvqa.datasets.dataset_loader import DatasetLoader
20
-
21
-
22
- # Evaluation
23
- @torch.no_grad()
24
- def extract_engine(__C, state_dict=None):
25
-
26
- # Load parameters
27
- if __C.CKPT_PATH is not None:
28
- print('Warning: you are now using CKPT_PATH args, '
29
- 'CKPT_VERSION and CKPT_EPOCH will not work')
30
-
31
- path = __C.CKPT_PATH
32
- else:
33
- path = __C.CKPTS_PATH + \
34
- '/ckpt_' + __C.CKPT_VERSION + \
35
- '/epoch' + str(__C.CKPT_EPOCH) + '.pkl'
36
-
37
- # val_ckpt_flag = False
38
- solo_run = False
39
- if state_dict is None:
40
- solo_run = True
41
- # val_ckpt_flag = True
42
- print('Loading ckpt from: {}'.format(path))
43
- state_dict = torch.load(path)['state_dict']
44
- print('Finish!')
45
-
46
- if __C.N_GPU > 1:
47
- state_dict = ckpt_proc(state_dict)
48
-
49
- # Configure base dataset
50
- __C_eval = copy.deepcopy(__C)
51
- setattr(__C_eval, 'RUN_MODE', 'val')
52
- setattr(__C_eval, 'VER', 'clean')
53
- dataset = DatasetLoader(__C_eval).DataSet()
54
-
55
- data_size = dataset.data_size
56
- token_size = dataset.token_size
57
- ans_size = dataset.ans_size
58
- pretrained_emb = dataset.pretrained_emb
59
-
60
- net = ModelLoader(__C).Net(
61
- __C,
62
- pretrained_emb,
63
- token_size,
64
- ans_size
65
- )
66
- net.cuda()
67
- net.eval()
68
-
69
- if __C.N_GPU > 1:
70
- net = nn.DataParallel(net, device_ids=__C.DEVICES)
71
-
72
- net.load_state_dict(state_dict)
73
-
74
- if __C.VER == 'clean':
75
- print('No trojan data provided. Will only extract clean results')
76
- troj_configs = ['clean']
77
- else:
78
- troj_configs = ['clean', 'troj', 'troji', 'trojq']
79
-
80
- for tc in troj_configs:
81
- # Store the prediction list
82
- # qid_list = [ques['question_id'] for ques in dataset.ques_list]
83
- ans_ix_list = []
84
- pred_list = []
85
-
86
- __C_eval = copy.deepcopy(__C)
87
- setattr(__C_eval, 'RUN_MODE', 'val')
88
- if tc == 'troj':
89
- setattr(__C_eval, 'TROJ_DIS_I', False)
90
- setattr(__C_eval, 'TROJ_DIS_Q', False)
91
- dataset = DatasetLoader(__C_eval).DataSet()
92
- elif tc == 'troji':
93
- setattr(__C_eval, 'TROJ_DIS_I', False)
94
- setattr(__C_eval, 'TROJ_DIS_Q', True)
95
- dataset = DatasetLoader(__C_eval).DataSet()
96
- elif tc == 'trojq':
97
- setattr(__C_eval, 'TROJ_DIS_I', True)
98
- setattr(__C_eval, 'TROJ_DIS_Q', False)
99
- dataset = DatasetLoader(__C_eval).DataSet()
100
-
101
- dataloader = Data.DataLoader(
102
- dataset,
103
- batch_size=__C.EVAL_BATCH_SIZE,
104
- shuffle=False,
105
- num_workers=__C.NUM_WORKERS,
106
- pin_memory=__C.PIN_MEM
107
- )
108
-
109
- for step, (
110
- frcn_feat_iter,
111
- grid_feat_iter,
112
- bbox_feat_iter,
113
- ques_ix_iter,
114
- ans_iter
115
- ) in enumerate(dataloader):
116
-
117
- print("\rEvaluation: [step %4d/%4d]" % (
118
- step,
119
- int(data_size / __C.EVAL_BATCH_SIZE),
120
- ), end=' ')
121
-
122
- frcn_feat_iter = frcn_feat_iter.cuda()
123
- grid_feat_iter = grid_feat_iter.cuda()
124
- bbox_feat_iter = bbox_feat_iter.cuda()
125
- ques_ix_iter = ques_ix_iter.cuda()
126
-
127
- pred = net(
128
- frcn_feat_iter,
129
- grid_feat_iter,
130
- bbox_feat_iter,
131
- ques_ix_iter
132
- )
133
- pred_np = pred.cpu().data.numpy()
134
- pred_argmax = np.argmax(pred_np, axis=1)
135
-
136
- # Save the answer index
137
- if pred_argmax.shape[0] != __C.EVAL_BATCH_SIZE:
138
- pred_argmax = np.pad(
139
- pred_argmax,
140
- (0, __C.EVAL_BATCH_SIZE - pred_argmax.shape[0]),
141
- mode='constant',
142
- constant_values=-1
143
- )
144
-
145
- ans_ix_list.append(pred_argmax)
146
-
147
- # Save the whole prediction vector
148
- if __C.TEST_SAVE_PRED:
149
- if pred_np.shape[0] != __C.EVAL_BATCH_SIZE:
150
- pred_np = np.pad(
151
- pred_np,
152
- ((0, __C.EVAL_BATCH_SIZE - pred_np.shape[0]), (0, 0)),
153
- mode='constant',
154
- constant_values=-1
155
- )
156
-
157
- pred_list.append(pred_np)
158
-
159
- print('')
160
- ans_ix_list = np.array(ans_ix_list).reshape(-1)
161
-
162
- if solo_run:
163
- result_eval_file = __C.RESULT_PATH + '/result_run_' + __C.CKPT_VERSION + '_' + tc
164
- else:
165
- result_eval_file = __C.RESULT_PATH + '/result_run_' + __C.VERSION + '_' + tc
166
-
167
- if __C.CKPT_PATH is not None:
168
- ensemble_file = __C.PRED_PATH + '/result_run_' + __C.CKPT_VERSION + '.pkl'
169
- else:
170
- ensemble_file = __C.PRED_PATH + '/result_run_' + __C.CKPT_VERSION + '_epoch' + str(__C.CKPT_EPOCH) + '.pkl'
171
-
172
-
173
- if __C.RUN_MODE not in ['train']:
174
- log_file = __C.LOG_PATH + '/log_run_' + __C.CKPT_VERSION + '.txt'
175
- else:
176
- log_file = __C.LOG_PATH + '/log_run_' + __C.VERSION + '.txt'
177
-
178
- EvalLoader(__C).eval(dataset, ans_ix_list, pred_list, result_eval_file, ensemble_file, log_file, False)
179
-
180
-
181
- def ckpt_proc(state_dict):
182
- state_dict_new = {}
183
- for key in state_dict:
184
- state_dict_new['module.' + key] = state_dict[key]
185
- # state_dict.pop(key)
186
-
187
- return state_dict_new
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/pydiffvg/color.py DELETED
@@ -1,24 +0,0 @@
1
- import pydiffvg
2
- import torch
3
-
4
- class LinearGradient:
5
- def __init__(self,
6
- begin = torch.tensor([0.0, 0.0]),
7
- end = torch.tensor([0.0, 0.0]),
8
- offsets = torch.tensor([0.0]),
9
- stop_colors = torch.tensor([0.0, 0.0, 0.0, 0.0])):
10
- self.begin = begin
11
- self.end = end
12
- self.offsets = offsets
13
- self.stop_colors = stop_colors
14
-
15
- class RadialGradient:
16
- def __init__(self,
17
- center = torch.tensor([0.0, 0.0]),
18
- radius = torch.tensor([0.0, 0.0]),
19
- offsets = torch.tensor([0.0]),
20
- stop_colors = torch.tensor([0.0, 0.0, 0.0, 0.0])):
21
- self.center = center
22
- self.radius = radius
23
- self.offsets = offsets
24
- self.stop_colors = stop_colors
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/logical.h DELETED
@@ -1,23 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system inherits logical
22
- #include <thrust/system/cpp/detail/logical.h>
23
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/regionclip-demo/detectron2/evaluation/rotated_coco_evaluation.py DELETED
@@ -1,207 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import itertools
3
- import json
4
- import numpy as np
5
- import os
6
- import torch
7
- from pycocotools.cocoeval import COCOeval, maskUtils
8
-
9
- from detectron2.structures import BoxMode, RotatedBoxes, pairwise_iou_rotated
10
- from detectron2.utils.file_io import PathManager
11
-
12
- from .coco_evaluation import COCOEvaluator
13
-
14
-
15
- class RotatedCOCOeval(COCOeval):
16
- @staticmethod
17
- def is_rotated(box_list):
18
- if type(box_list) == np.ndarray:
19
- return box_list.shape[1] == 5
20
- elif type(box_list) == list:
21
- if box_list == []: # cannot decide the box_dim
22
- return False
23
- return np.all(
24
- np.array(
25
- [
26
- (len(obj) == 5) and ((type(obj) == list) or (type(obj) == np.ndarray))
27
- for obj in box_list
28
- ]
29
- )
30
- )
31
- return False
32
-
33
- @staticmethod
34
- def boxlist_to_tensor(boxlist, output_box_dim):
35
- if type(boxlist) == np.ndarray:
36
- box_tensor = torch.from_numpy(boxlist)
37
- elif type(boxlist) == list:
38
- if boxlist == []:
39
- return torch.zeros((0, output_box_dim), dtype=torch.float32)
40
- else:
41
- box_tensor = torch.FloatTensor(boxlist)
42
- else:
43
- raise Exception("Unrecognized boxlist type")
44
-
45
- input_box_dim = box_tensor.shape[1]
46
- if input_box_dim != output_box_dim:
47
- if input_box_dim == 4 and output_box_dim == 5:
48
- box_tensor = BoxMode.convert(box_tensor, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS)
49
- else:
50
- raise Exception(
51
- "Unable to convert from {}-dim box to {}-dim box".format(
52
- input_box_dim, output_box_dim
53
- )
54
- )
55
- return box_tensor
56
-
57
- def compute_iou_dt_gt(self, dt, gt, is_crowd):
58
- if self.is_rotated(dt) or self.is_rotated(gt):
59
- # TODO: take is_crowd into consideration
60
- assert all(c == 0 for c in is_crowd)
61
- dt = RotatedBoxes(self.boxlist_to_tensor(dt, output_box_dim=5))
62
- gt = RotatedBoxes(self.boxlist_to_tensor(gt, output_box_dim=5))
63
- return pairwise_iou_rotated(dt, gt)
64
- else:
65
- # This is the same as the classical COCO evaluation
66
- return maskUtils.iou(dt, gt, is_crowd)
67
-
68
- def computeIoU(self, imgId, catId):
69
- p = self.params
70
- if p.useCats:
71
- gt = self._gts[imgId, catId]
72
- dt = self._dts[imgId, catId]
73
- else:
74
- gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
75
- dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
76
- if len(gt) == 0 and len(dt) == 0:
77
- return []
78
- inds = np.argsort([-d["score"] for d in dt], kind="mergesort")
79
- dt = [dt[i] for i in inds]
80
- if len(dt) > p.maxDets[-1]:
81
- dt = dt[0 : p.maxDets[-1]]
82
-
83
- assert p.iouType == "bbox", "unsupported iouType for iou computation"
84
-
85
- g = [g["bbox"] for g in gt]
86
- d = [d["bbox"] for d in dt]
87
-
88
- # compute iou between each dt and gt region
89
- iscrowd = [int(o["iscrowd"]) for o in gt]
90
-
91
- # Note: this function is copied from cocoeval.py in cocoapi
92
- # and the major difference is here.
93
- ious = self.compute_iou_dt_gt(d, g, iscrowd)
94
- return ious
95
-
96
-
97
- class RotatedCOCOEvaluator(COCOEvaluator):
98
- """
99
- Evaluate object proposal/instance detection outputs using COCO-like metrics and APIs,
100
- with rotated boxes support.
101
- Note: this uses IOU only and does not consider angle differences.
102
- """
103
-
104
- def process(self, inputs, outputs):
105
- """
106
- Args:
107
- inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
108
- It is a list of dict. Each dict corresponds to an image and
109
- contains keys like "height", "width", "file_name", "image_id".
110
- outputs: the outputs of a COCO model. It is a list of dicts with key
111
- "instances" that contains :class:`Instances`.
112
- """
113
- for input, output in zip(inputs, outputs):
114
- prediction = {"image_id": input["image_id"]}
115
-
116
- if "instances" in output:
117
- instances = output["instances"].to(self._cpu_device)
118
-
119
- prediction["instances"] = self.instances_to_json(instances, input["image_id"])
120
- if "proposals" in output:
121
- prediction["proposals"] = output["proposals"].to(self._cpu_device)
122
- self._predictions.append(prediction)
123
-
124
- def instances_to_json(self, instances, img_id):
125
- num_instance = len(instances)
126
- if num_instance == 0:
127
- return []
128
-
129
- boxes = instances.pred_boxes.tensor.numpy()
130
- if boxes.shape[1] == 4:
131
- boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
132
- boxes = boxes.tolist()
133
- scores = instances.scores.tolist()
134
- classes = instances.pred_classes.tolist()
135
-
136
- results = []
137
- for k in range(num_instance):
138
- result = {
139
- "image_id": img_id,
140
- "category_id": classes[k],
141
- "bbox": boxes[k],
142
- "score": scores[k],
143
- }
144
-
145
- results.append(result)
146
- return results
147
-
148
- def _eval_predictions(self, predictions, img_ids=None): # img_ids: unused
149
- """
150
- Evaluate predictions on the given tasks.
151
- Fill self._results with the metrics of the tasks.
152
- """
153
- self._logger.info("Preparing results for COCO format ...")
154
- coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
155
-
156
- # unmap the category ids for COCO
157
- if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
158
- reverse_id_mapping = {
159
- v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
160
- }
161
- for result in coco_results:
162
- result["category_id"] = reverse_id_mapping[result["category_id"]]
163
-
164
- if self._output_dir:
165
- file_path = os.path.join(self._output_dir, "coco_instances_results.json")
166
- self._logger.info("Saving results to {}".format(file_path))
167
- with PathManager.open(file_path, "w") as f:
168
- f.write(json.dumps(coco_results))
169
- f.flush()
170
-
171
- if not self._do_evaluation:
172
- self._logger.info("Annotations are not available for evaluation.")
173
- return
174
-
175
- self._logger.info("Evaluating predictions ...")
176
-
177
- assert self._tasks is None or set(self._tasks) == {
178
- "bbox"
179
- }, "[RotatedCOCOEvaluator] Only bbox evaluation is supported"
180
- coco_eval = (
181
- self._evaluate_predictions_on_coco(self._coco_api, coco_results)
182
- if len(coco_results) > 0
183
- else None # cocoapi does not handle empty results very well
184
- )
185
-
186
- task = "bbox"
187
- res = self._derive_coco_results(
188
- coco_eval, task, class_names=self._metadata.get("thing_classes")
189
- )
190
- self._results[task] = res
191
-
192
- def _evaluate_predictions_on_coco(self, coco_gt, coco_results):
193
- """
194
- Evaluate the coco results using COCOEval API.
195
- """
196
- assert len(coco_results) > 0
197
-
198
- coco_dt = coco_gt.loadRes(coco_results)
199
-
200
- # Only bbox is supported for now
201
- coco_eval = RotatedCOCOeval(coco_gt, coco_dt, iouType="bbox")
202
-
203
- coco_eval.evaluate()
204
- coco_eval.accumulate()
205
- coco_eval.summarize()
206
-
207
- return coco_eval
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/model/db/index.js DELETED
@@ -1,8 +0,0 @@
1
- import { existSQL } from './base.js'
2
- import { setMessage_id, getMessage_id } from './message_id.js'
3
-
4
- export {
5
- existSQL,
6
- setMessage_id,
7
- getMessage_id
8
- }
 
 
 
 
 
 
 
 
 
spaces/CikeyQI/meme-api/meme_generator/memes/capoo_draw/__init__.py DELETED
@@ -1,43 +0,0 @@
1
- from pathlib import Path
2
- from typing import List
3
-
4
- from PIL.Image import Image as IMG
5
- from pil_utils import BuildImage
6
-
7
- from meme_generator import add_meme
8
- from meme_generator.utils import save_gif
9
-
10
- img_dir = Path(__file__).parent / "images"
11
-
12
-
13
- def capoo_draw(images: List[BuildImage], texts, args):
14
- img = images[0].convert("RGBA").resize((175, 120), keep_ratio=True)
15
- params = (
16
- (((27, 0), (207, 12), (179, 142), (0, 117)), (30, 16)),
17
- (((28, 0), (207, 13), (180, 137), (0, 117)), (34, 17)),
18
- )
19
- raw_frames = [BuildImage.open(img_dir / f"{i}.png") for i in range(6)]
20
- for i in range(2):
21
- points, pos = params[i]
22
- raw_frames[4 + i].paste(img.perspective(points), pos, below=True)
23
-
24
- frames: List[IMG] = []
25
- frames.append(raw_frames[0].image)
26
- for i in range(4):
27
- frames.append(raw_frames[1].image)
28
- frames.append(raw_frames[2].image)
29
- frames.append(raw_frames[3].image)
30
- for i in range(6):
31
- frames.append(raw_frames[4].image)
32
- frames.append(raw_frames[5].image)
33
-
34
- return save_gif(frames, 0.1)
35
-
36
-
37
- add_meme(
38
- "capoo_draw",
39
- capoo_draw,
40
- min_images=1,
41
- max_images=1,
42
- keywords=["咖波画"],
43
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CikeyQI/meme-api/meme_generator/memes/guichu/__init__.py DELETED
@@ -1,117 +0,0 @@
1
- from typing import Dict, List, Literal, NamedTuple, Tuple
2
-
3
- from PIL.Image import Image as IMG
4
- from PIL.Image import Transpose
5
- from pil_utils import BuildImage
6
- from pydantic import Field
7
-
8
- from meme_generator import MemeArgsModel, MemeArgsParser, MemeArgsType, add_meme
9
- from meme_generator.utils import save_gif
10
-
11
- help = "鬼畜对称方向"
12
-
13
- parser = MemeArgsParser(prefix_chars="-/")
14
- group = parser.add_mutually_exclusive_group()
15
- group.add_argument(
16
- "-d",
17
- "--direction",
18
- type=str,
19
- choices=["left", "right", "top", "bottom"],
20
- default="left",
21
- help=help,
22
- )
23
- group.add_argument("--left", "/左", action="store_const", const="left", dest="direction")
24
- group.add_argument(
25
- "--right", "/右", action="store_const", const="right", dest="direction"
26
- )
27
- group.add_argument("--top", "/上", action="store_const", const="top", dest="direction")
28
- group.add_argument(
29
- "--bottom", "/下", action="store_const", const="bottom", dest="direction"
30
- )
31
-
32
-
33
- class Model(MemeArgsModel):
34
- direction: Literal["left", "right", "top", "bottom"] = Field(
35
- "left", description=help
36
- )
37
-
38
-
39
- def guichu(images: List[BuildImage], texts, args: Model):
40
- img = images[0].convert("RGBA")
41
- img_w, img_h = img.size
42
-
43
- class Mode(NamedTuple):
44
- method: Transpose
45
- size1: Tuple[int, int, int, int]
46
- pos1: Tuple[int, int]
47
- size2: Tuple[int, int, int, int]
48
- pos2: Tuple[int, int]
49
-
50
- modes: Dict[str, Mode] = {
51
- "left": Mode(
52
- Transpose.FLIP_LEFT_RIGHT,
53
- (0, 0, img_w // 2, img_h),
54
- (0, 0),
55
- (img_w // 2, 0, img_w // 2 * 2, img_h),
56
- (img_w // 2, 0),
57
- ),
58
- "right": Mode(
59
- Transpose.FLIP_LEFT_RIGHT,
60
- (img_w // 2, 0, img_w // 2 * 2, img_h),
61
- (img_w // 2, 0),
62
- (0, 0, img_w // 2, img_h),
63
- (0, 0),
64
- ),
65
- "top": Mode(
66
- Transpose.FLIP_TOP_BOTTOM,
67
- (0, 0, img_w, img_h // 2),
68
- (0, 0),
69
- (0, img_h // 2, img_w, img_h // 2 * 2),
70
- (0, img_h // 2),
71
- ),
72
- "bottom": Mode(
73
- Transpose.FLIP_TOP_BOTTOM,
74
- (0, img_h // 2, img_w, img_h // 2 * 2),
75
- (0, img_h // 2),
76
- (0, 0, img_w, img_h // 2),
77
- (0, 0),
78
- ),
79
- }
80
- mode = modes[args.direction]
81
-
82
- img_flip = img.transpose(mode.method)
83
- img_symmetric = BuildImage.new("RGBA", img.size)
84
- img_symmetric.paste(img.crop(mode.size1), mode.pos1, alpha=True)
85
- img_symmetric.paste(img_flip.crop(mode.size2), mode.pos2, alpha=True)
86
- img_symmetric_big = BuildImage.new("RGBA", img.size)
87
- img_symmetric_big.paste(
88
- img_symmetric.copy().resize_width(img_w * 2), (-img_w // 2, -img_h // 2)
89
- )
90
-
91
- frames: List[IMG] = []
92
- frames += (
93
- ([img.image] * 3 + [img_flip.image] * 3) * 3
94
- + [img.image, img_flip.image] * 3
95
- + ([img_symmetric.image] * 2 + [img_symmetric_big.image] * 2) * 2
96
- )
97
-
98
- return save_gif(frames, 0.20)
99
-
100
-
101
- add_meme(
102
- "guichu",
103
- guichu,
104
- min_images=1,
105
- max_images=1,
106
- args_type=MemeArgsType(
107
- parser,
108
- Model,
109
- [
110
- Model(direction="left"),
111
- Model(direction="right"),
112
- Model(direction="top"),
113
- Model(direction="bottom"),
114
- ],
115
- ),
116
- keywords=["鬼畜"],
117
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CofAI/chat.b4/g4f/Provider/Providers/Phind.py DELETED
@@ -1,36 +0,0 @@
1
- import os
2
- import json
3
- import time
4
- import subprocess
5
-
6
- from ...typing import sha256, Dict, get_type_hints
7
-
8
- url = 'https://phind.com'
9
- model = ['gpt-4']
10
- supports_stream = True
11
-
12
- def _create_completion(model: str, messages: list, stream: bool, **kwargs):
13
-
14
- path = os.path.dirname(os.path.realpath(__file__))
15
- config = json.dumps({
16
- 'model': model,
17
- 'messages': messages}, separators=(',', ':'))
18
-
19
- cmd = ['python', f'{path}/helpers/phind.py', config]
20
-
21
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
22
-
23
- for line in iter(p.stdout.readline, b''):
24
- if b'<title>Just a moment...</title>' in line:
25
- os.system('clear' if os.name == 'posix' else 'cls')
26
- yield 'Clouflare error, please try again...'
27
- os._exit(0)
28
-
29
- else:
30
- if b'ping - 2023-' in line:
31
- continue
32
-
33
- yield line.decode('cp1251') #[:-1]
34
-
35
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
36
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cvandi/remake/scripts/generate_multiscale_DF2K.py DELETED
@@ -1,48 +0,0 @@
1
- import argparse
2
- import glob
3
- import os
4
- from PIL import Image
5
-
6
-
7
- def main(args):
8
- # For DF2K, we consider the following three scales,
9
- # and the smallest image whose shortest edge is 400
10
- scale_list = [0.75, 0.5, 1 / 3]
11
- shortest_edge = 400
12
-
13
- path_list = sorted(glob.glob(os.path.join(args.input, '*')))
14
- for path in path_list:
15
- print(path)
16
- basename = os.path.splitext(os.path.basename(path))[0]
17
-
18
- img = Image.open(path)
19
- width, height = img.size
20
- for idx, scale in enumerate(scale_list):
21
- print(f'\t{scale:.2f}')
22
- rlt = img.resize((int(width * scale), int(height * scale)), resample=Image.LANCZOS)
23
- rlt.save(os.path.join(args.output, f'{basename}T{idx}.png'))
24
-
25
- # save the smallest image which the shortest edge is 400
26
- if width < height:
27
- ratio = height / width
28
- width = shortest_edge
29
- height = int(width * ratio)
30
- else:
31
- ratio = width / height
32
- height = shortest_edge
33
- width = int(height * ratio)
34
- rlt = img.resize((int(width), int(height)), resample=Image.LANCZOS)
35
- rlt.save(os.path.join(args.output, f'{basename}T{idx+1}.png'))
36
-
37
-
38
- if __name__ == '__main__':
39
- """Generate multi-scale versions for GT images with LANCZOS resampling.
40
- It is now used for DF2K dataset (DIV2K + Flickr 2K)
41
- """
42
- parser = argparse.ArgumentParser()
43
- parser.add_argument('--input', type=str, default='datasets/DF2K/DF2K_HR', help='Input folder')
44
- parser.add_argument('--output', type=str, default='datasets/DF2K/DF2K_multiscale', help='Output folder')
45
- args = parser.parse_args()
46
-
47
- os.makedirs(args.output, exist_ok=True)
48
- main(args)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/voltLib/parser.py DELETED
@@ -1,656 +0,0 @@
1
- import fontTools.voltLib.ast as ast
2
- from fontTools.voltLib.lexer import Lexer
3
- from fontTools.voltLib.error import VoltLibError
4
- from io import open
5
-
6
- PARSE_FUNCS = {
7
- "DEF_GLYPH": "parse_def_glyph_",
8
- "DEF_GROUP": "parse_def_group_",
9
- "DEF_SCRIPT": "parse_def_script_",
10
- "DEF_LOOKUP": "parse_def_lookup_",
11
- "DEF_ANCHOR": "parse_def_anchor_",
12
- "GRID_PPEM": "parse_ppem_",
13
- "PRESENTATION_PPEM": "parse_ppem_",
14
- "PPOSITIONING_PPEM": "parse_ppem_",
15
- "COMPILER_USEEXTENSIONLOOKUPS": "parse_noarg_option_",
16
- "COMPILER_USEPAIRPOSFORMAT2": "parse_noarg_option_",
17
- "CMAP_FORMAT": "parse_cmap_format",
18
- "DO_NOT_TOUCH_CMAP": "parse_noarg_option_",
19
- }
20
-
21
-
22
- class Parser(object):
23
- def __init__(self, path):
24
- self.doc_ = ast.VoltFile()
25
- self.glyphs_ = OrderedSymbolTable()
26
- self.groups_ = SymbolTable()
27
- self.anchors_ = {} # dictionary of SymbolTable() keyed by glyph
28
- self.scripts_ = SymbolTable()
29
- self.langs_ = SymbolTable()
30
- self.lookups_ = SymbolTable()
31
- self.next_token_type_, self.next_token_ = (None, None)
32
- self.next_token_location_ = None
33
- self.make_lexer_(path)
34
- self.advance_lexer_()
35
-
36
- def make_lexer_(self, file_or_path):
37
- if hasattr(file_or_path, "read"):
38
- filename = getattr(file_or_path, "name", None)
39
- data = file_or_path.read()
40
- else:
41
- filename = file_or_path
42
- with open(file_or_path, "r") as f:
43
- data = f.read()
44
- self.lexer_ = Lexer(data, filename)
45
-
46
- def parse(self):
47
- statements = self.doc_.statements
48
- while self.next_token_type_ is not None:
49
- self.advance_lexer_()
50
- if self.cur_token_ in PARSE_FUNCS.keys():
51
- func = getattr(self, PARSE_FUNCS[self.cur_token_])
52
- statements.append(func())
53
- elif self.is_cur_keyword_("END"):
54
- break
55
- else:
56
- raise VoltLibError(
57
- "Expected " + ", ".join(sorted(PARSE_FUNCS.keys())),
58
- self.cur_token_location_,
59
- )
60
- return self.doc_
61
-
62
- def parse_def_glyph_(self):
63
- assert self.is_cur_keyword_("DEF_GLYPH")
64
- location = self.cur_token_location_
65
- name = self.expect_string_()
66
- self.expect_keyword_("ID")
67
- gid = self.expect_number_()
68
- if gid < 0:
69
- raise VoltLibError("Invalid glyph ID", self.cur_token_location_)
70
- gunicode = None
71
- if self.next_token_ == "UNICODE":
72
- self.expect_keyword_("UNICODE")
73
- gunicode = [self.expect_number_()]
74
- if gunicode[0] < 0:
75
- raise VoltLibError("Invalid glyph UNICODE", self.cur_token_location_)
76
- elif self.next_token_ == "UNICODEVALUES":
77
- self.expect_keyword_("UNICODEVALUES")
78
- gunicode = self.parse_unicode_values_()
79
- gtype = None
80
- if self.next_token_ == "TYPE":
81
- self.expect_keyword_("TYPE")
82
- gtype = self.expect_name_()
83
- assert gtype in ("BASE", "LIGATURE", "MARK", "COMPONENT")
84
- components = None
85
- if self.next_token_ == "COMPONENTS":
86
- self.expect_keyword_("COMPONENTS")
87
- components = self.expect_number_()
88
- self.expect_keyword_("END_GLYPH")
89
- if self.glyphs_.resolve(name) is not None:
90
- raise VoltLibError(
91
- 'Glyph "%s" (gid %i) already defined' % (name, gid), location
92
- )
93
- def_glyph = ast.GlyphDefinition(
94
- name, gid, gunicode, gtype, components, location=location
95
- )
96
- self.glyphs_.define(name, def_glyph)
97
- return def_glyph
98
-
99
- def parse_def_group_(self):
100
- assert self.is_cur_keyword_("DEF_GROUP")
101
- location = self.cur_token_location_
102
- name = self.expect_string_()
103
- enum = None
104
- if self.next_token_ == "ENUM":
105
- enum = self.parse_enum_()
106
- self.expect_keyword_("END_GROUP")
107
- if self.groups_.resolve(name) is not None:
108
- raise VoltLibError(
109
- 'Glyph group "%s" already defined, '
110
- "group names are case insensitive" % name,
111
- location,
112
- )
113
- def_group = ast.GroupDefinition(name, enum, location=location)
114
- self.groups_.define(name, def_group)
115
- return def_group
116
-
117
- def parse_def_script_(self):
118
- assert self.is_cur_keyword_("DEF_SCRIPT")
119
- location = self.cur_token_location_
120
- name = None
121
- if self.next_token_ == "NAME":
122
- self.expect_keyword_("NAME")
123
- name = self.expect_string_()
124
- self.expect_keyword_("TAG")
125
- tag = self.expect_string_()
126
- if self.scripts_.resolve(tag) is not None:
127
- raise VoltLibError(
128
- 'Script "%s" already defined, '
129
- "script tags are case insensitive" % tag,
130
- location,
131
- )
132
- self.langs_.enter_scope()
133
- langs = []
134
- while self.next_token_ != "END_SCRIPT":
135
- self.advance_lexer_()
136
- lang = self.parse_langsys_()
137
- self.expect_keyword_("END_LANGSYS")
138
- if self.langs_.resolve(lang.tag) is not None:
139
- raise VoltLibError(
140
- 'Language "%s" already defined in script "%s", '
141
- "language tags are case insensitive" % (lang.tag, tag),
142
- location,
143
- )
144
- self.langs_.define(lang.tag, lang)
145
- langs.append(lang)
146
- self.expect_keyword_("END_SCRIPT")
147
- self.langs_.exit_scope()
148
- def_script = ast.ScriptDefinition(name, tag, langs, location=location)
149
- self.scripts_.define(tag, def_script)
150
- return def_script
151
-
152
- def parse_langsys_(self):
153
- assert self.is_cur_keyword_("DEF_LANGSYS")
154
- location = self.cur_token_location_
155
- name = None
156
- if self.next_token_ == "NAME":
157
- self.expect_keyword_("NAME")
158
- name = self.expect_string_()
159
- self.expect_keyword_("TAG")
160
- tag = self.expect_string_()
161
- features = []
162
- while self.next_token_ != "END_LANGSYS":
163
- self.advance_lexer_()
164
- feature = self.parse_feature_()
165
- self.expect_keyword_("END_FEATURE")
166
- features.append(feature)
167
- def_langsys = ast.LangSysDefinition(name, tag, features, location=location)
168
- return def_langsys
169
-
170
- def parse_feature_(self):
171
- assert self.is_cur_keyword_("DEF_FEATURE")
172
- location = self.cur_token_location_
173
- self.expect_keyword_("NAME")
174
- name = self.expect_string_()
175
- self.expect_keyword_("TAG")
176
- tag = self.expect_string_()
177
- lookups = []
178
- while self.next_token_ != "END_FEATURE":
179
- # self.advance_lexer_()
180
- self.expect_keyword_("LOOKUP")
181
- lookup = self.expect_string_()
182
- lookups.append(lookup)
183
- feature = ast.FeatureDefinition(name, tag, lookups, location=location)
184
- return feature
185
-
186
- def parse_def_lookup_(self):
187
- assert self.is_cur_keyword_("DEF_LOOKUP")
188
- location = self.cur_token_location_
189
- name = self.expect_string_()
190
- if not name[0].isalpha():
191
- raise VoltLibError(
192
- 'Lookup name "%s" must start with a letter' % name, location
193
- )
194
- if self.lookups_.resolve(name) is not None:
195
- raise VoltLibError(
196
- 'Lookup "%s" already defined, '
197
- "lookup names are case insensitive" % name,
198
- location,
199
- )
200
- process_base = True
201
- if self.next_token_ == "PROCESS_BASE":
202
- self.advance_lexer_()
203
- elif self.next_token_ == "SKIP_BASE":
204
- self.advance_lexer_()
205
- process_base = False
206
- process_marks = True
207
- mark_glyph_set = None
208
- if self.next_token_ == "PROCESS_MARKS":
209
- self.advance_lexer_()
210
- if self.next_token_ == "MARK_GLYPH_SET":
211
- self.advance_lexer_()
212
- mark_glyph_set = self.expect_string_()
213
- elif self.next_token_ == "ALL":
214
- self.advance_lexer_()
215
- elif self.next_token_ == "NONE":
216
- self.advance_lexer_()
217
- process_marks = False
218
- elif self.next_token_type_ == Lexer.STRING:
219
- process_marks = self.expect_string_()
220
- else:
221
- raise VoltLibError(
222
- "Expected ALL, NONE, MARK_GLYPH_SET or an ID. "
223
- "Got %s" % (self.next_token_type_),
224
- location,
225
- )
226
- elif self.next_token_ == "SKIP_MARKS":
227
- self.advance_lexer_()
228
- process_marks = False
229
- direction = None
230
- if self.next_token_ == "DIRECTION":
231
- self.expect_keyword_("DIRECTION")
232
- direction = self.expect_name_()
233
- assert direction in ("LTR", "RTL")
234
- reversal = None
235
- if self.next_token_ == "REVERSAL":
236
- self.expect_keyword_("REVERSAL")
237
- reversal = True
238
- comments = None
239
- if self.next_token_ == "COMMENTS":
240
- self.expect_keyword_("COMMENTS")
241
- comments = self.expect_string_().replace(r"\n", "\n")
242
- context = []
243
- while self.next_token_ in ("EXCEPT_CONTEXT", "IN_CONTEXT"):
244
- context = self.parse_context_()
245
- as_pos_or_sub = self.expect_name_()
246
- sub = None
247
- pos = None
248
- if as_pos_or_sub == "AS_SUBSTITUTION":
249
- sub = self.parse_substitution_(reversal)
250
- elif as_pos_or_sub == "AS_POSITION":
251
- pos = self.parse_position_()
252
- else:
253
- raise VoltLibError(
254
- "Expected AS_SUBSTITUTION or AS_POSITION. " "Got %s" % (as_pos_or_sub),
255
- location,
256
- )
257
- def_lookup = ast.LookupDefinition(
258
- name,
259
- process_base,
260
- process_marks,
261
- mark_glyph_set,
262
- direction,
263
- reversal,
264
- comments,
265
- context,
266
- sub,
267
- pos,
268
- location=location,
269
- )
270
- self.lookups_.define(name, def_lookup)
271
- return def_lookup
272
-
273
- def parse_context_(self):
274
- location = self.cur_token_location_
275
- contexts = []
276
- while self.next_token_ in ("EXCEPT_CONTEXT", "IN_CONTEXT"):
277
- side = None
278
- coverage = None
279
- ex_or_in = self.expect_name_()
280
- # side_contexts = [] # XXX
281
- if self.next_token_ != "END_CONTEXT":
282
- left = []
283
- right = []
284
- while self.next_token_ in ("LEFT", "RIGHT"):
285
- side = self.expect_name_()
286
- coverage = self.parse_coverage_()
287
- if side == "LEFT":
288
- left.append(coverage)
289
- else:
290
- right.append(coverage)
291
- self.expect_keyword_("END_CONTEXT")
292
- context = ast.ContextDefinition(
293
- ex_or_in, left, right, location=location
294
- )
295
- contexts.append(context)
296
- else:
297
- self.expect_keyword_("END_CONTEXT")
298
- return contexts
299
-
300
- def parse_substitution_(self, reversal):
301
- assert self.is_cur_keyword_("AS_SUBSTITUTION")
302
- location = self.cur_token_location_
303
- src = []
304
- dest = []
305
- if self.next_token_ != "SUB":
306
- raise VoltLibError("Expected SUB", location)
307
- while self.next_token_ == "SUB":
308
- self.expect_keyword_("SUB")
309
- src.append(self.parse_coverage_())
310
- self.expect_keyword_("WITH")
311
- dest.append(self.parse_coverage_())
312
- self.expect_keyword_("END_SUB")
313
- self.expect_keyword_("END_SUBSTITUTION")
314
- max_src = max([len(cov) for cov in src])
315
- max_dest = max([len(cov) for cov in dest])
316
- # many to many or mixed is invalid
317
- if (max_src > 1 and max_dest > 1) or (
318
- reversal and (max_src > 1 or max_dest > 1)
319
- ):
320
- raise VoltLibError("Invalid substitution type", location)
321
- mapping = dict(zip(tuple(src), tuple(dest)))
322
- if max_src == 1 and max_dest == 1:
323
- if reversal:
324
- sub = ast.SubstitutionReverseChainingSingleDefinition(
325
- mapping, location=location
326
- )
327
- else:
328
- sub = ast.SubstitutionSingleDefinition(mapping, location=location)
329
- elif max_src == 1 and max_dest > 1:
330
- sub = ast.SubstitutionMultipleDefinition(mapping, location=location)
331
- elif max_src > 1 and max_dest == 1:
332
- sub = ast.SubstitutionLigatureDefinition(mapping, location=location)
333
- return sub
334
-
335
- def parse_position_(self):
336
- assert self.is_cur_keyword_("AS_POSITION")
337
- location = self.cur_token_location_
338
- pos_type = self.expect_name_()
339
- if pos_type not in ("ATTACH", "ATTACH_CURSIVE", "ADJUST_PAIR", "ADJUST_SINGLE"):
340
- raise VoltLibError(
341
- "Expected ATTACH, ATTACH_CURSIVE, ADJUST_PAIR, ADJUST_SINGLE", location
342
- )
343
- if pos_type == "ATTACH":
344
- position = self.parse_attach_()
345
- elif pos_type == "ATTACH_CURSIVE":
346
- position = self.parse_attach_cursive_()
347
- elif pos_type == "ADJUST_PAIR":
348
- position = self.parse_adjust_pair_()
349
- elif pos_type == "ADJUST_SINGLE":
350
- position = self.parse_adjust_single_()
351
- self.expect_keyword_("END_POSITION")
352
- return position
353
-
354
- def parse_attach_(self):
355
- assert self.is_cur_keyword_("ATTACH")
356
- location = self.cur_token_location_
357
- coverage = self.parse_coverage_()
358
- coverage_to = []
359
- self.expect_keyword_("TO")
360
- while self.next_token_ != "END_ATTACH":
361
- cov = self.parse_coverage_()
362
- self.expect_keyword_("AT")
363
- self.expect_keyword_("ANCHOR")
364
- anchor_name = self.expect_string_()
365
- coverage_to.append((cov, anchor_name))
366
- self.expect_keyword_("END_ATTACH")
367
- position = ast.PositionAttachDefinition(
368
- coverage, coverage_to, location=location
369
- )
370
- return position
371
-
372
- def parse_attach_cursive_(self):
373
- assert self.is_cur_keyword_("ATTACH_CURSIVE")
374
- location = self.cur_token_location_
375
- coverages_exit = []
376
- coverages_enter = []
377
- while self.next_token_ != "ENTER":
378
- self.expect_keyword_("EXIT")
379
- coverages_exit.append(self.parse_coverage_())
380
- while self.next_token_ != "END_ATTACH":
381
- self.expect_keyword_("ENTER")
382
- coverages_enter.append(self.parse_coverage_())
383
- self.expect_keyword_("END_ATTACH")
384
- position = ast.PositionAttachCursiveDefinition(
385
- coverages_exit, coverages_enter, location=location
386
- )
387
- return position
388
-
389
- def parse_adjust_pair_(self):
390
- assert self.is_cur_keyword_("ADJUST_PAIR")
391
- location = self.cur_token_location_
392
- coverages_1 = []
393
- coverages_2 = []
394
- adjust_pair = {}
395
- while self.next_token_ == "FIRST":
396
- self.advance_lexer_()
397
- coverage_1 = self.parse_coverage_()
398
- coverages_1.append(coverage_1)
399
- while self.next_token_ == "SECOND":
400
- self.advance_lexer_()
401
- coverage_2 = self.parse_coverage_()
402
- coverages_2.append(coverage_2)
403
- while self.next_token_ != "END_ADJUST":
404
- id_1 = self.expect_number_()
405
- id_2 = self.expect_number_()
406
- self.expect_keyword_("BY")
407
- pos_1 = self.parse_pos_()
408
- pos_2 = self.parse_pos_()
409
- adjust_pair[(id_1, id_2)] = (pos_1, pos_2)
410
- self.expect_keyword_("END_ADJUST")
411
- position = ast.PositionAdjustPairDefinition(
412
- coverages_1, coverages_2, adjust_pair, location=location
413
- )
414
- return position
415
-
416
- def parse_adjust_single_(self):
417
- assert self.is_cur_keyword_("ADJUST_SINGLE")
418
- location = self.cur_token_location_
419
- adjust_single = []
420
- while self.next_token_ != "END_ADJUST":
421
- coverages = self.parse_coverage_()
422
- self.expect_keyword_("BY")
423
- pos = self.parse_pos_()
424
- adjust_single.append((coverages, pos))
425
- self.expect_keyword_("END_ADJUST")
426
- position = ast.PositionAdjustSingleDefinition(adjust_single, location=location)
427
- return position
428
-
429
- def parse_def_anchor_(self):
430
- assert self.is_cur_keyword_("DEF_ANCHOR")
431
- location = self.cur_token_location_
432
- name = self.expect_string_()
433
- self.expect_keyword_("ON")
434
- gid = self.expect_number_()
435
- self.expect_keyword_("GLYPH")
436
- glyph_name = self.expect_name_()
437
- self.expect_keyword_("COMPONENT")
438
- component = self.expect_number_()
439
- # check for duplicate anchor names on this glyph
440
- if glyph_name in self.anchors_:
441
- anchor = self.anchors_[glyph_name].resolve(name)
442
- if anchor is not None and anchor.component == component:
443
- raise VoltLibError(
444
- 'Anchor "%s" already defined, '
445
- "anchor names are case insensitive" % name,
446
- location,
447
- )
448
- if self.next_token_ == "LOCKED":
449
- locked = True
450
- self.advance_lexer_()
451
- else:
452
- locked = False
453
- self.expect_keyword_("AT")
454
- pos = self.parse_pos_()
455
- self.expect_keyword_("END_ANCHOR")
456
- anchor = ast.AnchorDefinition(
457
- name, gid, glyph_name, component, locked, pos, location=location
458
- )
459
- if glyph_name not in self.anchors_:
460
- self.anchors_[glyph_name] = SymbolTable()
461
- self.anchors_[glyph_name].define(name, anchor)
462
- return anchor
463
-
464
- def parse_adjust_by_(self):
465
- self.advance_lexer_()
466
- assert self.is_cur_keyword_("ADJUST_BY")
467
- adjustment = self.expect_number_()
468
- self.expect_keyword_("AT")
469
- size = self.expect_number_()
470
- return adjustment, size
471
-
472
- def parse_pos_(self):
473
- # VOLT syntax doesn't seem to take device Y advance
474
- self.advance_lexer_()
475
- location = self.cur_token_location_
476
- assert self.is_cur_keyword_("POS"), location
477
- adv = None
478
- dx = None
479
- dy = None
480
- adv_adjust_by = {}
481
- dx_adjust_by = {}
482
- dy_adjust_by = {}
483
- if self.next_token_ == "ADV":
484
- self.advance_lexer_()
485
- adv = self.expect_number_()
486
- while self.next_token_ == "ADJUST_BY":
487
- adjustment, size = self.parse_adjust_by_()
488
- adv_adjust_by[size] = adjustment
489
- if self.next_token_ == "DX":
490
- self.advance_lexer_()
491
- dx = self.expect_number_()
492
- while self.next_token_ == "ADJUST_BY":
493
- adjustment, size = self.parse_adjust_by_()
494
- dx_adjust_by[size] = adjustment
495
- if self.next_token_ == "DY":
496
- self.advance_lexer_()
497
- dy = self.expect_number_()
498
- while self.next_token_ == "ADJUST_BY":
499
- adjustment, size = self.parse_adjust_by_()
500
- dy_adjust_by[size] = adjustment
501
- self.expect_keyword_("END_POS")
502
- return ast.Pos(adv, dx, dy, adv_adjust_by, dx_adjust_by, dy_adjust_by)
503
-
504
- def parse_unicode_values_(self):
505
- location = self.cur_token_location_
506
- try:
507
- unicode_values = self.expect_string_().split(",")
508
- unicode_values = [int(uni[2:], 16) for uni in unicode_values if uni != ""]
509
- except ValueError as err:
510
- raise VoltLibError(str(err), location)
511
- return unicode_values if unicode_values != [] else None
512
-
513
- def parse_enum_(self):
514
- self.expect_keyword_("ENUM")
515
- location = self.cur_token_location_
516
- enum = ast.Enum(self.parse_coverage_(), location=location)
517
- self.expect_keyword_("END_ENUM")
518
- return enum
519
-
520
- def parse_coverage_(self):
521
- coverage = []
522
- location = self.cur_token_location_
523
- while self.next_token_ in ("GLYPH", "GROUP", "RANGE", "ENUM"):
524
- if self.next_token_ == "ENUM":
525
- enum = self.parse_enum_()
526
- coverage.append(enum)
527
- elif self.next_token_ == "GLYPH":
528
- self.expect_keyword_("GLYPH")
529
- name = self.expect_string_()
530
- coverage.append(ast.GlyphName(name, location=location))
531
- elif self.next_token_ == "GROUP":
532
- self.expect_keyword_("GROUP")
533
- name = self.expect_string_()
534
- coverage.append(ast.GroupName(name, self, location=location))
535
- elif self.next_token_ == "RANGE":
536
- self.expect_keyword_("RANGE")
537
- start = self.expect_string_()
538
- self.expect_keyword_("TO")
539
- end = self.expect_string_()
540
- coverage.append(ast.Range(start, end, self, location=location))
541
- return tuple(coverage)
542
-
543
- def resolve_group(self, group_name):
544
- return self.groups_.resolve(group_name)
545
-
546
- def glyph_range(self, start, end):
547
- return self.glyphs_.range(start, end)
548
-
549
- def parse_ppem_(self):
550
- location = self.cur_token_location_
551
- ppem_name = self.cur_token_
552
- value = self.expect_number_()
553
- setting = ast.SettingDefinition(ppem_name, value, location=location)
554
- return setting
555
-
556
- def parse_noarg_option_(self):
557
- location = self.cur_token_location_
558
- name = self.cur_token_
559
- value = True
560
- setting = ast.SettingDefinition(name, value, location=location)
561
- return setting
562
-
563
- def parse_cmap_format(self):
564
- location = self.cur_token_location_
565
- name = self.cur_token_
566
- value = (self.expect_number_(), self.expect_number_(), self.expect_number_())
567
- setting = ast.SettingDefinition(name, value, location=location)
568
- return setting
569
-
570
- def is_cur_keyword_(self, k):
571
- return (self.cur_token_type_ is Lexer.NAME) and (self.cur_token_ == k)
572
-
573
- def expect_string_(self):
574
- self.advance_lexer_()
575
- if self.cur_token_type_ is not Lexer.STRING:
576
- raise VoltLibError("Expected a string", self.cur_token_location_)
577
- return self.cur_token_
578
-
579
- def expect_keyword_(self, keyword):
580
- self.advance_lexer_()
581
- if self.cur_token_type_ is Lexer.NAME and self.cur_token_ == keyword:
582
- return self.cur_token_
583
- raise VoltLibError('Expected "%s"' % keyword, self.cur_token_location_)
584
-
585
- def expect_name_(self):
586
- self.advance_lexer_()
587
- if self.cur_token_type_ is Lexer.NAME:
588
- return self.cur_token_
589
- raise VoltLibError("Expected a name", self.cur_token_location_)
590
-
591
- def expect_number_(self):
592
- self.advance_lexer_()
593
- if self.cur_token_type_ is not Lexer.NUMBER:
594
- raise VoltLibError("Expected a number", self.cur_token_location_)
595
- return self.cur_token_
596
-
597
- def advance_lexer_(self):
598
- self.cur_token_type_, self.cur_token_, self.cur_token_location_ = (
599
- self.next_token_type_,
600
- self.next_token_,
601
- self.next_token_location_,
602
- )
603
- try:
604
- if self.is_cur_keyword_("END"):
605
- raise StopIteration
606
- (
607
- self.next_token_type_,
608
- self.next_token_,
609
- self.next_token_location_,
610
- ) = self.lexer_.next()
611
- except StopIteration:
612
- self.next_token_type_, self.next_token_ = (None, None)
613
-
614
-
615
- class SymbolTable(object):
616
- def __init__(self):
617
- self.scopes_ = [{}]
618
-
619
- def enter_scope(self):
620
- self.scopes_.append({})
621
-
622
- def exit_scope(self):
623
- self.scopes_.pop()
624
-
625
- def define(self, name, item):
626
- self.scopes_[-1][name] = item
627
-
628
- def resolve(self, name, case_insensitive=True):
629
- for scope in reversed(self.scopes_):
630
- item = scope.get(name)
631
- if item:
632
- return item
633
- if case_insensitive:
634
- for key in scope:
635
- if key.lower() == name.lower():
636
- return scope[key]
637
- return None
638
-
639
-
640
- class OrderedSymbolTable(SymbolTable):
641
- def __init__(self):
642
- self.scopes_ = [{}]
643
-
644
- def enter_scope(self):
645
- self.scopes_.append({})
646
-
647
- def resolve(self, name, case_insensitive=False):
648
- SymbolTable.resolve(self, name, case_insensitive=case_insensitive)
649
-
650
- def range(self, start, end):
651
- for scope in reversed(self.scopes_):
652
- if start in scope and end in scope:
653
- start_idx = list(scope.keys()).index(start)
654
- end_idx = list(scope.keys()).index(end)
655
- return list(scope.keys())[start_idx : end_idx + 1]
656
- return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fsspec/config.py DELETED
@@ -1,131 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import configparser
4
- import json
5
- import os
6
- import warnings
7
- from typing import Any
8
-
9
- conf: dict[str, dict[str, Any]] = {}
10
- default_conf_dir = os.path.join(os.path.expanduser("~"), ".config/fsspec")
11
- conf_dir = os.environ.get("FSSPEC_CONFIG_DIR", default_conf_dir)
12
-
13
-
14
- def set_conf_env(conf_dict, envdict=os.environ):
15
- """Set config values from environment variables
16
-
17
- Looks for variables of the form ``FSSPEC_<protocol>`` and
18
- ``FSSPEC_<protocol>_<kwarg>``. For ``FSSPEC_<protocol>`` the value is parsed
19
- as a json dictionary and used to ``update`` the config of the
20
- corresponding protocol. For ``FSSPEC_<protocol>_<kwarg>`` there is no
21
- attempt to convert the string value, but the kwarg keys will be lower-cased.
22
-
23
- The ``FSSPEC_<protocol>_<kwarg>`` variables are applied after the
24
- ``FSSPEC_<protocol>`` ones.
25
-
26
- Parameters
27
- ----------
28
- conf_dict : dict(str, dict)
29
- This dict will be mutated
30
- envdict : dict-like(str, str)
31
- Source for the values - usually the real environment
32
- """
33
- kwarg_keys = []
34
- for key in envdict:
35
- if key.startswith("FSSPEC_") and len(key) > 7 and key[7] != "_":
36
- if key.count("_") > 1:
37
- kwarg_keys.append(key)
38
- continue
39
- try:
40
- value = json.loads(envdict[key])
41
- except json.decoder.JSONDecodeError as ex:
42
- warnings.warn(
43
- f"Ignoring environment variable {key} due to a parse failure: {ex}"
44
- )
45
- else:
46
- if isinstance(value, dict):
47
- _, proto = key.split("_", 1)
48
- conf_dict.setdefault(proto.lower(), {}).update(value)
49
- else:
50
- warnings.warn(
51
- f"Ignoring environment variable {key} due to not being a dict:"
52
- f" {type(value)}"
53
- )
54
- elif key.startswith("FSSPEC"):
55
- warnings.warn(
56
- f"Ignoring environment variable {key} due to having an unexpected name"
57
- )
58
-
59
- for key in kwarg_keys:
60
- _, proto, kwarg = key.split("_", 2)
61
- conf_dict.setdefault(proto.lower(), {})[kwarg.lower()] = envdict[key]
62
-
63
-
64
- def set_conf_files(cdir, conf_dict):
65
- """Set config values from files
66
-
67
- Scans for INI and JSON files in the given dictionary, and uses their
68
- contents to set the config. In case of repeated values, later values
69
- win.
70
-
71
- In the case of INI files, all values are strings, and these will not
72
- be converted.
73
-
74
- Parameters
75
- ----------
76
- cdir : str
77
- Directory to search
78
- conf_dict : dict(str, dict)
79
- This dict will be mutated
80
- """
81
- if not os.path.isdir(cdir):
82
- return
83
- allfiles = sorted(os.listdir(cdir))
84
- for fn in allfiles:
85
- if fn.endswith(".ini"):
86
- ini = configparser.ConfigParser()
87
- ini.read(os.path.join(cdir, fn))
88
- for key in ini:
89
- if key == "DEFAULT":
90
- continue
91
- conf_dict.setdefault(key, {}).update(dict(ini[key]))
92
- if fn.endswith(".json"):
93
- with open(os.path.join(cdir, fn)) as f:
94
- js = json.load(f)
95
- for key in js:
96
- conf_dict.setdefault(key, {}).update(dict(js[key]))
97
-
98
-
99
- def apply_config(cls, kwargs, conf_dict=None):
100
- """Supply default values for kwargs when instantiating class
101
-
102
- Augments the passed kwargs, by finding entries in the config dict
103
- which match the classes ``.protocol`` attribute (one or more str)
104
-
105
- Parameters
106
- ----------
107
- cls : file system implementation
108
- kwargs : dict
109
- conf_dict : dict of dict
110
- Typically this is the global configuration
111
-
112
- Returns
113
- -------
114
- dict : the modified set of kwargs
115
- """
116
- if conf_dict is None:
117
- conf_dict = conf
118
- protos = cls.protocol if isinstance(cls.protocol, (tuple, list)) else [cls.protocol]
119
- kw = {}
120
- for proto in protos:
121
- # default kwargs from the current state of the config
122
- if proto in conf_dict:
123
- kw.update(conf_dict[proto])
124
- # explicit kwargs always win
125
- kw.update(**kwargs)
126
- kwargs = kw
127
- return kwargs
128
-
129
-
130
- set_conf_files(conf_dir, conf)
131
- set_conf_env(conf)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/layouts.py DELETED
@@ -1,393 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import warnings
4
- from typing import TYPE_CHECKING, Literal
5
-
6
- from gradio_client.documentation import document, set_documentation_group
7
-
8
- from gradio.blocks import BlockContext
9
- from gradio.deprecation import warn_style_method_deprecation
10
- from gradio.events import Changeable, Selectable
11
-
12
- if TYPE_CHECKING:
13
- from gradio.blocks import Block
14
-
15
- set_documentation_group("layout")
16
-
17
-
18
- @document()
19
- class Row(BlockContext):
20
- """
21
- Row is a layout element within Blocks that renders all children horizontally.
22
- Example:
23
- with gr.Blocks() as demo:
24
- with gr.Row():
25
- gr.Image("lion.jpg", scale=2)
26
- gr.Image("tiger.jpg", scale=1)
27
- demo.launch()
28
- Guides: controlling-layout
29
- """
30
-
31
- def __init__(
32
- self,
33
- *,
34
- variant: Literal["default", "panel", "compact"] = "default",
35
- visible: bool = True,
36
- elem_id: str | None = None,
37
- equal_height: bool = True,
38
- **kwargs,
39
- ):
40
- """
41
- Parameters:
42
- variant: row type, 'default' (no background), 'panel' (gray background color and rounded corners), or 'compact' (rounded corners and no internal gap).
43
- visible: If False, row will be hidden.
44
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
45
- equal_height: If True, makes every child element have equal height
46
- """
47
- self.variant = variant
48
- self.equal_height = equal_height
49
- if variant == "compact":
50
- self.allow_expected_parents = False
51
- super().__init__(visible=visible, elem_id=elem_id, **kwargs)
52
-
53
- def get_config(self):
54
- return {
55
- "type": "row",
56
- "variant": self.variant,
57
- "equal_height": self.equal_height,
58
- **super().get_config(),
59
- }
60
-
61
- @staticmethod
62
- def update(
63
- visible: bool | None = None,
64
- ):
65
- return {
66
- "visible": visible,
67
- "__type__": "update",
68
- }
69
-
70
- def style(
71
- self,
72
- *,
73
- equal_height: bool | None = None,
74
- **kwargs,
75
- ):
76
- """
77
- Styles the Row.
78
- Parameters:
79
- equal_height: If True, makes every child element have equal height
80
- """
81
- warn_style_method_deprecation()
82
- if equal_height is not None:
83
- self.equal_height = equal_height
84
- return self
85
-
86
-
87
- @document()
88
- class Column(BlockContext):
89
- """
90
- Column is a layout element within Blocks that renders all children vertically. The widths of columns can be set through the `scale` and `min_width` parameters.
91
- If a certain scale results in a column narrower than min_width, the min_width parameter will win.
92
- Example:
93
- with gr.Blocks() as demo:
94
- with gr.Row():
95
- with gr.Column(scale=1):
96
- text1 = gr.Textbox()
97
- text2 = gr.Textbox()
98
- with gr.Column(scale=4):
99
- btn1 = gr.Button("Button 1")
100
- btn2 = gr.Button("Button 2")
101
- Guides: controlling-layout
102
- """
103
-
104
- def __init__(
105
- self,
106
- *,
107
- scale: int = 1,
108
- min_width: int = 320,
109
- variant: Literal["default", "panel", "compact"] = "default",
110
- visible: bool = True,
111
- elem_id: str | None = None,
112
- **kwargs,
113
- ):
114
- """
115
- Parameters:
116
- scale: relative width compared to adjacent Columns. For example, if Column A has scale=2, and Column B has scale=1, A will be twice as wide as B.
117
- min_width: minimum pixel width of Column, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in a column narrower than min_width, the min_width parameter will be respected first.
118
- variant: column type, 'default' (no background), 'panel' (gray background color and rounded corners), or 'compact' (rounded corners and no internal gap).
119
- visible: If False, column will be hidden.
120
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
121
- """
122
- self.scale = scale
123
- self.min_width = min_width
124
- self.variant = variant
125
- if variant == "compact":
126
- self.allow_expected_parents = False
127
- super().__init__(visible=visible, elem_id=elem_id, **kwargs)
128
-
129
- def get_config(self):
130
- return {
131
- "type": "column",
132
- "variant": self.variant,
133
- "scale": self.scale,
134
- "min_width": self.min_width,
135
- **super().get_config(),
136
- }
137
-
138
- @staticmethod
139
- def update(
140
- variant: str | None = None,
141
- visible: bool | None = None,
142
- ):
143
- return {
144
- "variant": variant,
145
- "visible": visible,
146
- "__type__": "update",
147
- }
148
-
149
-
150
- class Tabs(BlockContext, Changeable, Selectable):
151
- """
152
- Tabs is a layout element within Blocks that can contain multiple "Tab" Components.
153
- """
154
-
155
- def __init__(
156
- self,
157
- *,
158
- selected: int | str | None = None,
159
- visible: bool = True,
160
- elem_id: str | None = None,
161
- **kwargs,
162
- ):
163
- """
164
- Parameters:
165
- selected: The currently selected tab. Must correspond to an id passed to the one of the child TabItems. Defaults to the first TabItem.
166
- visible: If False, Tabs will be hidden.
167
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
168
- """
169
- BlockContext.__init__(self, visible=visible, elem_id=elem_id, **kwargs)
170
- Changeable.__init__(self)
171
- Selectable.__init__(self)
172
- self.selected = selected
173
-
174
- def get_config(self):
175
- return {"selected": self.selected, **super(BlockContext, self).get_config()}
176
-
177
- @staticmethod
178
- def update(
179
- selected: int | str | None = None,
180
- ):
181
- return {
182
- "selected": selected,
183
- "__type__": "update",
184
- }
185
-
186
-
187
- @document()
188
- class Tab(BlockContext, Selectable):
189
- """
190
- Tab (or its alias TabItem) is a layout element. Components defined within the Tab will be visible when this tab is selected tab.
191
- Example:
192
- with gr.Blocks() as demo:
193
- with gr.Tab("Lion"):
194
- gr.Image("lion.jpg")
195
- gr.Button("New Lion")
196
- with gr.Tab("Tiger"):
197
- gr.Image("tiger.jpg")
198
- gr.Button("New Tiger")
199
- Guides: controlling-layout
200
- """
201
-
202
- def __init__(
203
- self,
204
- label: str,
205
- *,
206
- id: int | str | None = None,
207
- elem_id: str | None = None,
208
- **kwargs,
209
- ):
210
- """
211
- Parameters:
212
- label: The visual label for the tab
213
- id: An optional identifier for the tab, required if you wish to control the selected tab from a predict function.
214
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
215
- """
216
- BlockContext.__init__(self, elem_id=elem_id, **kwargs)
217
- Selectable.__init__(self)
218
- self.label = label
219
- self.id = id
220
-
221
- def get_config(self):
222
- return {
223
- "label": self.label,
224
- "id": self.id,
225
- **super(BlockContext, self).get_config(),
226
- }
227
-
228
- def get_expected_parent(self) -> type[Tabs]:
229
- return Tabs
230
-
231
- def get_block_name(self):
232
- return "tabitem"
233
-
234
-
235
- TabItem = Tab
236
-
237
-
238
- @document()
239
- class Group(BlockContext):
240
- """
241
- Group is a layout element within Blocks which groups together children so that
242
- they do not have any padding or margin between them.
243
- Example:
244
- with gr.Group():
245
- gr.Textbox(label="First")
246
- gr.Textbox(label="Last")
247
- """
248
-
249
- def __init__(
250
- self,
251
- *,
252
- visible: bool = True,
253
- elem_id: str | None = None,
254
- **kwargs,
255
- ):
256
- """
257
- Parameters:
258
- visible: If False, group will be hidden.
259
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
260
- """
261
- super().__init__(visible=visible, elem_id=elem_id, **kwargs)
262
-
263
- def get_config(self):
264
- return {"type": "group", **super().get_config()}
265
-
266
- @staticmethod
267
- def update(
268
- visible: bool | None = None,
269
- ):
270
- return {
271
- "visible": visible,
272
- "__type__": "update",
273
- }
274
-
275
-
276
- @document()
277
- class Box(BlockContext):
278
- """
279
- Box is a a layout element which places children in a box with rounded corners and
280
- some padding around them.
281
- Example:
282
- with gr.Box():
283
- gr.Textbox(label="First")
284
- gr.Textbox(label="Last")
285
- """
286
-
287
- def __init__(
288
- self,
289
- *,
290
- visible: bool = True,
291
- elem_id: str | None = None,
292
- **kwargs,
293
- ):
294
- """
295
- Parameters:
296
- visible: If False, box will be hidden.
297
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
298
- """
299
- warnings.warn("gr.Box is deprecated. Use gr.Group instead.", DeprecationWarning)
300
- super().__init__(visible=visible, elem_id=elem_id, **kwargs)
301
-
302
- def get_config(self):
303
- return {"type": "box", **super().get_config()}
304
-
305
- @staticmethod
306
- def update(
307
- visible: bool | None = None,
308
- ):
309
- return {
310
- "visible": visible,
311
- "__type__": "update",
312
- }
313
-
314
- def style(self, **kwargs):
315
- warn_style_method_deprecation()
316
- return self
317
-
318
-
319
- class Form(BlockContext):
320
- def __init__(self, *, scale: int = 0, min_width: int = 0, **kwargs):
321
- """
322
- Parameters:
323
- scale: relative width compared to adjacent Columns. For example, if Column A has scale=2, and Column B has scale=1, A will be twice as wide as B.
324
- min_width: minimum pixel width of Column, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in a column narrower than min_width, the min_width parameter will be respected first.
325
- """
326
- self.scale = scale
327
- self.min_width = min_width
328
- super().__init__(**kwargs)
329
-
330
- def add_child(self, child: Block):
331
- if isinstance(self.parent, Row):
332
- scale = getattr(child, "scale", None)
333
- self.scale += 1 if scale is None else scale
334
- self.min_width += getattr(child, "min_width", 0) or 0
335
- super().add_child(child)
336
-
337
- def get_config(self):
338
- return {
339
- "type": "form",
340
- "scale": self.scale,
341
- "min_width": self.min_width,
342
- **super().get_config(),
343
- }
344
-
345
-
346
- @document()
347
- class Accordion(BlockContext):
348
- """
349
- Accordion is a layout element which can be toggled to show/hide the contained content.
350
- Example:
351
- with gr.Accordion("See Details"):
352
- gr.Markdown("lorem ipsum")
353
- """
354
-
355
- def __init__(
356
- self,
357
- label,
358
- *,
359
- open: bool = True,
360
- visible: bool = True,
361
- elem_id: str | None = None,
362
- **kwargs,
363
- ):
364
- """
365
- Parameters:
366
- label: name of accordion section.
367
- open: if True, accordion is open by default.
368
- elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
369
- """
370
- self.label = label
371
- self.open = open
372
- super().__init__(visible=visible, elem_id=elem_id, **kwargs)
373
-
374
- def get_config(self):
375
- return {
376
- "type": "accordion",
377
- "open": self.open,
378
- "label": self.label,
379
- **super().get_config(),
380
- }
381
-
382
- @staticmethod
383
- def update(
384
- open: bool | None = None,
385
- label: str | None = None,
386
- visible: bool | None = None,
387
- ):
388
- return {
389
- "visible": visible,
390
- "label": label,
391
- "open": open,
392
- "__type__": "update",
393
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpx/_decoders.py DELETED
@@ -1,324 +0,0 @@
1
- """
2
- Handlers for Content-Encoding.
3
-
4
- See: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Encoding
5
- """
6
- import codecs
7
- import io
8
- import typing
9
- import zlib
10
-
11
- from ._compat import brotli
12
- from ._exceptions import DecodingError
13
-
14
-
15
- class ContentDecoder:
16
- def decode(self, data: bytes) -> bytes:
17
- raise NotImplementedError() # pragma: no cover
18
-
19
- def flush(self) -> bytes:
20
- raise NotImplementedError() # pragma: no cover
21
-
22
-
23
- class IdentityDecoder(ContentDecoder):
24
- """
25
- Handle unencoded data.
26
- """
27
-
28
- def decode(self, data: bytes) -> bytes:
29
- return data
30
-
31
- def flush(self) -> bytes:
32
- return b""
33
-
34
-
35
- class DeflateDecoder(ContentDecoder):
36
- """
37
- Handle 'deflate' decoding.
38
-
39
- See: https://stackoverflow.com/questions/1838699
40
- """
41
-
42
- def __init__(self) -> None:
43
- self.first_attempt = True
44
- self.decompressor = zlib.decompressobj()
45
-
46
- def decode(self, data: bytes) -> bytes:
47
- was_first_attempt = self.first_attempt
48
- self.first_attempt = False
49
- try:
50
- return self.decompressor.decompress(data)
51
- except zlib.error as exc:
52
- if was_first_attempt:
53
- self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
54
- return self.decode(data)
55
- raise DecodingError(str(exc)) from exc
56
-
57
- def flush(self) -> bytes:
58
- try:
59
- return self.decompressor.flush()
60
- except zlib.error as exc: # pragma: no cover
61
- raise DecodingError(str(exc)) from exc
62
-
63
-
64
- class GZipDecoder(ContentDecoder):
65
- """
66
- Handle 'gzip' decoding.
67
-
68
- See: https://stackoverflow.com/questions/1838699
69
- """
70
-
71
- def __init__(self) -> None:
72
- self.decompressor = zlib.decompressobj(zlib.MAX_WBITS | 16)
73
-
74
- def decode(self, data: bytes) -> bytes:
75
- try:
76
- return self.decompressor.decompress(data)
77
- except zlib.error as exc:
78
- raise DecodingError(str(exc)) from exc
79
-
80
- def flush(self) -> bytes:
81
- try:
82
- return self.decompressor.flush()
83
- except zlib.error as exc: # pragma: no cover
84
- raise DecodingError(str(exc)) from exc
85
-
86
-
87
- class BrotliDecoder(ContentDecoder):
88
- """
89
- Handle 'brotli' decoding.
90
-
91
- Requires `pip install brotlipy`. See: https://brotlipy.readthedocs.io/
92
- or `pip install brotli`. See https://github.com/google/brotli
93
- Supports both 'brotlipy' and 'Brotli' packages since they share an import
94
- name. The top branches are for 'brotlipy' and bottom branches for 'Brotli'
95
- """
96
-
97
- def __init__(self) -> None:
98
- if brotli is None: # pragma: no cover
99
- raise ImportError(
100
- "Using 'BrotliDecoder', but neither of the 'brotlicffi' or 'brotli' "
101
- "packages have been installed. "
102
- "Make sure to install httpx using `pip install httpx[brotli]`."
103
- ) from None
104
-
105
- self.decompressor = brotli.Decompressor()
106
- self.seen_data = False
107
- self._decompress: typing.Callable[[bytes], bytes]
108
- if hasattr(self.decompressor, "decompress"):
109
- # The 'brotlicffi' package.
110
- self._decompress = self.decompressor.decompress # pragma: no cover
111
- else:
112
- # The 'brotli' package.
113
- self._decompress = self.decompressor.process # pragma: no cover
114
-
115
- def decode(self, data: bytes) -> bytes:
116
- if not data:
117
- return b""
118
- self.seen_data = True
119
- try:
120
- return self._decompress(data)
121
- except brotli.error as exc:
122
- raise DecodingError(str(exc)) from exc
123
-
124
- def flush(self) -> bytes:
125
- if not self.seen_data:
126
- return b""
127
- try:
128
- if hasattr(self.decompressor, "finish"):
129
- # Only available in the 'brotlicffi' package.
130
-
131
- # As the decompressor decompresses eagerly, this
132
- # will never actually emit any data. However, it will potentially throw
133
- # errors if a truncated or damaged data stream has been used.
134
- self.decompressor.finish() # pragma: no cover
135
- return b""
136
- except brotli.error as exc: # pragma: no cover
137
- raise DecodingError(str(exc)) from exc
138
-
139
-
140
- class MultiDecoder(ContentDecoder):
141
- """
142
- Handle the case where multiple encodings have been applied.
143
- """
144
-
145
- def __init__(self, children: typing.Sequence[ContentDecoder]) -> None:
146
- """
147
- 'children' should be a sequence of decoders in the order in which
148
- each was applied.
149
- """
150
- # Note that we reverse the order for decoding.
151
- self.children = list(reversed(children))
152
-
153
- def decode(self, data: bytes) -> bytes:
154
- for child in self.children:
155
- data = child.decode(data)
156
- return data
157
-
158
- def flush(self) -> bytes:
159
- data = b""
160
- for child in self.children:
161
- data = child.decode(data) + child.flush()
162
- return data
163
-
164
-
165
- class ByteChunker:
166
- """
167
- Handles returning byte content in fixed-size chunks.
168
- """
169
-
170
- def __init__(self, chunk_size: typing.Optional[int] = None) -> None:
171
- self._buffer = io.BytesIO()
172
- self._chunk_size = chunk_size
173
-
174
- def decode(self, content: bytes) -> typing.List[bytes]:
175
- if self._chunk_size is None:
176
- return [content] if content else []
177
-
178
- self._buffer.write(content)
179
- if self._buffer.tell() >= self._chunk_size:
180
- value = self._buffer.getvalue()
181
- chunks = [
182
- value[i : i + self._chunk_size]
183
- for i in range(0, len(value), self._chunk_size)
184
- ]
185
- if len(chunks[-1]) == self._chunk_size:
186
- self._buffer.seek(0)
187
- self._buffer.truncate()
188
- return chunks
189
- else:
190
- self._buffer.seek(0)
191
- self._buffer.write(chunks[-1])
192
- self._buffer.truncate()
193
- return chunks[:-1]
194
- else:
195
- return []
196
-
197
- def flush(self) -> typing.List[bytes]:
198
- value = self._buffer.getvalue()
199
- self._buffer.seek(0)
200
- self._buffer.truncate()
201
- return [value] if value else []
202
-
203
-
204
- class TextChunker:
205
- """
206
- Handles returning text content in fixed-size chunks.
207
- """
208
-
209
- def __init__(self, chunk_size: typing.Optional[int] = None) -> None:
210
- self._buffer = io.StringIO()
211
- self._chunk_size = chunk_size
212
-
213
- def decode(self, content: str) -> typing.List[str]:
214
- if self._chunk_size is None:
215
- return [content]
216
-
217
- self._buffer.write(content)
218
- if self._buffer.tell() >= self._chunk_size:
219
- value = self._buffer.getvalue()
220
- chunks = [
221
- value[i : i + self._chunk_size]
222
- for i in range(0, len(value), self._chunk_size)
223
- ]
224
- if len(chunks[-1]) == self._chunk_size:
225
- self._buffer.seek(0)
226
- self._buffer.truncate()
227
- return chunks
228
- else:
229
- self._buffer.seek(0)
230
- self._buffer.write(chunks[-1])
231
- self._buffer.truncate()
232
- return chunks[:-1]
233
- else:
234
- return []
235
-
236
- def flush(self) -> typing.List[str]:
237
- value = self._buffer.getvalue()
238
- self._buffer.seek(0)
239
- self._buffer.truncate()
240
- return [value] if value else []
241
-
242
-
243
- class TextDecoder:
244
- """
245
- Handles incrementally decoding bytes into text
246
- """
247
-
248
- def __init__(self, encoding: str = "utf-8"):
249
- self.decoder = codecs.getincrementaldecoder(encoding)(errors="replace")
250
-
251
- def decode(self, data: bytes) -> str:
252
- return self.decoder.decode(data)
253
-
254
- def flush(self) -> str:
255
- return self.decoder.decode(b"", True)
256
-
257
-
258
- class LineDecoder:
259
- """
260
- Handles incrementally reading lines from text.
261
-
262
- Has the same behaviour as the stdllib splitlines, but handling the input iteratively.
263
- """
264
-
265
- def __init__(self) -> None:
266
- self.buffer: typing.List[str] = []
267
- self.trailing_cr: bool = False
268
-
269
- def decode(self, text: str) -> typing.List[str]:
270
- # See https://docs.python.org/3/library/stdtypes.html#str.splitlines
271
- NEWLINE_CHARS = "\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029"
272
-
273
- # We always push a trailing `\r` into the next decode iteration.
274
- if self.trailing_cr:
275
- text = "\r" + text
276
- self.trailing_cr = False
277
- if text.endswith("\r"):
278
- self.trailing_cr = True
279
- text = text[:-1]
280
-
281
- if not text:
282
- return []
283
-
284
- trailing_newline = text[-1] in NEWLINE_CHARS
285
- lines = text.splitlines()
286
-
287
- if len(lines) == 1 and not trailing_newline:
288
- # No new lines, buffer the input and continue.
289
- self.buffer.append(lines[0])
290
- return []
291
-
292
- if self.buffer:
293
- # Include any existing buffer in the first portion of the
294
- # splitlines result.
295
- lines = ["".join(self.buffer) + lines[0]] + lines[1:]
296
- self.buffer = []
297
-
298
- if not trailing_newline:
299
- # If the last segment of splitlines is not newline terminated,
300
- # then drop it from our output and start a new buffer.
301
- self.buffer = [lines.pop()]
302
-
303
- return lines
304
-
305
- def flush(self) -> typing.List[str]:
306
- if not self.buffer and not self.trailing_cr:
307
- return []
308
-
309
- lines = ["".join(self.buffer)]
310
- self.buffer = []
311
- self.trailing_cr = False
312
- return lines
313
-
314
-
315
- SUPPORTED_DECODERS = {
316
- "identity": IdentityDecoder,
317
- "gzip": GZipDecoder,
318
- "deflate": DeflateDecoder,
319
- "br": BrotliDecoder,
320
- }
321
-
322
-
323
- if brotli is None:
324
- SUPPORTED_DECODERS.pop("br") # pragma: no cover
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DataDoggo/Visionary/README.md DELETED
@@ -1,11 +0,0 @@
1
- ---
2
- title: Visionary
3
- emoji: 🐠
4
- colorFrom: yellow
5
- colorTo: red
6
- sdk: gradio
7
- app_file: app.py
8
- pinned: false
9
- ---
10
-
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Detomo/ai-comic-generation/src/lib/loadImage.ts DELETED
@@ -1,14 +0,0 @@
1
- export async function loadImage(image: string): Promise<HTMLImageElement> {
2
- const img = new Image();
3
- img.src = image;
4
-
5
- const imgOnLoad = () => {
6
- return new Promise<HTMLImageElement>((resolve, reject) => {
7
- img.onload = () => { resolve(img) };
8
- img.onerror = (err) => { reject(err) };
9
- })
10
- };
11
-
12
- const loadImg = await imgOnLoad();
13
- return loadImg
14
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DiegoLigtenberg/realtimespeech/instructions.md DELETED
@@ -1,15 +0,0 @@
1
- ## Whisper UI - Transcriptions, Summaries & Analytics
2
-
3
- ---
4
-
5
- #### Run Whisper
6
- - Add a YouTube URL or select a local file on the left
7
- - Select the right Whisper model supported by your machine (extra configs have other whisper params if you want to play around with them)
8
- - Select whether you want to summarize the video. If so, enter a minimum and maximum length for the summary (usually between 50 and 100 words). Note that only the first 8 minutes of the video can be summarized in the current version.
9
- - Click Save settings.
10
- - Click "Transcribe"
11
-
12
- Once a transcription is created, it will be retained as a session variable so you can navigate around raw, summarized and time-annotated output.
13
- However, if you refresh or add a new video, the old transcription will be replaced.
14
-
15
- ---