parquet-converter committed on
Commit
1b864a8
·
1 Parent(s): 467c56c

Update parquet files (step 3 of 476)

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Construct 3 Drift The Ultimate Guide to Creating a Racing Game with Skidding.md +0 -42
  2. spaces/1gistliPinn/ChatGPT4/Examples/Burnout Paradise Vanity Pack 2.0 23l !!BETTER!!.md +0 -50
  3. spaces/1gistliPinn/ChatGPT4/Examples/Diddy Dirty Money Last Train To.md +0 -6
  4. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Clans MOD APK Download Enjoy Unlimited Everything in the Latest Version of the Game.md +0 -120
  5. spaces/1phancelerku/anime-remove-background/Brotato Extatonion Mod The Best Way to Enjoy Brotato in 2023.md +0 -158
  6. spaces/1phancelerku/anime-remove-background/DolphiniOS A Guide to Download and Install Dolphin Emulator on iPhone without Jailbreak.md +0 -118
  7. spaces/1phancelerku/anime-remove-background/Download Drama Live and Enjoy the Best IPTV Player for Android.md +0 -135
  8. spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/modules.py +0 -522
  9. spaces/AIConsultant/MusicGen/audiocraft/grids/diffusion/__init__.py +0 -6
  10. spaces/AIConsultant/MusicGen/audiocraft/utils/notebook.py +0 -32
  11. spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/dtw.py +0 -162
  12. spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/clip_vit.py +0 -257
  13. spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/client/nodes/9.js +0 -1
  14. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Aivvm.py +0 -70
  15. spaces/AgentVerse/agentVerse/pokemon_server.py +0 -78
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/utils/PreLayoutChild.js +0 -10
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/bbcodetext/Factory.js +0 -13
  18. spaces/Amrrs/DragGan-Inversion/gui_utils/text_utils.py +0 -141
  19. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stochastic_karras_ve/__init__.py +0 -1
  20. spaces/Andy1621/UniFormerV2_mit_demo/README.md +0 -13
  21. spaces/Andy1621/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py +0 -11
  22. spaces/Andy1621/uniformer_image_detection/configs/htc/htc_r101_fpn_20e_coco.py +0 -5
  23. spaces/Andy1621/uniformer_image_detection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py +0 -52
  24. spaces/Andy1621/uniformer_image_detection/tools/dataset_converters/cityscapes.py +0 -151
  25. spaces/Andyrasika/Andyrasika-avatar_diffusion/README.md +0 -12
  26. spaces/Anmol12385/chat123/app.py +0 -55
  27. spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/ui_main.py +0 -12
  28. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/fileio/__init__.py +0 -11
  29. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/border_align.py +0 -109
  30. spaces/Apk/anything-v3.0/app.py +0 -276
  31. spaces/Aristore/Warp/README.md +0 -13
  32. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/euckrprober.py +0 -47
  33. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/__init__.py +0 -59
  34. spaces/AzulaFire/SparkDebate/app.py +0 -218
  35. spaces/Benson/text-generation/Examples/Block Craft 3d Install.md +0 -76
  36. spaces/Benson/text-generation/Examples/Chat Para Aprender Ingls Apk.md +0 -69
  37. spaces/Benson/text-generation/Examples/Descargar Canciones De Pelculas Rojas.md +0 -105
  38. spaces/Biaolin/stabilityai-FreeWilly1-Delta-SafeTensor/README.md +0 -12
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py +0 -0
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/msvc.py +0 -1703
  41. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/train_net.py +0 -117
  42. spaces/CVPR/GFPGAN-example/gfpgan/archs/arcface_arch.py +0 -245
  43. spaces/CVPR/LIVE/thrust/cmake/ThrustUtilities.cmake +0 -25
  44. spaces/CVPR/WALT/mmdet/core/bbox/demodata.py +0 -41
  45. spaces/CVPR/WALT/mmdet/models/necks/pafpn.py +0 -142
  46. spaces/Cyril666/my_abi/callbacks.py +0 -360
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/Image-003ee87c.css +0 -1
  48. spaces/DarshanMM/OpenAICodexSummarizer/app.py +0 -13
  49. spaces/Datasculptor/DescriptionGPT/tools/dump_clip_features.py +0 -116
  50. spaces/DeclK/pose/tools/visualizer.py +0 -346
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Construct 3 Drift The Ultimate Guide to Creating a Racing Game with Skidding.md DELETED
@@ -1,42 +0,0 @@
1
-
2
- <h1>How to Create a Drift Game with Construct 3</h1>
3
- <p>Construct 3 is a powerful and easy-to-use 2D game engine that allows you to create stunning games with drag-and-drop features. In this article, we will show you how to create a drift game with Construct 3, using the Car behavior and some basic events. A drift game is a racing game where you have to score points by drifting through the racing circuit. Drifting is a technique where the driver intentionally oversteers the car, causing it to slide sideways while maintaining control.</p>
4
- <h2>construct 3 drift</h2><br /><p><b><b>DOWNLOAD</b> &gt;&gt;&gt; <a href="https://byltly.com/2uKxzf">https://byltly.com/2uKxzf</a></b></p><br /><br />
5
- <h2>What You Need</h2>
6
- <p>To follow this tutorial, you will need:</p>
7
- <ul>
8
- <li>A Construct 3 account. You can sign up for free <a href="https://www.construct.net/en/free-online-trial">here</a>.</li>
9
- <li>A basic knowledge of Construct 3 interface and features. You can learn more from the <a href="https://www.construct.net/en/make-games/manuals/construct-3">manual</a> and the <a href="https://www.construct.net/en/tutorials">tutorials</a>.</li>
10
- <li>A sprite of a car and a background image of a racing track. You can use your own assets or download some from the <a href="https://www.construct.net/en/assets">store</a>.</li>
11
- </ul>
12
- <h2>Step 1: Create a New Project</h2>
13
- <p>Open Construct 3 and click on New project. Choose an empty project and name it Drift Game. Set the layout size to 800 x 600 pixels and the window size to the same. Click on Create.</p>
14
- <h2>Step 2: Add the Car Sprite</h2>
15
- <p>In the Project Bar, right-click on Object types and select Insert new object. Choose Sprite and name it Car. Click on Insert.</p>
16
- <p>Double-click on the Car sprite to open the Image Editor. Import your car image or draw your own. Make sure the origin point is at the center of the car. Close the Image Editor.</p>
17
- <p></p>
18
- <p>Drag and drop the Car sprite to the layout. Position it at the bottom center of the screen.</p>
19
- <h2>Step 3: Add the Car Behavior</h2>
20
- <p>Select the Car sprite and click on Behaviors in the Properties Bar. Click on Add behavior and choose Car from the list. Click on Add.</p>
21
- <p>The Car behavior allows an object to accelerate forwards and backwards and have steering. It also has a simple "drift" feature where the object can "skid" around corners (by pointing in a different direction to that it is moving in). You can adjust the properties of the Car behavior according to your preference. For this tutorial, we will use these values:</p>
22
- <table>
23
- <tr><th>Property</th><th>Value</th></tr>
24
- <tr><td>Max speed</td><td>300</td></tr>
25
- <tr><td>Acceleration</td><td>500</td></tr>
26
- <tr><td>Deceleration</td><td>500</td></tr>
27
- <tr><td>Steer speed</td><td>200</td></tr>
28
- <tr><td>Drift recover</td><td>100</td></tr>
29
- <tr><td>Friction</td><td>0.5</td></tr>
30
- <tr><td>Turn while stopped</td><td>No</td></tr>
31
- <tr><td>Set angle</td><td>No</td></tr>
32
- <tr><td>Default controls</td><td>No</td></tr>
33
- <tr><td>Enabled</td><td>Yes</td></tr>
34
- </table>
35
- <h2>Step 4: Add the Background Image</h2>
36
- <p>In the Project Bar, right-click on Object types and select Insert new object. Choose Tiled Background and name it Track. Click on Insert.</p>
37
- <p>Double-click on the Track object to open the Image Editor. Import your background image or draw your own. Close the Image Editor.</p>
38
- <p>Drag and drop the Track object to the layout. Resize it to cover the whole layout.</p>
39
- <h2>Step 5: Add Some Events</h2>
40
- <p>In order to control the car movement and score points by drifting, we need to add some events. Events are like instructions that tell Construct 3 what to do when certain conditions are met.</</p> ddb901b051<br />
41
- <br />
42
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Burnout Paradise Vanity Pack 2.0 23l !!BETTER!!.md DELETED
@@ -1,50 +0,0 @@
1
- <br />
2
- <h1>Burnout Paradise Vanity Pack 2.0 23l: The Best Way to Enjoy the Game</h1>
3
- <p>If you love racing games, you probably know about Burnout Paradise, the open-world game that lets you drive, crash, and explore a huge city. But did you know that you can make the game even better with a mod called Vanity Pack 2.0 23l?</p>
4
- <h2>Burnout Paradise Vanity Pack 2.0 23l</h2><br /><p><b><b>Download File</b> &#10027;&#10027;&#10027; <a href="https://imgfil.com/2uxYlk">https://imgfil.com/2uxYlk</a></b></p><br /><br />
5
- <p>Vanity Pack 2.0 23l is a mod that adds new cars, maps, and features to Burnout Paradise. It is the latest version of the mod, and it has some amazing improvements over the previous ones. Here are some of the things you can do with Vanity Pack 2.0 23l:</p>
6
- <ul>
7
- <li>Drive over 100 new cars, including motorcycles, trucks, buses, and even planes.</li>
8
- <li>Explore Big Surf Island, a new map that was only available on consoles before. It has new challenges, events, and secrets to discover.</li>
9
- <li>Customize your cars with new paint jobs, decals, wheels, and more.</li>
10
- <li>Use new features like speedometer, damage indicator, turbo boost meter, and camera modes.</li>
11
- <li>Play online with other players who have the mod installed.</li>
12
- </ul>
13
- <h2>How to Install and Play with Vanity Pack 2.0 23l</h2>
14
- <p>Installing Vanity Pack 2.0 23l is easy and fast. All you need is a copy of Burnout Paradise on your PC and an internet connection. Here are the steps to follow:</p>
15
- <ol>
16
- <li>Download Vanity Pack 2.0 23l from <a href="https://www.nexusmods.com/burnoutparadise/mods/1">this link</a>.</li>
17
- <li>Extract the zip file to your Burnout Paradise folder.</li>
18
- <li>Run VanityPack.exe and follow the instructions.</li>
19
- <li>Launch Burnout Paradise from Steam or Origin.</li>
20
- <li>Enjoy the mod!</li>
21
- </ol>
22
- <p>Note: You may need to disable your antivirus or firewall before installing the mod, as some of them may block it. You can also backup your save files before installing the mod, just in case something goes wrong.</p>
23
- <p></p>
24
- <h3>Why You Should Try Vanity Pack 2.0 23l</h3>
25
- <p>Vanity Pack 2.0 23l is not just a mod, it is a whole new experience for Burnout Paradise fans. It adds so much content and variety to the game that you will never get bored of it. You can drive new cars, explore new places, customize your vehicles, and have fun with other players online.</p>
26
- <p>Vanity Pack 2.0 23l is also very stable and compatible with the latest version of Burnout Paradise. It does not affect the performance or the graphics of the game. It only enhances them with new features and options.</p>
27
- <p>If you want to see what Vanity Pack 2.0 23l can do for yourself, you can watch this video:</p>
28
- <iframe width="560" height="315" src="https://www.youtube.com/embed/9w6Zxq6g8nE" frameborder="0" allowfullscreen></iframe>
29
- <p>Vanity Pack 2.0 23l is the best way to enjoy Burnout Paradise on PC. It is free, easy to install, and fun to play. If you are a fan of the game, you should definitely give it a try.</p>
30
- <h4>How to Uninstall Vanity Pack 2.0 23l</h4>
31
- <p>If you want to uninstall Vanity Pack 2.0 23l for any reason, you can do it easily and safely. Here are the steps to follow:</p>
32
- <ol>
33
- <li>Run VanityPack.exe and click on "Uninstall".</li>
34
- <li>Wait for the process to finish and close the program.</li>
35
- <li>Delete the VanityPack folder from your Burnout Paradise folder.</li>
36
- <li>Launch Burnout Paradise from Steam or Origin.</li>
37
- <li>The mod is now uninstalled.</li>
38
- </ol>
39
- <p>Note: You may need to restore your save files from the backup you made before installing the mod, if you want to keep your progress.</p>
40
- <h5>Frequently Asked Questions about Vanity Pack 2.0 23l</h5>
41
- <p>Here are some of the most common questions and answers about Vanity Pack 2.0 23l:</p>
42
- <ul>
43
- <li><b>Is Vanity Pack 2.0 23l safe to use?</b> Yes, Vanity Pack 2.0 23l is safe to use and does not contain any viruses or malware. However, you should always download it from a trusted source and disable your antivirus or firewall before installing it, as some of them may block it.</li>
44
- <li><b>Is Vanity Pack 2.0 23l compatible with other mods?</b> Yes, Vanity Pack 2.0 23l is compatible with most of the other mods for Burnout Paradise, such as Burnout Hints, Burnout Config Tool, and Burnout Paradise Remastered Mod. However, you should always check the compatibility before installing any mod and follow the instructions carefully.</li>
45
- <li><b>Is Vanity Pack 2.0 23l legal to use?</b> Yes, Vanity Pack 2.0 23l is legal to use and does not violate any copyright or trademark laws. However, you should always respect the original creators of the game and the mod and give them credit for their work.</li>
46
- <li><b>Can I play online with Vanity Pack 2.0 23l?</b> Yes, you can play online with Vanity Pack 2.0 23l, but only with other players who have the mod installed. If you try to join a server that does not have the mod, you will be kicked out or banned. You can also create your own server and invite your friends who have the mod.</li>
47
- <li><b>Where can I find more information about Vanity Pack 2.0 23l?</b> You can find more information about Vanity Pack 2.0 23l on its official website, its Nexus Mods page, its YouTube channel, and its Discord server. You can also contact the developers and report any bugs or issues on their forums or email.</li>
48
- </ul></p> 3cee63e6c2<br />
49
- <br />
50
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1gistliPinn/ChatGPT4/Examples/Diddy Dirty Money Last Train To.md DELETED
@@ -1,6 +0,0 @@
1
- <h2>Diddy Dirty Money Last Train To</h2><br /><p><b><b>Download Zip</b> &#9881; <a href="https://imgfil.com/2uxWYp">https://imgfil.com/2uxWYp</a></b></p><br /><br />
2
- <br />
3
- aaccfb2cb3<br />
4
- <br />
5
- <br />
6
- <p></p>
 
 
 
 
 
 
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Clash of Clans MOD APK Download Enjoy Unlimited Everything in the Latest Version of the Game.md DELETED
@@ -1,120 +0,0 @@
1
- <br />
2
- <h1>Clash of Clans Mod APK Download Unlimited Everything Latest Version</h1>
3
- <p>Are you a fan of strategy games that challenge your mind and skills? Do you want to build your own village, train your troops, and fight against other players from around the world? If yes, then you might have heard of Clash of Clans, one of the most popular mobile games ever. But what if you want to enjoy the game without any limitations or restrictions? What if you want to have unlimited resources, gems, troops, and everything else in the game? Well, that's where Clash of Clans Mod APK comes in. In this article, we will tell you everything you need to know about Clash of Clans Mod APK, including what it is, how to download it, and what are its benefits and risks. So, let's get started!</p>
4
- <h2>What is Clash of Clans?</h2>
5
- <p>Clash of Clans is a freemium strategy game developed by Supercell, a Finnish game company. It was released in 2012 for iOS and in 2013 for Android devices. The game has over 500 million downloads on Google Play Store and is one of the highest-grossing apps on both platforms. The game has also won several awards and accolades, such as the Best Mobile Game at the 2014 BAFTA Games Awards and the Best Multiplayer Game at the 2015 Pocket Gamer Awards.</p>
6
- <h2>clash of clans mod apk download unlimited everything latest version</h2><br /><p><b><b>Download</b> &#10037; <a href="https://urlin.us/2uSRZn">https://urlin.us/2uSRZn</a></b></p><br /><br />
7
- <h3>Features of Clash of Clans</h3>
8
- <p>Clash of Clans has many features that make it an addictive and fun game to play. Some of these features are:</p>
9
- <ul>
10
- <li><b>Build your village and lead the people to a brighter future.</b> You can create your own village from scratch, customize it with various buildings, defenses, walls, traps, and decorations. You can also upgrade your buildings and defenses to make them stronger and more efficient. You can also choose a name and a flag for your village and clan.</li>
11
- <li><b>Defend your people from enemies’ attacks and raid other’s bases.</b> You can train different types of troops, such as barbarians, archers, giants, wizards, dragons, and more. You can also use spells, siege machines, heroes, and pets to enhance your attacks and defenses. You can also join or create a clan with other players and participate in clan wars, clan games, clan war leagues, and friendly wars.</li>
12
- <li><b>Take on your epic adventure against the Goblin King.</b> You can play the single-player campaign mode where you have to fight against the Goblin King and his army in various missions. You can also explore the Builder Base, a mysterious world where you can build a second village with different buildings and troops.</li>
13
- <li><b>Make use of the unique powers and strategies to join epic battles.</b> You can plan your unique battle strategy with countless combinations of troops, spells, heroes, siege machines, and pets. You can also use different tactics and formations to attack or defend against different types of enemies. You can also compete with the best players from around the world and rise to the top of the leaderboard in Legend League.</li>
14
- <li><b>Discover many upgrade options to strengthen your clan.</b> You can research upgrades in your laboratory to make your troops, spells, siege machines, heroes, and pets even more powerful. You can also collect resources and loot from other players to upgrade your buildings and defenses. You can also use magic items to speed up your progress or boost your performance.</li>
15
- <li><b>Enjoy various events and challenges.</b> You can participate in various events and challenges that offer special rewards and bonuses. You can also complete achievements and tasks to earn gems, resources, magic items, hero skins, sceneries, and more.</li>
16
- <li><b>Explore the awesome Clan gameplay.</b> You can join a clan or create your own with other players and enjoy the social aspect of the game. You can chat with your clanmates, donate and request troops, share replays and strategies, and support each other. You can also participate in clan wars, clan games, clan war leagues, and friendly wars to earn clan perks, rewards, and trophies.</li>
17
- </ul>
18
- <h3>How to play Clash of Clans</h3>
19
- <p>Clash of Clans is easy to play but hard to master. Here are some basic steps to get you started:</p>
20
- <ol>
21
- <li><b>Download and install the game.</b> You can download Clash of Clans from Google Play Store or App Store for free. You can also use an emulator to play it on your PC or Mac. Once you install the game, you can create your account and choose your name and flag.</li>
22
- <li><b>Complete the tutorial.</b> The game will guide you through the basics of building your village, training your troops, and attacking other players. You can also watch some videos and tips to learn more about the game.</li>
23
- <li><b>Build and upgrade your village.</b> You can use the resources you collect from mines, collectors, storages, and raids to build and upgrade your buildings and defenses. You can also use gems to speed up the process or buy more resources. You can also customize your village with various decorations and sceneries.</li>
24
- <li><b>Train and upgrade your troops.</b> You can use the barracks, dark barracks, siege workshop, hero altar, pet house, and laboratory to train and upgrade your troops, siege machines, heroes, and pets. You can also use gems to speed up the process or buy more troops. You can also choose different army compositions and strategies depending on your preference and target.</li>
25
- <li><b>Attack and defend.</b> You can use the map or the multiplayer mode to find and attack other players' bases. You can also use the revenge option to attack those who attacked you before. You can also join or create a clan and participate in clan wars, clan games, clan war leagues, and friendly wars. You can also play the single-player mode or the builder base mode for more fun and rewards.</li>
26
- <li><b>Have fun and enjoy the game.</b> You can chat with other players, join a community, watch live streams, follow news and updates, participate in events and challenges, complete achievements and tasks, and more. You can also share your feedback and suggestions with the developers and help them improve the game.</li>
27
- </ol>
28
- <h2>What is Clash of Clans Mod APK?</h2>
29
- <p>Clash of Clans Mod APK is a modified version of the original game that allows you to have unlimited resources, gems, troops, and everything else in the game. It is not an official version of the game but a third-party application that is created by some developers or hackers. It is also not available on Google Play Store or App Store but on some websites or platforms that offer modded apps.</p>
30
- <h3>Benefits of Clash of Clans Mod APK</h3>
31
- <p>Clash of Clans Mod APK has some benefits that make it appealing to some players who want to have more fun and convenience in the game. Some of these benefits are:</p>
32
- <ul>
33
- <li><b>You can have unlimited resources.</b> You can have unlimited gold, elixir, dark elixir, gems, magic items, hero skins, sceneries, and more in the game. You can use them to build and upgrade your village, train and upgrade your troops, buy anything you want in the shop, and more. You don't have to worry about running out of resources or waiting for them to generate or collect.</li>
34
- <li><b>You can have unlimited troops.</b> You can have unlimited barbarians, archers, giants, wizards, dragons, and more in the game. You can also have unlimited siege machines, heroes, and pets. You can use them to attack and defend against any enemy you want. You don't have to worry about training time, housing space, or hero regeneration.</li>
35
- <li><b>You can have unlimited access.</b> You can have unlimited access to all the features, modes, events, and challenges in the game. You can also unlock all the buildings, defenses, troops, spells, siege machines, heroes, and pets in the game. You don't have to worry about level requirements, upgrade costs, or waiting time.</li>
36
- <li><b>You can have unlimited fun.</b> You can have unlimited fun and enjoyment in the game. You can experiment with different strategies, tactics, and formations. You can also test your skills and creativity against other players or bots. You don't have to worry about losing resources, trophies, or progress.</li>
37
- </ul>
38
- <h3>Risks of Clash of Clans Mod APK</h3>
39
- <p>Clash of Clans Mod APK has some risks that make it risky and dangerous to use. Some of these risks are:</p>
40
- <ul>
41
- <li><b>You can get banned.</b> You can get banned from the game if you use Clash of Clans Mod APK. The game has a strict anti-cheat system that detects and punishes any player who uses any modded or hacked app. You can lose your account, progress, and data permanently if you get caught.</li>
42
- <li><b>You can get infected.</b> You can get infected with malware or viruses if you download Clash of Clans Mod APK from an untrusted or unknown source. The modded app may contain harmful or malicious code that can damage your device or steal your personal information. You can also expose your device to hackers or cybercriminals who may exploit your vulnerabilities.</li>
43
- <li><b>You can get bored.</b> You can get bored of the game if you use Clash of Clans Mod APK. The game may lose its challenge, excitement, and satisfaction if you have everything unlimited and easy. You may also miss out on the original and authentic experience of the game that the developers intended for you.</li>
44
- </ul>
45
- <h2>How to download and install Clash of Clans Mod APK?</h2>
46
- <p>If you still want to try Clash of Clans Mod APK despite its risks, you need to follow some steps to download and install it on your device. Here are the steps:</p>
47
- <h3>Step by step guide</h3>
48
- <ol>
49
- <li><b>Backup your data.</b> You need to backup your data before you download and install Clash of Clans Mod APK. You can use Google Play Games or Supercell ID to save your progress and data in the cloud. You can also use a file manager or a backup app to copy your data to another device or storage.</li>
50
- <li><b>Uninstall the original game.</b> You need to uninstall the original game before you download and install Clash of Clans Mod APK. You can go to your device settings and find the app manager or application list. Then you can tap on Clash of Clans and select uninstall option.</li>
51
- <li><b>Download the modded app.</b> You need to download the modded app from a trusted or reliable source. You can search for Clash of Clans Mod APK on Google or any other search engine. Then you can choose a website or platform that offers the modded app. You can also check the reviews, ratings, comments, and feedbacks of other users who have downloaded the modded app before.</li>
52
- <li><b>Enable unknown sources.</b> You need to enable unknown sources before you install Clash of Clans Mod APK. You can go to your device settings and find the security or privacy option. Then you can toggle on the unknown sources option that allows you to install apps from sources other than Google Play Store or App Store.</li>
53
- <li><b>Install the modded app.</b> You need to install the modded app after you download it. You can go to your device file manager or downloads folder and find the modded app file. Then you can tap on it and select install option. You may need to grant some permissions or accept some terms and conditions before you install it.</li>
54
- <li><b>Launch the modded app.</b> You need to launch the modded app after you install it. You can go to your device home screen or app drawer and find the modded app icon. Then you can tap on it and open it. You may need to sign in with your account or create a new one before you play it.</li>
55
- </ol>
56
- <h3>Tips and tricks for using Clash of Clans Mod APK</h3>
57
- <p>If you want to use Clash of Clans Mod APK effectively and safely, you need to follow some tips and tricks. Here on your main account or device. You should use it on a secondary account or device that you don't care about losing. You should also avoid linking your modded account to your Google Play Games or Supercell ID. You should also clear your cache and data before and after using the modded app.</li>
58
- <li><b>Do not use the modded app online or with other players.</b> You should not use the modded app online or with other players. You should use it offline or in single-player mode only. You should also avoid joining or creating a clan or participating in any clan wars, clan games, clan war leagues, or friendly wars. You should also avoid attacking or defending against any real players or bots.</li>
59
- <li><b>Do not use the modded app for too long or too often.</b> You should not use the modded app for too long or too often. You should use it sparingly and occasionally only. You should also switch back to the original game from time to time and enjoy the game as it is meant to be played. You should also take breaks and rest your eyes and mind from the game.</li>
60
- </ul>
61
- <h2>Conclusion</h2>
62
- <p>Clash of Clans is a great game that offers a lot of fun and excitement to millions of players around the world. However, some players may want to have more freedom and convenience in the game by using Clash of Clans Mod APK, a modified version of the game that gives them unlimited everything. However, using Clash of Clans Mod APK is not without risks and drawbacks. It can get you banned, infected, or bored of the game. Therefore, you should be careful and responsible when using Clash of Clans Mod APK and follow some tips and tricks to use it effectively and safely.</p>
63
- <p>clash of clans hack apk free download full version<br />
64
- clash of clans modded apk unlimited gems and coins<br />
65
- download clash of clans mod apk latest version 2023<br />
66
- clash of clans cheat apk unlimited troops and spells<br />
67
- clash of clans mod apk offline no root<br />
68
- clash of clans hacked apk download for android<br />
69
- clash of clans mod apk unlimited money and elixir<br />
70
- clash of clans mod apk online with private server<br />
71
- clash of clans crack apk unlimited gold and dark elixir<br />
72
- download clash of clans mod apk for pc windows 10<br />
73
- clash of clans mod apk unlimited everything 2023<br />
74
- clash of clans hack apk download ios no jailbreak<br />
75
- clash of clans modded apk with builder base<br />
76
- clash of clans cheat apk unlimited resources and buildings<br />
77
- clash of clans mod apk no survey no human verification<br />
78
- clash of clans hacked apk download latest version 15.297.217<br />
79
- clash of clans mod apk unlimited heroes and clan castle<br />
80
- clash of clans mod apk online without update<br />
81
- clash of clans crack apk unlimited super troops and siege machines<br />
82
- download clash of clans mod apk for mac os x<br />
83
- clash of clans mod apk unlimited everything 2022<br />
84
- clash of clans hack apk download for pc no bluestacks<br />
85
- clash of clans modded apk with town hall 14<br />
86
- clash of clans cheat apk unlimited war stars and league medals<br />
87
- clash of clans mod apk offline with bots<br />
88
- clash of clans hacked apk download for ios no computer<br />
89
- clash of clans mod apk unlimited gems and elixir only<br />
90
- clash of clans mod apk online with friends<br />
91
- clash of clans crack apk unlimited magic items and potions<br />
92
- download clash of clans mod apk for pc windows 7<br />
93
- clash of clans mod apk unlimited everything 2021<br />
94
- clash of clans hack apk download android no root<br />
95
- clash of clans modded apk with pet house<br />
96
- clash of clans cheat apk unlimited builder potions and hammers<br />
97
- clash of clans mod apk offline mode<br />
98
- clash of clans hacked apk download for android 11<br />
99
- clash of clans mod apk unlimited gold and gems only<br />
100
- clash of clans mod apk online multiplayer<br />
101
- clash of clans crack apk unlimited clan games rewards and challenges<br />
102
- download clash of clans mod apk for pc windows 8.1</p>
103
- <h3>Summary of the article</h3>
104
- <p>In this article, we have covered the following topics:</p>
105
- <ul>
106
- <li>What is Clash of Clans and what are its features and how to play it?</li>
107
- <li>What is Clash of Clans Mod APK and what are its benefits and risks?</li>
108
- <li>How to download and install Clash of Clans Mod APK and what are some tips and tricks for using it?</li>
109
- </ul>
110
- <h3>FAQs</h3>
111
- <p>Here are some frequently asked questions about Clash of Clans Mod APK:</p>
112
- <ol>
113
- <li><b>Is Clash of Clans Mod APK safe to use?</b><br>No, Clash of Clans Mod APK is not safe to use. It can get you banned, infected, or bored of the game. It can also damage your device or steal your personal information. It is also illegal and unethical to use.</li>
114
- <li><b>Is Clash of Clans Mod APK free to download?</b><br>Yes, Clash of Clans Mod APK is free to download from some websites or platforms that offer modded apps. However, you should be careful and cautious when downloading it from an untrusted or unknown source.</li>
115
- <li><b>Can I play Clash of Clans Mod APK with my friends?</b><br>No, you cannot play Clash of Clans Mod APK with your friends. You can only play it offline or in single-player mode only. You cannot join or create a clan or participate in any clan wars, clan games, clan war leagues, or friendly wars. You cannot also attack or defend against any real players or bots.</li>
116
- <li><b>Can I update Clash of Clans Mod APK?</b><br>No, you cannot update Clash of Clans Mod APK. You can only use the version that you have downloaded. If you want to update the game, you have to uninstall the modded app and install the original game from Google Play Store or App Store.</li>
117
- <li><b>Can I restore my data from Clash of Clans Mod APK?</b><br>No, you cannot restore your data from Clash of Clans Mod APK. You can only backup your data before you download and install the modded app. You can also use Google Play Games or Supercell ID to save your progress and data in the cloud.</li>
118
- </ol></p> 197e85843d<br />
119
- <br />
120
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Brotato Extatonion Mod The Best Way to Enjoy Brotato in 2023.md DELETED
@@ -1,158 +0,0 @@
1
- <br />
2
- <h1>Brotato Extension Download: How to Install and Use the Extatonion Mod</h1>
3
- <p>If you are a fan of Brotato, a top-down arena shooter roguelite game where you play as a potato fighting off hordes of aliens, you might be interested in trying out some mods that add new content and features to the game. One of the most popular and well-made mods for Brotato is Extatonion, a mod that adds new characters, weapons, items, and unlocks to the game. In this article, we will show you how to download and install Extatonion, what are its features and benefits, and what are some alternatives to it.</p>
4
- <h2>What is Brotato?</h2>
5
- <h3>A top-down arena shooter roguelite game</h3>
6
- <p>Brotato is a game developed by Blobfish Games and published by Erabit Studios. It is a top-down arena shooter roguelite where you play as a potato wielding up to 6 weapons at a time to fight off multiple hordes of aliens. You can choose from a variety of traits and different items to create unique builds and survive until help arrives. The game has randomly generated levels, enemies, weapons, items, and bosses, making each run different and challenging.</p>
7
- <h2>brotato extension download</h2><br /><p><b><b>Download</b> >>>>> <a href="https://jinyurl.com/2uNQK2">https://jinyurl.com/2uNQK2</a></b></p><br /><br />
8
- <h3>A free game on Steam with positive reviews</h3>
9
- <p>Brotato is free to play on Steam, where it has Overwhelmingly Positive reviews from players who praise its gameplay, graphics, sound, humor, and replay value. The game was released in early access in 2022 and has been updated regularly with new content and features. The latest update (0.8) added modding and workshop support to the game, allowing players to create and share their own mods.</p>
10
- <h3>A game with modding and workshop support</h3>
11
- <p>Brotato uses a game engine called Godot, which is similar to Unity but easier to use. The game has modding support that allows players to create their own content using GodotSteam, a version of Godot with built-in Steam support. The game also has workshop support that allows players to browse, download, and subscribe to mods created by other players. However, not all mods are available on the workshop, as some modders prefer to host their mods on other platforms.</p>
12
- <h2>What is Extatonion?</h2>
13
- <h3>A mod that adds new content to Brotato</h3>
14
- <p>Extatonion is a mod created by Psina, a Brotato player and modder. It is a mod that adds new content to the game that doesn't really stand out from the original game. But this does not mean that it is unoriginal or boring; Psina tries to make new content original and balanced. The mod adds new characters, weapons, items, unlocks, enemies, bosses, levels, secrets, achievements, sounds, sprites, effects, mechanics, and more. The mod is constantly updated and improved by Psina, who listens to feedback and suggestions from the community.</p>
15
- <p>How to install Extatonion mod for Brotato<br />
16
- Brotato Extatonion mod guide and download link<br />
17
- Brotato modding tutorial and Extatonion mod review<br />
18
- Extatonion mod version 1.4.1 for Brotato 0.6.1.6<br />
19
- Brotato Extatonion mod new content and features<br />
20
- Download Extatonion mod for Brotato free on Steam<br />
21
- Brotato Extatonion mod update history and changelog<br />
22
- Extatonion mod best weapons and items for Brotato<br />
23
- Brotato Extatonion mod gameplay and tips<br />
24
- Extatonion mod compatibility and issues with Brotato<br />
25
- Brotato Extatonion mod showcase and feedback<br />
26
- Extatonion mod developer Psina and Brotato community<br />
27
- Brotato Extatonion mod summon class and bonuses<br />
28
- Extatonion mod new characters and unlocks for Brotato<br />
29
- Brotato Extatonion mod installation error and fix<br />
30
- Extatonion mod latest version download for Brotato<br />
31
- Brotato Extatonion mod Steam Workshop page and comments<br />
32
- Extatonion mod best builds and strategies for Brotato<br />
33
- Brotato Extatonion mod comparison with vanilla game<br />
34
- Extatonion mod future plans and suggestions for Brotato<br />
35
- Brotato Extatonion mod video tutorial and demonstration<br />
36
- Extatonion mod cheat codes and secrets for Brotato<br />
37
- Brotato Extatonion mod fan art and memes<br />
38
- Extatonion mod patch notes and bug fixes for Brotato<br />
39
- Brotato Extatonion mod wiki and FAQ page<br />
40
- Extatonion mod achievements and challenges for Brotato<br />
41
- Brotato Extatonion mod discord server and support<br />
42
- Extatonion mod custom maps and levels for Brotato<br />
43
- Brotato Extatonion mod speedrun and leaderboard<br />
44
- Extatonion mod Easter eggs and references for Brotato<br />
45
- Brotato Extatonion mod rating and review by players<br />
46
- Extatonion mod best combos and synergies for Brotato<br />
47
- Brotato Extatonion mod beginners guide and walkthrough<br />
48
- Extatonion mod advanced tips and tricks for Brotato<br />
49
- Brotato Extatonion mod multiplayer mode and co-op<br />
50
- Extatonion mod lore and backstory for Brotato characters<br />
51
- Brotato Extatonion mod soundtrack and sound effects<br />
52
- Extatonion mod graphics and performance optimization for Brotato<br />
53
- Brotato Extatonion mod controller support and settings<br />
54
- Extatonion mod alternative download sources for Brotato (not recommended)<br />
55
- Brotato Extatonion mod steam key giveaway and contest <br />
56
- Extatonion mod fun facts and trivia for Brotato fans <br />
57
- Brotato Extatonion mod interview with the developer Psina <br />
58
- Extatonion mod pros and cons for playing Brotato <br />
59
- Brotato Extatonion mod mods compatibility and load order <br />
60
- Extatonion mod best potato skins and cosmetics for Brotato <br />
61
- Brotato Extatonion mod news and announcements <br />
62
- Extatonion mod donation link and support for Psina <br />
63
- How to uninstall or disable the extationon Mod in brotaro</p>
64
- <h3>A mod that is updated regularly and compatible with the latest version of Brotato</h3>
65
- <p>Extatonion is one of the most active and updated mods for Brotato. Psina releases new updates every few weeks, adding new content and fixing bugs. The mod is also compatible with the latest version of Brotato (0.8), which means that it works with the modding and workshop support. Psina also makes sure that the mod is compatible with other popular mods, such as Potato Expansion Pack, Potato Plus, and Potato Overhaul.</p>
66
- <h3>A mod that has its own download sources and guide</h3>
67
- <p>Extatonion is not available on the workshop, as Psina prefers to host the mod on other platforms. The mod has its own GitHub page, where you can find the latest version of the mod, the changelog, the source code, and the license. The mod also has its own Discord server, where you can join the community, chat with Psina and other players, report bugs, give feedback, and request features. The mod also has its own installation guide, which explains how to download and install the mod step by step.</p>
68
- <h2>How to download and install Extatonion?</h2>
69
- <h3>Step 1: Download GodotSteam and GDRETools</h3>
70
- <p>The first step to install Extatonion is to download GodotSteam and GDRETools. GodotSteam is a version of Godot with built-in Steam support, which is required to run Brotato mods. GDRETools is a tool that allows you to decompile and recompile Brotato projects. You can download both tools from their respective GitHub pages:</p>
71
- <ul>
72
- <li><a href="">GodotSteam</a></li>
73
- <li><a href="">GDRETools</a></li>
74
- </ul>
75
- <p>Extract both tools to a folder of your choice. Make sure you have Steam installed and running on your computer.</p>
76
- <h3>Step 2: Decompile Brotato with GDRETools</h3>
77
- <p>The next step is to decompile Brotato with GDRETools. This will allow you to access the game files and modify them with Extatonion. To do this, follow these steps:</p>
78
- <ol>
79
- <li>Open GDRETools.exe.</li>
80
- <li>Select "Decompile" from the menu.</li>
81
- <li>Browse to your Steam folder and locate Brotato.exe (usually in Steam/steamapps/common/Brotato).</li>
82
- <li>Select a destination folder for the decompiled project (preferably a new folder).</li>
83
- <li>Click "Decompile" and wait for the process to finish.</li>
84
- </ol>
85
- <p>You should now have a folder with the decompiled project of Brotato.</p>
86
- <h3>Step 3: Download Extatonion from the official sources</h3>
87
- <p>The third step is to download Extatonion from the official sources. You can find the latest version of the mod on its GitHub page or its Discord server:</p>
88
- <ul>
89
- <li><a href="">Extatonion GitHub</a></li>
90
- <li><a href="">Extatonion Discord</a></li>
91
- </ul>
92
- <p>Download the zip file of the mod and extract it to a folder of your choice.</p>
93
- <h3>Step 4: Copy the mod files to the decompiled project folder</h3> <p>The fourth step is to copy the mod files to the decompiled project folder. This will overwrite some of the original game files with the modded ones. To do this, follow these steps:</p>
94
- <ol>
95
- <li>Open the folder where you extracted Extatonion.</li>
96
- <li>Select all the files and folders inside it.</li>
97
- <li>Copy them to the folder where you decompiled Brotato.</li>
98
- <li>Replace any existing files if prompted.</li>
99
- </ol>
100
- <p>You should now have a folder with the decompiled project of Brotato with Extatonion installed.</p>
101
- <h3>Step 5: Run Brotato in GodotSteam and enjoy the mod</h3>
102
- <p>The final step is to run Brotato in GodotSteam and enjoy the mod. To do this, follow these steps:</p>
103
- <ol>
104
- <li>Open GodotSteam.exe.</li>
105
- <li>Select "Import" from the menu.</li>
106
- <li>Browse to the folder where you decompiled Brotato with Extatonion.</li>
107
- <li>Select "project.godot" and click "Import & Edit".</li>
108
- <li>Click "Play" on the top right corner of the window.</li>
109
- </ol>
110
- <p>You should now be able to play Brotato with Extatonion mod. Have fun!</p>
111
- <h2>What are the features and benefits of Extatonion?</h2>
112
- <h3>New characters, weapons, items, and unlocks</h3>
113
- <p>One of the main features of Extatonion is that it adds new characters, weapons, items, and unlocks to Brotato. These include:</p>
114
- <ul>
115
- <li>4 new characters: Exta, Tonio, Nion, and Extonio. Each character has their own unique traits, weapons, and abilities.</li>
116
- <li>12 new weapons: From laser guns to rocket launchers, from flamethrowers to ice cannons, from shotguns to sniper rifles, Extatonion adds a variety of new weapons to choose from.</li>
117
- <li>16 new items: From passive items that boost your stats or give you special effects, to active items that let you use powerful abilities or summon allies, Extatonion adds a lot of new items to spice up your runs.</li>
118
- <li>8 new unlocks: From new game modes to new difficulties, from new achievements to new secrets, Extatonion adds a lot of new unlocks to challenge yourself and discover more content.</li>
119
- </ul>
120
- <p>Here is a table that summarizes some of the new content added by Extatonion:</p>
121
- | Content | Name | Description | | --- | --- | --- | | Character | Exta | A potato with a passion for explosions. Starts with a grenade launcher and can throw grenades as a secondary attack. | | Weapon | Laser Gun | A weapon that fires a continuous beam of laser that pierces through enemies. | | Item | Potato Chip | A passive item that increases your movement speed and fire rate. | | Unlock | Hardcore Mode | A new game mode that makes the game harder by increasing enemy health and damage, reducing item drops and ammo, and disabling checkpoints. | <h3>Original and balanced content that blends with the original game</h3>
122
- <p>Another feature of Extatonion is that it adds original and balanced content that blends with the original game. Psina tries to make the mod content fit with the theme, style, and mechanics of Brotato, while also adding some new twists and surprises. The mod content is also balanced and tested to ensure that it is not too easy or too hard, not too overpowered or too weak, not too common or too rare. Psina also listens to feedback and suggestions from the community and makes adjustments accordingly.</p>
123
- <h3>More variety and challenge for Brotato players</h3>
124
- <p>A final feature of Extatonion is that it adds more variety and challenge for Brotato players. The mod content adds more options and possibilities for creating different builds and strategies, as well as more diversity and difficulty for facing different enemies and situations. The mod content also adds more replay value and fun for playing Brotato again and again, as well as more rewards and satisfaction for completing achievements and secrets.</p>
125
- <h2>What are some alternatives to Extatonion?</h2>
126
- <h3>Other mods on the Brotato Mods GitHub page</h3>
127
- <p>If you want to try other mods for Brotato besides Extatonion, you can check out the Brotato Mods GitHub page, where you can find a list of other mods created by other players and modders. Some of these mods include:</p>
128
- <ul>
129
- <li>Potato Expansion Pack: A mod that adds 6 new characters, 18 new weapons, 24 new items, 12 new unlocks, 6 new enemies, 3 new bosses, 6 new levels, 6 new secrets, and 6 new achievements to the game.</li>
130
- <li>Potato Plus: A mod that adds 4 new characters, 12 new weapons, 16 new items, 8 new unlocks, 4 new enemies, 2 new bosses, 4 new levels, 4 new secrets, and 4 new achievements to the game.</li>
131
- <li>Potato Overhaul: A mod that changes the core gameplay of Brotato by adding new mechanics, such as stamina, reloading, ammo types, weapon mods, item tiers, and more.</li>
132
- </ul>
133
- <p>You can download these mods from their respective GitHub pages or their workshop pages (if available).</p>
134
- <h3>Other games like Brotato on SteamPeek</h3>
135
- <p>If you want to try other games like Brotato on Steam, you can check out SteamPeek, a website that helps you find similar games based on tags, ratings, genres, and more. Some of these games include:</p>
136
- <ul>
137
- <li>Enter the Gungeon: A bullet hell roguelite game where you explore a dungeon full of guns and gun-related enemies and bosses.</li>
138
- <li>Nuclear Throne: A post-apocalyptic roguelite game where you play as mutants with different abilities and fight your way to the nuclear throne.</li>
139
- <li>Binding of Isaac: A dark and twisted roguelite game where you play as a child who escapes his mother's wrath and faces various horrors in the basement.</li>
140
- </ul>
141
- <p>You can find these games and more on SteamPeek by searching for "Brotato".</p>
142
- <h2>Conclusion and FAQs</h2>
143
- <p>Brotato is a fun and addictive game that offers a lot of content and replay value. If you want to enhance your experience with the game, you can try out some mods that add new content and features to the game. One of the best mods for Brotato is Extatonion, a mod that adds new characters, weapons, items, unlocks, and more to the game. To install Extatonion, you need to download GodotSteam and GDRETools, decompile Brotato with GDRETools, download Extatonion from the official sources, copy the mod files to the decompiled project folder, and run Brotato in GodotSteam. Extatonion adds original and balanced content that blends with the original game and adds more variety and challenge for Brotato players. If you want to try other mods or games like Brotato, you can check out the Brotato Mods GitHub page or SteamPeek. We hope this article helped you learn more about Brotato Extension Download and how to install and use the Extatonion mod. Have fun!</p>
144
- <p>Here are some FAQs that might answer some of your questions:</p>
145
- <ol>
146
- <li>Q: Is Extatonion safe to use?</li>
147
- <li>A: Yes, Extatonion is safe to use as long as you download it from the official sources and follow the installation guide. The mod does not contain any viruses or malware and does not harm your computer or your game files.</li>
148
- <li>Q: Can I use Extatonion with other mods?</li>
149
- <li>A: Yes, Extatonion is compatible with most other mods for Brotato. However, some mods might conflict or overwrite each other if they modify the same files or content. To avoid this, you can use a mod manager tool such as Mod Organizer 2 or Vortex to manage your mods and load order.</li>
150
- <li>Q: Can I play online or co-op with Extatonion?</li>
151
- <li>A: Yes, Extatonion supports online and co-op play with other players who have the same mod installed. However, some features or content might not work properly or cause desync issues in multiplayer mode. To avoid this, you can disable or enable certain features or content in the mod settings menu.</li>
152
- <li>Q: How can I update Extatonion?</li>
153
- <li>A: To update Extatonion, you need to download the latest version of the mod from the official sources and repeat the installation process. You can also check for updates on the mod GitHub page or Discord server.</li>
154
- <li>Q: How can I contact Psina or give feedback on Extatonion?</li>
155
- <li>A: You can contact Psina or give feedback on Extatonion by joining the mod Discord server or by leaving a comment on the mod GitHub page. Psina is very friendly and responsive and appreciates any feedback or suggestions from the community.</li>
156
- </ol></p> 197e85843d<br />
157
- <br />
158
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/DolphiniOS A Guide to Download and Install Dolphin Emulator on iPhone without Jailbreak.md DELETED
@@ -1,118 +0,0 @@
1
- <br />
2
- <h1>Can You Download Dolphin Emulator on iPhone?</h1>
3
- <p>If you are a fan of Nintendo games, you might have heard of Dolphin emulator. It is a software that allows you to play GameCube and Wii games on your PC, Mac, Linux, Android, and even Xbox devices. But what about iPhone? Can you download Dolphin emulator on iPhone and enjoy your favorite Nintendo titles on the go?</p>
4
- <p>In this article, we will answer this question and show you how to download Dolphin emulator on iPhone. We will also explain what Dolphin emulator is, why you would want to use it on iPhone, and how to play games with it. Let's get started!</p>
5
- <h2>can you download dolphin emulator on iphone</h2><br /><p><b><b>Download File</b> --->>> <a href="https://jinyurl.com/2uNOwj">https://jinyurl.com/2uNOwj</a></b></p><br /><br />
6
- <h2>What is Dolphin Emulator?</h2>
7
- <p>Dolphin emulator is a free and open-source video game console emulator for GameCube and Wii that runs on various operating systems. It was first released in 2003 as a freeware for Windows, but later expanded to support other platforms. Dolphin emulator is the first and only emulator that can successfully run commercial GameCube and Wii games.</p>
8
- <p>Dolphin emulator has many features that enhance the gaming experience, such as:</p>
9
- <ul>
10
- <li>Full HD resolution (1080p) and anti-aliasing.</li>
11
- <li>Compatibility with all PC controllers, keyboards, mice, and touchscreens.</li>
12
- <li>Turbo speed, networked multiplayer, and cheat codes.</li>
13
- <li>Save states, screenshots, and video recording.</li>
14
- <li>Customizable graphics, audio, and controller settings.</li>
15
- </ul>
16
- <p>Dolphin emulator supports most GameCube and Wii games, but some may have glitches or performance issues. You can check the compatibility list on the official website to see how well your favorite games run on Dolphin emulator.</p>
17
- <h3>Why Use Dolphin Emulator on iPhone?</h3>
18
- <p>There are many reasons why you would want to use Dolphin emulator on iPhone, such as:</p>
19
- <ul>
20
- <li>Portability: You can play GameCube and Wii games anywhere you go with your iPhone.</li>
21
- <li>Convenience: You don't need to carry around bulky consoles or discs to play GameCube and Wii games.</li>
22
- <li>Enhanced graphics: You can enjoy GameCube and Wii games in high definition and with improved visuals on your iPhone.</li>
23
- </ul>
24
- <p>Imagine playing Super Mario Sunshine, The Legend of Zelda: Twilight Princess, or Metroid Prime on your iPhone with crisp graphics and smooth gameplay. Sounds awesome, right?</p>
25
- <h2>How to Download Dolphin Emulator on iPhone?</h2>
26
- <p>Unfortunately, downloading Dolphin emulator on iPhone is not as easy as downloading it on other devices. This is because Apple has strict policies that prevent unauthorized apps from running on iOS devices. Therefore, you cannot find Dolphin emulator on the App Store or satisfying and reliable way to download Dolphin emulator on iPhone. You might encounter some errors or glitches while playing the games.</p>
27
- <p>How to install dolphin emulator on iphone without jailbreak<br />
28
- Dolphin emulator ios 15 download<br />
29
- Best gamecube games for dolphin emulator iphone<br />
30
- Dolphin emulator iphone controller support<br />
31
- Dolphin emulator iphone performance settings<br />
32
- Dolphin emulator iphone cheats<br />
33
- Dolphin emulator iphone save files<br />
34
- Dolphin emulator iphone multiplayer<br />
35
- Dolphin emulator iphone screen rotation<br />
36
- Dolphin emulator iphone battery drain<br />
37
- Dolphin emulator iphone vs android<br />
38
- Dolphin emulator iphone reddit<br />
39
- Dolphin emulator iphone tutorial<br />
40
- Dolphin emulator iphone altstore<br />
41
- Dolphin emulator iphone cydia<br />
42
- Dolphin emulator iphone ipa<br />
43
- Dolphin emulator iphone appcake<br />
44
- Dolphin emulator iphone tweakbox<br />
45
- Dolphin emulator iphone appvalley<br />
46
- Dolphin emulator iphone panda helper<br />
47
- Dolphin emulator iphone iosgods<br />
48
- Dolphin emulator iphone no revoke<br />
49
- Dolphin emulator iphone 2023<br />
50
- Dolphin emulator iphone 2022<br />
51
- Dolphin emulator iphone 2021<br />
52
- Dolphin emulator iphone 2020<br />
53
- Dolphin emulator iphone 2019<br />
54
- Dolphin emulator iphone 2018<br />
55
- Dolphin emulator iphone 2017<br />
56
- Dolphin emulator iphone 2016<br />
57
- DolphiniOS download for iphone<br />
58
- DolphiniOS beta for iphone<br />
59
- DolphiniOS update for iphone<br />
60
- DolphiniOS review for iphone<br />
61
- DolphiniOS compatibility for iphone<br />
62
- DolphiniOS issues for iphone<br />
63
- DolphiniOS alternatives for iphone<br />
64
- DolphiniOS patreon for iphone<br />
65
- DolphiniOS discord for iphone<br />
66
- DolphiniOS twitter for iphone<br />
67
- Is dolphin emulator safe for iphone<br />
68
- Is dolphin emulator legal for iphone<br />
69
- Is dolphin emulator free for iphone<br />
70
- Is dolphin emulator worth it for iphone<br />
71
- Is dolphin emulator possible for iphone<br />
72
- Is dolphin emulator good for iphone<br />
73
- Is dolphin emulator easy to use for iphone<br />
74
- Is dolphin emulator the best for iphone<br />
75
- Is dolphin emulator compatible with all iphones</p>
76
- <h3>Use an Alternative iOS Emulator</h3>
77
- <p>A third way to download Dolphin emulator on iPhone is to use an alternative iOS emulator. An alternative iOS emulator is an app that can emulate other video game consoles on your iPhone, such as Nintendo DS, Game Boy Advance, or PlayStation. One example of such an app is iNDS, which lets you play Nintendo DS games on your iPhone.</p>
78
- <p>To use iNDS, you need to install it from a third-party app store like TweakBox or AppValley. These app stores do not require you to jailbreak your device, but they may have some ads or pop-ups. Then, you need to download the ROMs of the games you want to play from websites like EmuParadise or CoolROM. You can also transfer the ROMs from your computer to your iPhone using iTunes or a file manager app.</p>
79
- <p>However, using an alternative iOS emulator has some limitations, such as:</p>
80
- <ul>
81
- <li>Inability to play GameCube and Wii games, only other Nintendo games.</li>
82
- <li>Potential legal issues and ethical concerns of downloading ROMs.</li>
83
- <li>Possible revocation and expiration of the app certificates by Apple.</li>
84
- <li>Occasional bugs and crashes of the app and the games.</li>
85
- </ul>
86
- <p>Therefore, using an alternative iOS emulator is not a perfect solution to download Dolphin emulator on iPhone. You might not be able to play the games you want or enjoy them fully.</p>
87
- <h2>How to Play Games with Dolphin Emulator on iPhone?</h2>
88
- <p>If you manage to download Dolphin emulator on iPhone using one of the methods above, you might wonder how to play games with it. Here are some tips and tricks for playing games with Dolphin emulator on iPhone:</p>
89
- <ul>
90
- <li>To load ROMs, you need to have them in ISO or WBFS format and store them in a folder on your device or cloud service. Then, you need to tap the plus icon on the Dolphin emulator app and browse for the folder. You can also use a file manager app like Documents by Readdle to unzip or extract the ROMs from compressed files.</li>
91
- <li>To configure settings, you need to tap the gear icon on the Dolphin emulator app and adjust the options according to your preferences and device capabilities. You can change the video settings, audio settings, controller settings, and more. You can also enable cheats by tapping the edit icon on the game list and selecting the cheat codes you want.</li>
92
- <li>To use controllers, you need to pair them with your device via Bluetooth or connect them via USB. Then, you need to map the buttons on the controller settings of the Dolphin emulator app. You can use various controllers, such as PS4, Xbox One, MFi, or Gamevice controllers.</li>
93
- <li>To troubleshoot problems, you need to check the FAQ section on the official website of Dolphin emulator or the forums for help. You can also update the app or reinstall it if it does not work properly. You can also report bugs or issues on the GitHub page of Dolphin emulator.</li>
94
- </ul>
95
- <h2>Conclusion</h2>
96
- <p>Dolphin emulator is a great software that allows you to play GameCube and Wii games on various devices, including iPhone. However, downloading Dolphin emulator on iPhone is not a straightforward process due to Apple's restrictions and limitations. You need to use some workarounds that have their own risks and drawbacks.</p>
97
- <p>In this article, we showed you three possible ways to download Dolphin emulator on iPhone: jailbreaking your device, using a web-based iOS simulator, or using an alternative iOS emulator. We also gave you some tips and tricks for playing games with Dolphin emulator on iPhone. We hope this article was helpful and informative for you.</p>
98
- <p>If you want to download Dolphin emulator on iPhone and enjoy your favorite Nintendo games on the go, you can try one of the methods above at your own risk and discretion. However, we advise you to be careful and responsible when doing so. Always backup your data and follow the instructions carefully. Also, respect the intellectual property rights of Nintendo and its developers.</p>
99
- <p>Have fun playing GameCube and Wii games on your iPhone!</p>
100
- <h2>FAQs</h2>
101
- <ol>
102
- <li><b>Is Dolphin emulator legal?</b></li>
103
- <p>Dolphin emulator itself is legal, as it is a software that emulates hardware and does not contain any copyrighted material. However, downloading and playing ROMs of GameCube and Wii games may be illegal, depending on the laws of your country and the source of the ROMs. You should only download and play ROMs of games that you own legally and from trusted websites.</p>
104
- <li><b>How much storage space does Dolphin emulator need?</b></li>
105
- <p>Dolphin emulator itself does not need much storage space, as it is only about 15 MB in size. However, the ROMs of GameCube and Wii games can take up a lot of storage space, depending on the game. For example, Super Smash Bros. Brawl is about 7.9 GB, while Animal Crossing: City Folk is about 4.4 GB. You should have enough free space on your device or cloud service to store the ROMs you want to play.</p>
106
- <li><b>How fast does Dolphin emulator run on iPhone?</b></li>
107
- <p>The speed of Dolphin emulator on iPhone depends on several factors, such as the model of your device, the version of your iOS, the settings of the app, and the game you are playing. Generally speaking, newer devices with more powerful processors and memory can run Dolphin emulator faster and smoother than older devices. However, some games may still have lag or stutter issues, especially if they are graphically intensive or require a lot of resources.</p>
108
- <li><b>Can I play online multiplayer games with Dolphin emulator on iPhone?</b></li>
109
- <p>Yes, you can play online multiplayer games with Dolphin emulator on iPhone, as long as you have a stable internet connection and a compatible game. You can use the Netplay feature of Dolphin emulator to join or host online sessions with other players who are using Dolphin emulator on any device. You can also use the Wiimote feature of Dolphin emulator to connect your iPhone to a real Wii console and play online games that support Wiimote.</p>
110
- <li><b>What are some alternatives to Dolphin emulator for iPhone?</b></li>
111
- <p>If you are looking for alternatives to Dolphin emulator for iPhone, you can try some other iOS emulators that can run different video game consoles on your device. Some examples are:</p>
112
- <ul>
113
- <li>iNDS: An iOS emulator for Nintendo DS that supports many games and features.</li>
114
- <li>Delta: An iOS emulator for Game Boy, Game Boy Color, Game Boy Advance, Nintendo 64, and Super Nintendo that has a sleek design and easy-to-use interface.</li>
115
- <li>Provenance: An iOS emulator for multiple consoles, such as Sega Genesis, Sega CD, Master System, Game Gear, Atari, Neo Geo, PlayStation, and more.</li>
116
- </ul></p> 401be4b1e0<br />
117
- <br />
118
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/1phancelerku/anime-remove-background/Download Drama Live and Enjoy the Best IPTV Player for Android.md DELETED
@@ -1,135 +0,0 @@
1
- <br />
2
- <h1>Drama Live App Download: How to Watch Live TV and Sports on Your Android Device</h1>
3
- <p>Do you love watching live TV and sports, especially soccer games? Do you want to enjoy your favorite shows and movies from Asia on your phone or tablet? If you answered yes, then you should check out Drama Live App, a video player that lets you stream live television and sports channels online, as well as play any video file you want. In this article, we will show you how to download and install Drama Live App on your Android device, how to use it to watch live TV and sports, and some alternatives to Drama Live App that you might also like.</p>
4
- <h2>drama live app download</h2><br /><p><b><b>Download File</b> &#9913; <a href="https://jinyurl.com/2uNJMY">https://jinyurl.com/2uNJMY</a></b></p><br /><br />
5
- <h2>What is Drama Live App?</h2>
6
- <p>Drama Live App is a video player that specializes in content from Asia, especially Korea. It allows you to watch live TV and sports channels online, as well as play any video file from your device or external sources. You can also chat with other viewers while watching, choose your video quality, cast to your TV or other devices, and more. Drama Live App has over 1,000 Korean TV series and more than 200 movie titles that you can watch on demand, including originals. It also airs some shows live, such as soccer games. You can find a variety of genres, such as drama, comedy, romance, action, thriller, horror, and more.</p>
7
- <h3>Features of Drama Live App</h3>
8
- <p>Some of the features of Drama Live App include:</p>
9
- <ul>
10
- <li>Video, Audio, Live, VOD, IPTV player.</li>
11
- <li>Play several streams simultaneously.</li>
12
- <li>Picture-in-picture mode.</li>
13
- <li>Background playback.</li>
14
- <li>Public and private chat while watching.</li>
15
- <li>Built-in Fast video & IPTV player with high capabilities.</li>
16
- <li>The ability to choose the default video quality.</li>
17
- <li>Support for multiple quality for one channel (SD HD FHD 4K).</li>
18
- <li>Support for multiple servers for one channel.</li>
19
- <li>Auto choose server if one server not working.</li>
20
- <li>Automatic live stream reconnection.</li>
21
- <li>Audio track selection.</li>
22
- <li>Play audio only mode.</li>
23
- <li>The ability to create and arrange the favorite channels list.</li>
24
- <li>Start playing selected channel on launch.</li>
25
- <li>User-friendly design.</li>
26
- <li>Grid or list view of Channels.</li>
27
- <li>IPTV watching with channels groups and logos.</li>
28
- <li>Supporting request headers from m3u file</li>
29
- <li>Quickly search for channels in playlists.</li>
30
- </ul>
31
- <h3>Benefits of Drama Live App</h3>
32
- <p>Some of the benefits of using Drama Live App are:</p>
33
- <p>drama live video player app download<br />
34
- drama live iptv player apk download<br />
35
- drama live app download for android<br />
36
- drama live app download for pc<br />
37
- drama live app download for ios<br />
38
- drama live app free download<br />
39
- drama live app latest version download<br />
40
- drama live app download play store<br />
41
- drama live app download apkcombo<br />
42
- drama live app download apk pure<br />
43
- how to download drama live app<br />
44
- where to download drama live app<br />
45
- best drama live app download<br />
46
- korean drama live app download<br />
47
- chinese drama live app download<br />
48
- japanese drama live app download<br />
49
- thai drama live app download<br />
50
- turkish drama live app download<br />
51
- indian drama live app download<br />
52
- pakistani drama live app download<br />
53
- watch drama live online app download<br />
54
- watch geo entertainment live app download<br />
55
- watch harpal geo dramas app download<br />
56
- watch asian dramas live app download<br />
57
- watch vod dramas live app download<br />
58
- watch iptv dramas live app download<br />
59
- watch hd dramas live app download<br />
60
- watch multiple streams dramas live app download<br />
61
- watch picture-in-picture mode dramas live app download<br />
62
- watch background playback dramas live app download<br />
63
- chat while watching dramas live app download<br />
64
- fast video player dramas live app download<br />
65
- high quality dramas live app download<br />
66
- multiple quality dramas live app download<br />
67
- multiple servers dramas live app download<br />
68
- auto choose server dramas live app download<br />
69
- audio track selection dramas live app download<br />
70
- audio only mode dramas live app download<br />
71
- favorite channels list dramas live app download<br />
72
- start playing channel on launch dramas live app download<br />
73
- user-friendly design dramas live app download<br />
74
- grid or list view of channels dramas live app download<br />
75
- iptv watching with channels groups and logos dramas live app download <br />
76
- request headers from m3u file dramas live app download <br />
77
- quickly search for channels in playlists dramas live app download <br />
78
- mp4 mov wmv avi flv mkv webm mp3 dash hls mpeg-ts h.264 m3u8 formats supported dramas live app download <br />
79
- m3u xtream fg playlist sources supported dramas live app download <br />
80
- android phone tab tv box devices supported dramas live app download <br />
81
- miracast web video caster options supported dramas live app download <br />
82
- data safety and privacy practices dramas live app download</p>
83
- <ul>
84
- <li>You can watch live TV and sports channels online for free, without any subscription or registration.</li>
85
- <li>You can enjoy high-quality streaming with fast loading and smooth playback.</li>
86
- <li>You can access a large library of Korean content, including dramas, movies, originals, and more.</li>
87
- <li>You can chat with other viewers while watching and share your opinions and reactions.</li>
88
- <li>You can customize your viewing experience by choosing your video quality, audio track, playlist source, and more.</li>
89
- <li>You can cast your screen to your TV or other devices using Miracast or Web Video Caster.</li>
90
- </ul>
91
- <h2> <h2>How to Download and Install Drama Live App on Your Android Device</h2>
92
- <p>If you are interested in trying out Drama Live App, you can download and install it on your Android device easily. Here are the steps you need to follow:</p>
93
- <h3>Step 1: Go to the Google Play Store</h3>
94
- <p>The first step is to go to the Google Play Store on your Android device and search for Drama Live App. Alternatively, you can use this link to go directly to the app page.</p>
95
- <h3>Step 2: Search for Drama Live App</h3>
96
- <p>Once you are on the app page, you will see the app icon, name, rating, and description. You can also scroll down to see more information, such as screenshots, reviews, and permissions. To download the app, tap on the green Install button.</p>
97
- <h3>Step 3: Tap on Install and Accept Permissions</h3>
98
- <p>After tapping on the Install button, you will see a pop-up window asking you to accept the permissions that the app needs to function properly. These include access to your device's storage, network, and location. To proceed, tap on Accept.</p>
99
- <h3>Step 4: Open the App and Enjoy</h3>
100
- <p>Once the app is installed, you can open it by tapping on the Open button on the app page or by finding it on your device's app drawer. You will see a welcome screen with some instructions on how to use the app. You can also change the app settings by tapping on the menu icon on the top left corner. Now you are ready to watch live TV and sports on your Android device with Drama Live App.</p>
101
- <h2>How to Use Drama Live App to Watch Live TV and Sports</h2>
102
- <p>Using Drama Live App to watch live TV and sports is very simple and intuitive. Here are some tips on how to use the app:</p>
103
- <h3>Choose Your Video Source and Playlist Source</h3>
104
- <p>The first thing you need to do is to choose your video source and playlist source. The video source is where you get your video files from, such as your device's storage or external sources. The playlist source is where you get your live TV and sports channels from, such as IPTV or m3u files. You can choose your video source and playlist source by tapping on the menu icon on the top left corner and selecting Video Source or Playlist Source.</p>
105
- <h3>Browse and Search for Channels and Shows</h3>
106
- <p>Once you have chosen your video source and playlist source, you can browse and search for channels and shows that you want to watch. You can use the tabs on the bottom of the screen to switch between different categories, such as Live TV, Sports, Movies, Series, etc. You can also use the search icon on the top right corner to look for specific channels or shows by name or keyword.</p>
107
- <h3>Play, Pause, Rewind, and Chat While Watching</h3>
108
- <p>When you find a channel or show that you want to watch, just tap on it and it will start playing automatically. You can use the controls on the bottom of the screen to play, pause, rewind, fast forward, or adjust the volume. You can also chat with other viewers while watching by tapping on the chat icon on the top right corner. You can send messages, emojis, stickers, or gifs to express your feelings or opinions.</p>
109
- <h3>Cast to Your TV or Other Devices</h3>
110
- <p>If you want to watch your content on a bigger screen, you can cast it to your TV or other devices using Miracast or Web Video Caster. To do this, tap on the cast icon on the top right corner and select your device from the list. Make sure that both devices are connected to the same Wi-Fi network.</p> <h2>Alternatives to Drama Live App</h2>
111
- <p>While Drama Live App is a great app for watching live TV and sports, especially from Asia, it is not the only one. There are some other apps that you might want to try if you are looking for more options or different content. Here are some of the alternatives to Drama Live App that you can download and use:</p>
112
- <h3>Viki</h3>
113
- <p>Viki is an app that offers Asian TV shows and movies, with subtitles in over 200 languages. You can watch popular dramas, variety shows, movies, and more from Korea, China, Japan, Taiwan, Thailand, and other countries. You can also join the Viki community and interact with other fans, create collections, leave comments, and more. Viki is free to use, but you can also upgrade to Viki Pass for ad-free and HD streaming.</p>
114
- <h3>Netflix</h3>
115
- <p>Netflix is one of the most popular streaming services in the world, offering a wide range of content from different genres and countries. You can watch original shows and movies, as well as licensed content from other sources. Netflix has a lot of Asian content as well, including dramas, movies, anime, documentaries, and more. You can also download your content for offline viewing, create profiles for different users, and adjust your settings and preferences. Netflix requires a monthly subscription fee to use.</p>
116
- <h3>Harpal Geo</h3>
117
- <p>Harpal Geo is an app that lets you watch live TV and on-demand content from Geo TV, a Pakistani television network. You can watch dramas, movies, news, sports, comedy, and more in Urdu and other languages. You can also catch up on missed episodes, watch exclusive clips and behind-the-scenes footage, and get notifications for your favorite shows. Harpal Geo is free to use, but you need to sign up with your email or phone number.</p>
118
- <h2>Conclusion</h2>
119
- <p>Drama Live App is a video player that lets you watch live TV and sports channels online, as well as play any video file from your device or external sources. It specializes in content from Asia, especially Korea. It has many features and benefits that make it a great app for watching live TV and sports on your Android device. You can download and install it easily from the Google Play Store, and use it to watch your favorite shows and movies anytime and anywhere. You can also chat with other viewers while watching, cast to your TV or other devices, and customize your viewing experience. If you are looking for alternatives to Drama Live App, you can try Viki, Netflix, or Harpal Geo.</p>
120
- <h2>FAQs</h2>
121
- <p>Here are some of the frequently asked questions about Drama Live App:</p>
122
- <ol>
123
- <li>Is Drama Live App safe to use?</li>
124
- <p>Drama Live App is safe to use as long as you download it from the official Google Play Store link. It does not contain any viruses or malware that could harm your device or data. However, you should be careful when using external sources or links for your video files or playlists, as they might not be secure or legal.</p>
125
- <li>Is Drama Live App legal to use?</li>
126
- <p>Drama Live App is legal to use as long as you do not infringe on any copyrights or trademarks of the content owners or providers. The app itself does not host or distribute any content; it only plays the content that you provide or access through external sources or links. You should always respect the rights of the content owners or providers and follow their terms and conditions.</p>
127
- <li>How can I update Drama Live App?</li>
128
- <p>You can update Drama Live App by going to the Google Play Store on your Android device and checking for updates. Alternatively, you can use this link to go directly to the app page and see if there is a new version available. You should always update your app to get the latest features and bug fixes.</p>
129
- <li>How can I contact Drama Live App support?</li>
130
- <p>You can contact Drama Live App support by sending an email to [email protected]. You can also visit their Facebook page or their website for more information and updates.</p>
131
- <li>How can I uninstall Drama Live App?</li>
132
- <p>You can uninstall Drama Live App by going to the Settings app on your Android device and selecting Apps or Applications. Then find Drama Live App from the list of apps and tap on it. Then tap on Uninstall and confirm your action.</p>
133
- </ol></p> 197e85843d<br />
134
- <br />
135
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AI-Hobbyist/Hoyo-RVC/infer_pack/modules.py DELETED
@@ -1,522 +0,0 @@
1
- import copy
2
- import math
3
- import numpy as np
4
- import scipy
5
- import torch
6
- from torch import nn
7
- from torch.nn import functional as F
8
-
9
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
10
- from torch.nn.utils import weight_norm, remove_weight_norm
11
-
12
- from infer_pack import commons
13
- from infer_pack.commons import init_weights, get_padding
14
- from infer_pack.transforms import piecewise_rational_quadratic_transform
15
-
16
-
17
- LRELU_SLOPE = 0.1
18
-
19
-
20
- class LayerNorm(nn.Module):
21
- def __init__(self, channels, eps=1e-5):
22
- super().__init__()
23
- self.channels = channels
24
- self.eps = eps
25
-
26
- self.gamma = nn.Parameter(torch.ones(channels))
27
- self.beta = nn.Parameter(torch.zeros(channels))
28
-
29
- def forward(self, x):
30
- x = x.transpose(1, -1)
31
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
32
- return x.transpose(1, -1)
33
-
34
-
35
- class ConvReluNorm(nn.Module):
36
- def __init__(
37
- self,
38
- in_channels,
39
- hidden_channels,
40
- out_channels,
41
- kernel_size,
42
- n_layers,
43
- p_dropout,
44
- ):
45
- super().__init__()
46
- self.in_channels = in_channels
47
- self.hidden_channels = hidden_channels
48
- self.out_channels = out_channels
49
- self.kernel_size = kernel_size
50
- self.n_layers = n_layers
51
- self.p_dropout = p_dropout
52
- assert n_layers > 1, "Number of layers should be larger than 0."
53
-
54
- self.conv_layers = nn.ModuleList()
55
- self.norm_layers = nn.ModuleList()
56
- self.conv_layers.append(
57
- nn.Conv1d(
58
- in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
59
- )
60
- )
61
- self.norm_layers.append(LayerNorm(hidden_channels))
62
- self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
63
- for _ in range(n_layers - 1):
64
- self.conv_layers.append(
65
- nn.Conv1d(
66
- hidden_channels,
67
- hidden_channels,
68
- kernel_size,
69
- padding=kernel_size // 2,
70
- )
71
- )
72
- self.norm_layers.append(LayerNorm(hidden_channels))
73
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
74
- self.proj.weight.data.zero_()
75
- self.proj.bias.data.zero_()
76
-
77
- def forward(self, x, x_mask):
78
- x_org = x
79
- for i in range(self.n_layers):
80
- x = self.conv_layers[i](x * x_mask)
81
- x = self.norm_layers[i](x)
82
- x = self.relu_drop(x)
83
- x = x_org + self.proj(x)
84
- return x * x_mask
85
-
86
-
87
- class DDSConv(nn.Module):
88
- """
89
- Dialted and Depth-Separable Convolution
90
- """
91
-
92
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
93
- super().__init__()
94
- self.channels = channels
95
- self.kernel_size = kernel_size
96
- self.n_layers = n_layers
97
- self.p_dropout = p_dropout
98
-
99
- self.drop = nn.Dropout(p_dropout)
100
- self.convs_sep = nn.ModuleList()
101
- self.convs_1x1 = nn.ModuleList()
102
- self.norms_1 = nn.ModuleList()
103
- self.norms_2 = nn.ModuleList()
104
- for i in range(n_layers):
105
- dilation = kernel_size**i
106
- padding = (kernel_size * dilation - dilation) // 2
107
- self.convs_sep.append(
108
- nn.Conv1d(
109
- channels,
110
- channels,
111
- kernel_size,
112
- groups=channels,
113
- dilation=dilation,
114
- padding=padding,
115
- )
116
- )
117
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
118
- self.norms_1.append(LayerNorm(channels))
119
- self.norms_2.append(LayerNorm(channels))
120
-
121
- def forward(self, x, x_mask, g=None):
122
- if g is not None:
123
- x = x + g
124
- for i in range(self.n_layers):
125
- y = self.convs_sep[i](x * x_mask)
126
- y = self.norms_1[i](y)
127
- y = F.gelu(y)
128
- y = self.convs_1x1[i](y)
129
- y = self.norms_2[i](y)
130
- y = F.gelu(y)
131
- y = self.drop(y)
132
- x = x + y
133
- return x * x_mask
134
-
135
-
136
- class WN(torch.nn.Module):
137
- def __init__(
138
- self,
139
- hidden_channels,
140
- kernel_size,
141
- dilation_rate,
142
- n_layers,
143
- gin_channels=0,
144
- p_dropout=0,
145
- ):
146
- super(WN, self).__init__()
147
- assert kernel_size % 2 == 1
148
- self.hidden_channels = hidden_channels
149
- self.kernel_size = (kernel_size,)
150
- self.dilation_rate = dilation_rate
151
- self.n_layers = n_layers
152
- self.gin_channels = gin_channels
153
- self.p_dropout = p_dropout
154
-
155
- self.in_layers = torch.nn.ModuleList()
156
- self.res_skip_layers = torch.nn.ModuleList()
157
- self.drop = nn.Dropout(p_dropout)
158
-
159
- if gin_channels != 0:
160
- cond_layer = torch.nn.Conv1d(
161
- gin_channels, 2 * hidden_channels * n_layers, 1
162
- )
163
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
164
-
165
- for i in range(n_layers):
166
- dilation = dilation_rate**i
167
- padding = int((kernel_size * dilation - dilation) / 2)
168
- in_layer = torch.nn.Conv1d(
169
- hidden_channels,
170
- 2 * hidden_channels,
171
- kernel_size,
172
- dilation=dilation,
173
- padding=padding,
174
- )
175
- in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
176
- self.in_layers.append(in_layer)
177
-
178
- # last one is not necessary
179
- if i < n_layers - 1:
180
- res_skip_channels = 2 * hidden_channels
181
- else:
182
- res_skip_channels = hidden_channels
183
-
184
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
185
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
186
- self.res_skip_layers.append(res_skip_layer)
187
-
188
- def forward(self, x, x_mask, g=None, **kwargs):
189
- output = torch.zeros_like(x)
190
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
191
-
192
- if g is not None:
193
- g = self.cond_layer(g)
194
-
195
- for i in range(self.n_layers):
196
- x_in = self.in_layers[i](x)
197
- if g is not None:
198
- cond_offset = i * 2 * self.hidden_channels
199
- g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
200
- else:
201
- g_l = torch.zeros_like(x_in)
202
-
203
- acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
204
- acts = self.drop(acts)
205
-
206
- res_skip_acts = self.res_skip_layers[i](acts)
207
- if i < self.n_layers - 1:
208
- res_acts = res_skip_acts[:, : self.hidden_channels, :]
209
- x = (x + res_acts) * x_mask
210
- output = output + res_skip_acts[:, self.hidden_channels :, :]
211
- else:
212
- output = output + res_skip_acts
213
- return output * x_mask
214
-
215
- def remove_weight_norm(self):
216
- if self.gin_channels != 0:
217
- torch.nn.utils.remove_weight_norm(self.cond_layer)
218
- for l in self.in_layers:
219
- torch.nn.utils.remove_weight_norm(l)
220
- for l in self.res_skip_layers:
221
- torch.nn.utils.remove_weight_norm(l)
222
-
223
-
224
- class ResBlock1(torch.nn.Module):
225
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
226
- super(ResBlock1, self).__init__()
227
- self.convs1 = nn.ModuleList(
228
- [
229
- weight_norm(
230
- Conv1d(
231
- channels,
232
- channels,
233
- kernel_size,
234
- 1,
235
- dilation=dilation[0],
236
- padding=get_padding(kernel_size, dilation[0]),
237
- )
238
- ),
239
- weight_norm(
240
- Conv1d(
241
- channels,
242
- channels,
243
- kernel_size,
244
- 1,
245
- dilation=dilation[1],
246
- padding=get_padding(kernel_size, dilation[1]),
247
- )
248
- ),
249
- weight_norm(
250
- Conv1d(
251
- channels,
252
- channels,
253
- kernel_size,
254
- 1,
255
- dilation=dilation[2],
256
- padding=get_padding(kernel_size, dilation[2]),
257
- )
258
- ),
259
- ]
260
- )
261
- self.convs1.apply(init_weights)
262
-
263
- self.convs2 = nn.ModuleList(
264
- [
265
- weight_norm(
266
- Conv1d(
267
- channels,
268
- channels,
269
- kernel_size,
270
- 1,
271
- dilation=1,
272
- padding=get_padding(kernel_size, 1),
273
- )
274
- ),
275
- weight_norm(
276
- Conv1d(
277
- channels,
278
- channels,
279
- kernel_size,
280
- 1,
281
- dilation=1,
282
- padding=get_padding(kernel_size, 1),
283
- )
284
- ),
285
- weight_norm(
286
- Conv1d(
287
- channels,
288
- channels,
289
- kernel_size,
290
- 1,
291
- dilation=1,
292
- padding=get_padding(kernel_size, 1),
293
- )
294
- ),
295
- ]
296
- )
297
- self.convs2.apply(init_weights)
298
-
299
- def forward(self, x, x_mask=None):
300
- for c1, c2 in zip(self.convs1, self.convs2):
301
- xt = F.leaky_relu(x, LRELU_SLOPE)
302
- if x_mask is not None:
303
- xt = xt * x_mask
304
- xt = c1(xt)
305
- xt = F.leaky_relu(xt, LRELU_SLOPE)
306
- if x_mask is not None:
307
- xt = xt * x_mask
308
- xt = c2(xt)
309
- x = xt + x
310
- if x_mask is not None:
311
- x = x * x_mask
312
- return x
313
-
314
- def remove_weight_norm(self):
315
- for l in self.convs1:
316
- remove_weight_norm(l)
317
- for l in self.convs2:
318
- remove_weight_norm(l)
319
-
320
-
321
- class ResBlock2(torch.nn.Module):
322
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
323
- super(ResBlock2, self).__init__()
324
- self.convs = nn.ModuleList(
325
- [
326
- weight_norm(
327
- Conv1d(
328
- channels,
329
- channels,
330
- kernel_size,
331
- 1,
332
- dilation=dilation[0],
333
- padding=get_padding(kernel_size, dilation[0]),
334
- )
335
- ),
336
- weight_norm(
337
- Conv1d(
338
- channels,
339
- channels,
340
- kernel_size,
341
- 1,
342
- dilation=dilation[1],
343
- padding=get_padding(kernel_size, dilation[1]),
344
- )
345
- ),
346
- ]
347
- )
348
- self.convs.apply(init_weights)
349
-
350
- def forward(self, x, x_mask=None):
351
- for c in self.convs:
352
- xt = F.leaky_relu(x, LRELU_SLOPE)
353
- if x_mask is not None:
354
- xt = xt * x_mask
355
- xt = c(xt)
356
- x = xt + x
357
- if x_mask is not None:
358
- x = x * x_mask
359
- return x
360
-
361
- def remove_weight_norm(self):
362
- for l in self.convs:
363
- remove_weight_norm(l)
364
-
365
-
366
- class Log(nn.Module):
367
- def forward(self, x, x_mask, reverse=False, **kwargs):
368
- if not reverse:
369
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
370
- logdet = torch.sum(-y, [1, 2])
371
- return y, logdet
372
- else:
373
- x = torch.exp(x) * x_mask
374
- return x
375
-
376
-
377
- class Flip(nn.Module):
378
- def forward(self, x, *args, reverse=False, **kwargs):
379
- x = torch.flip(x, [1])
380
- if not reverse:
381
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
382
- return x, logdet
383
- else:
384
- return x
385
-
386
-
387
- class ElementwiseAffine(nn.Module):
388
- def __init__(self, channels):
389
- super().__init__()
390
- self.channels = channels
391
- self.m = nn.Parameter(torch.zeros(channels, 1))
392
- self.logs = nn.Parameter(torch.zeros(channels, 1))
393
-
394
- def forward(self, x, x_mask, reverse=False, **kwargs):
395
- if not reverse:
396
- y = self.m + torch.exp(self.logs) * x
397
- y = y * x_mask
398
- logdet = torch.sum(self.logs * x_mask, [1, 2])
399
- return y, logdet
400
- else:
401
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
402
- return x
403
-
404
-
405
- class ResidualCouplingLayer(nn.Module):
406
- def __init__(
407
- self,
408
- channels,
409
- hidden_channels,
410
- kernel_size,
411
- dilation_rate,
412
- n_layers,
413
- p_dropout=0,
414
- gin_channels=0,
415
- mean_only=False,
416
- ):
417
- assert channels % 2 == 0, "channels should be divisible by 2"
418
- super().__init__()
419
- self.channels = channels
420
- self.hidden_channels = hidden_channels
421
- self.kernel_size = kernel_size
422
- self.dilation_rate = dilation_rate
423
- self.n_layers = n_layers
424
- self.half_channels = channels // 2
425
- self.mean_only = mean_only
426
-
427
- self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
428
- self.enc = WN(
429
- hidden_channels,
430
- kernel_size,
431
- dilation_rate,
432
- n_layers,
433
- p_dropout=p_dropout,
434
- gin_channels=gin_channels,
435
- )
436
- self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
437
- self.post.weight.data.zero_()
438
- self.post.bias.data.zero_()
439
-
440
- def forward(self, x, x_mask, g=None, reverse=False):
441
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
442
- h = self.pre(x0) * x_mask
443
- h = self.enc(h, x_mask, g=g)
444
- stats = self.post(h) * x_mask
445
- if not self.mean_only:
446
- m, logs = torch.split(stats, [self.half_channels] * 2, 1)
447
- else:
448
- m = stats
449
- logs = torch.zeros_like(m)
450
-
451
- if not reverse:
452
- x1 = m + x1 * torch.exp(logs) * x_mask
453
- x = torch.cat([x0, x1], 1)
454
- logdet = torch.sum(logs, [1, 2])
455
- return x, logdet
456
- else:
457
- x1 = (x1 - m) * torch.exp(-logs) * x_mask
458
- x = torch.cat([x0, x1], 1)
459
- return x
460
-
461
- def remove_weight_norm(self):
462
- self.enc.remove_weight_norm()
463
-
464
-
465
- class ConvFlow(nn.Module):
466
- def __init__(
467
- self,
468
- in_channels,
469
- filter_channels,
470
- kernel_size,
471
- n_layers,
472
- num_bins=10,
473
- tail_bound=5.0,
474
- ):
475
- super().__init__()
476
- self.in_channels = in_channels
477
- self.filter_channels = filter_channels
478
- self.kernel_size = kernel_size
479
- self.n_layers = n_layers
480
- self.num_bins = num_bins
481
- self.tail_bound = tail_bound
482
- self.half_channels = in_channels // 2
483
-
484
- self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
485
- self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
486
- self.proj = nn.Conv1d(
487
- filter_channels, self.half_channels * (num_bins * 3 - 1), 1
488
- )
489
- self.proj.weight.data.zero_()
490
- self.proj.bias.data.zero_()
491
-
492
- def forward(self, x, x_mask, g=None, reverse=False):
493
- x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
494
- h = self.pre(x0)
495
- h = self.convs(h, x_mask, g=g)
496
- h = self.proj(h) * x_mask
497
-
498
- b, c, t = x0.shape
499
- h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?]
500
-
501
- unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
502
- unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
503
- self.filter_channels
504
- )
505
- unnormalized_derivatives = h[..., 2 * self.num_bins :]
506
-
507
- x1, logabsdet = piecewise_rational_quadratic_transform(
508
- x1,
509
- unnormalized_widths,
510
- unnormalized_heights,
511
- unnormalized_derivatives,
512
- inverse=reverse,
513
- tails="linear",
514
- tail_bound=self.tail_bound,
515
- )
516
-
517
- x = torch.cat([x0, x1], 1) * x_mask
518
- logdet = torch.sum(logabsdet * x_mask, [1, 2])
519
- if not reverse:
520
- return x, logdet
521
- else:
522
- return x
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIConsultant/MusicGen/audiocraft/grids/diffusion/__init__.py DELETED
@@ -1,6 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
- """Diffusion grids."""
 
 
 
 
 
 
 
spaces/AIConsultant/MusicGen/audiocraft/utils/notebook.py DELETED
@@ -1,32 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- try:
8
- import IPython.display as ipd # type: ignore
9
- except ImportError:
10
- # Note in a notebook...
11
- pass
12
-
13
-
14
- import torch
15
-
16
-
17
- def display_audio(samples: torch.Tensor, sample_rate: int):
18
- """Renders an audio player for the given audio samples.
19
-
20
- Args:
21
- samples (torch.Tensor): a Tensor of decoded audio samples
22
- with shapes [B, C, T] or [C, T]
23
- sample_rate (int): sample rate audio should be displayed with.
24
- """
25
- assert samples.dim() == 2 or samples.dim() == 3
26
-
27
- samples = samples.detach().cpu()
28
- if samples.dim() == 2:
29
- samples = samples[None, ...]
30
-
31
- for audio in samples:
32
- ipd.display(ipd.Audio(audio, rate=sample_rate))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/NeuralSeq/utils/dtw.py DELETED
@@ -1,162 +0,0 @@
1
- from numpy import array, zeros, full, argmin, inf, ndim
2
- from scipy.spatial.distance import cdist
3
- from math import isinf
4
-
5
-
6
- def dtw(x, y, dist, warp=1, w=inf, s=1.0):
7
- """
8
- Computes Dynamic Time Warping (DTW) of two sequences.
9
-
10
- :param array x: N1*M array
11
- :param array y: N2*M array
12
- :param func dist: distance used as cost measure
13
- :param int warp: how many shifts are computed.
14
- :param int w: window size limiting the maximal distance between indices of matched entries |i,j|.
15
- :param float s: weight applied on off-diagonal moves of the path. As s gets larger, the warping path is increasingly biased towards the diagonal
16
- Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
17
- """
18
- assert len(x)
19
- assert len(y)
20
- assert isinf(w) or (w >= abs(len(x) - len(y)))
21
- assert s > 0
22
- r, c = len(x), len(y)
23
- if not isinf(w):
24
- D0 = full((r + 1, c + 1), inf)
25
- for i in range(1, r + 1):
26
- D0[i, max(1, i - w):min(c + 1, i + w + 1)] = 0
27
- D0[0, 0] = 0
28
- else:
29
- D0 = zeros((r + 1, c + 1))
30
- D0[0, 1:] = inf
31
- D0[1:, 0] = inf
32
- D1 = D0[1:, 1:] # view
33
- for i in range(r):
34
- for j in range(c):
35
- if (isinf(w) or (max(0, i - w) <= j <= min(c, i + w))):
36
- D1[i, j] = dist(x[i], y[j])
37
- C = D1.copy()
38
- jrange = range(c)
39
- for i in range(r):
40
- if not isinf(w):
41
- jrange = range(max(0, i - w), min(c, i + w + 1))
42
- for j in jrange:
43
- min_list = [D0[i, j]]
44
- for k in range(1, warp + 1):
45
- i_k = min(i + k, r)
46
- j_k = min(j + k, c)
47
- min_list += [D0[i_k, j] * s, D0[i, j_k] * s]
48
- D1[i, j] += min(min_list)
49
- if len(x) == 1:
50
- path = zeros(len(y)), range(len(y))
51
- elif len(y) == 1:
52
- path = range(len(x)), zeros(len(x))
53
- else:
54
- path = _traceback(D0)
55
- return D1[-1, -1], C, D1, path
56
-
57
-
58
- def accelerated_dtw(x, y, dist, warp=1):
59
- """
60
- Computes Dynamic Time Warping (DTW) of two sequences in a faster way.
61
- Instead of iterating through each element and calculating each distance,
62
- this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
63
-
64
- :param array x: N1*M array
65
- :param array y: N2*M array
66
- :param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics.
67
- If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.
68
- :param int warp: how many shifts are computed.
69
- Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the wrap path.
70
- """
71
- assert len(x)
72
- assert len(y)
73
- if ndim(x) == 1:
74
- x = x.reshape(-1, 1)
75
- if ndim(y) == 1:
76
- y = y.reshape(-1, 1)
77
- r, c = len(x), len(y)
78
- D0 = zeros((r + 1, c + 1))
79
- D0[0, 1:] = inf
80
- D0[1:, 0] = inf
81
- D1 = D0[1:, 1:]
82
- D0[1:, 1:] = cdist(x, y, dist)
83
- C = D1.copy()
84
- for i in range(r):
85
- for j in range(c):
86
- min_list = [D0[i, j]]
87
- for k in range(1, warp + 1):
88
- min_list += [D0[min(i + k, r), j],
89
- D0[i, min(j + k, c)]]
90
- D1[i, j] += min(min_list)
91
- if len(x) == 1:
92
- path = zeros(len(y)), range(len(y))
93
- elif len(y) == 1:
94
- path = range(len(x)), zeros(len(x))
95
- else:
96
- path = _traceback(D0)
97
- return D1[-1, -1], C, D1, path
98
-
99
-
100
- def _traceback(D):
101
- i, j = array(D.shape) - 2
102
- p, q = [i], [j]
103
- while (i > 0) or (j > 0):
104
- tb = argmin((D[i, j], D[i, j + 1], D[i + 1, j]))
105
- if tb == 0:
106
- i -= 1
107
- j -= 1
108
- elif tb == 1:
109
- i -= 1
110
- else: # (tb == 2):
111
- j -= 1
112
- p.insert(0, i)
113
- q.insert(0, j)
114
- return array(p), array(q)
115
-
116
-
117
- if __name__ == '__main__':
118
- w = inf
119
- s = 1.0
120
- if 1: # 1-D numeric
121
- from sklearn.metrics.pairwise import manhattan_distances
122
- import numpy as np
123
- x = [0, 0, 1, 1, 2, 4, 2, 1, 2, 0]
124
- x = np.array(x).reshape([-1,1,1])
125
- y = [1, 1, 1, 2, 2, 2, 2, 3, 2, 0]
126
- y = np.array(y).reshape([-1,1,1])
127
- dist_fun = manhattan_distances
128
- w = 1
129
- # s = 1.2
130
- elif 0: # 2-D numeric
131
- from sklearn.metrics.pairwise import euclidean_distances
132
-
133
- x = [[0, 0], [0, 1], [1, 1], [1, 2], [2, 2], [4, 3], [2, 3], [1, 1], [2, 2], [0, 1]]
134
- y = [[1, 0], [1, 1], [1, 1], [2, 1], [4, 3], [4, 3], [2, 3], [3, 1], [1, 2], [1, 0]]
135
- dist_fun = euclidean_distances
136
- else: # 1-D list of strings
137
- from nltk.metrics.distance import edit_distance
138
-
139
- # x = ['we', 'shelled', 'clams', 'for', 'the', 'chowder']
140
- # y = ['class', 'too']
141
- x = ['i', 'soon', 'found', 'myself', 'muttering', 'to', 'the', 'walls']
142
- y = ['see', 'drown', 'himself']
143
- # x = 'we talked about the situation'.split()
144
- # y = 'we talked about the situation'.split()
145
- dist_fun = edit_distance
146
- dist, cost, acc, path = dtw(x, y, dist_fun, w=w, s=s)
147
-
148
- # Vizualize
149
- from matplotlib import pyplot as plt
150
-
151
- plt.imshow(cost.T, origin='lower', cmap=plt.cm.Reds, interpolation='nearest')
152
- plt.plot(path[0], path[1], '-o') # relation
153
- plt.xticks(range(len(x)), x)
154
- plt.yticks(range(len(y)), y)
155
- plt.xlabel('x')
156
- plt.ylabel('y')
157
- plt.axis('tight')
158
- if isinf(w):
159
- plt.title('Minimum distance: {}, slope weight: {}'.format(dist, s))
160
- else:
161
- plt.title('Minimum distance: {}, window widht: {}, slope weight: {}'.format(dist, w, s))
162
- plt.show()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/clip_vit.py DELETED
@@ -1,257 +0,0 @@
1
- from collections import OrderedDict
2
- from itertools import repeat
3
- import collections.abc
4
- import math
5
-
6
- import torch
7
- import torch.nn.functional as F
8
- from torch import nn
9
-
10
-
11
- from .eva_vit import convert_weights_to_fp16
12
- from .utils import download_cached_file
13
-
14
-
15
- class Bottleneck(nn.Module):
16
- expansion = 4
17
-
18
- def __init__(self, inplanes, planes, stride=1):
19
- super().__init__()
20
-
21
- # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
22
- self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
23
- self.bn1 = nn.BatchNorm2d(planes)
24
- self.relu1 = nn.ReLU(inplace=True)
25
-
26
- self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
27
- self.bn2 = nn.BatchNorm2d(planes)
28
- self.relu2 = nn.ReLU(inplace=True)
29
-
30
- self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
31
-
32
- self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
33
- self.bn3 = nn.BatchNorm2d(planes * self.expansion)
34
- self.relu3 = nn.ReLU(inplace=True)
35
-
36
- self.downsample = None
37
- self.stride = stride
38
-
39
- if stride > 1 or inplanes != planes * Bottleneck.expansion:
40
- # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
41
- self.downsample = nn.Sequential(
42
- OrderedDict([("-1", nn.AvgPool2d(stride)),
43
- ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
44
- ("1", nn.BatchNorm2d(planes * self.expansion))]))
45
-
46
- def forward(self, x: torch.Tensor):
47
- identity = x
48
-
49
- out = self.relu1(self.bn1(self.conv1(x)))
50
- out = self.relu2(self.bn2(self.conv2(out)))
51
- out = self.avgpool(out)
52
- out = self.bn3(self.conv3(out))
53
-
54
- if self.downsample is not None:
55
- identity = self.downsample(x)
56
-
57
- out += identity
58
- out = self.relu3(out)
59
- return out
60
-
61
-
62
- class AttentionPool2d(nn.Module):
63
- def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
64
- super().__init__()
65
- self.positional_embedding = nn.Parameter(torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5)
66
- self.k_proj = nn.Linear(embed_dim, embed_dim)
67
- self.q_proj = nn.Linear(embed_dim, embed_dim)
68
- self.v_proj = nn.Linear(embed_dim, embed_dim)
69
- self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
70
- self.num_heads = num_heads
71
-
72
- def forward(self, x):
73
- x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
74
- x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
75
- x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
76
- x, _ = F.multi_head_attention_forward(query=x,
77
- key=x,
78
- value=x,
79
- embed_dim_to_check=x.shape[-1],
80
- num_heads=self.num_heads,
81
- q_proj_weight=self.q_proj.weight,
82
- k_proj_weight=self.k_proj.weight,
83
- v_proj_weight=self.v_proj.weight,
84
- in_proj_weight=None,
85
- in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
86
- bias_k=None,
87
- bias_v=None,
88
- add_zero_attn=False,
89
- dropout_p=0,
90
- out_proj_weight=self.c_proj.weight,
91
- out_proj_bias=self.c_proj.bias,
92
- use_separate_proj_weight=True,
93
- training=self.training,
94
- need_weights=False)
95
-
96
- return x[0]
97
-
98
-
99
- class LayerNorm(nn.LayerNorm):
100
- """Subclass torch's LayerNorm to handle fp16."""
101
- def forward(self, x: torch.Tensor):
102
- orig_type = x.dtype
103
- ret = super().forward(x.type(torch.float32))
104
- return ret.type(orig_type)
105
-
106
-
107
- class QuickGELU(nn.Module):
108
- def forward(self, x: torch.Tensor):
109
- return x * torch.sigmoid(1.702 * x)
110
-
111
-
112
- class ResidualAttentionBlock(nn.Module):
113
- def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, use_grad_checkpointing=False):
114
- super().__init__()
115
-
116
- self.attn = nn.MultiheadAttention(d_model, n_head)
117
- self.ln_1 = LayerNorm(d_model)
118
- self.mlp = nn.Sequential(
119
- OrderedDict([("c_fc", nn.Linear(d_model, d_model * 4)), ("gelu", QuickGELU()),
120
- ("c_proj", nn.Linear(d_model * 4, d_model))]))
121
- self.ln_2 = LayerNorm(d_model)
122
- self.attn_mask = attn_mask
123
-
124
- # if use_grad_checkpointing:
125
- # self.attn = checkpoint_wrapper(self.attn)
126
- # self.mlp = checkpoint_wrapper(self.mlp)
127
- # raise NotImplementedError
128
-
129
- def attention(self, x: torch.Tensor):
130
- self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
131
- return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
132
-
133
- def forward(self, x: torch.Tensor):
134
- x = x + self.attention(self.ln_1(x))
135
- x = x + self.mlp(self.ln_2(x))
136
- return x
137
-
138
-
139
- class Transformer(nn.Module):
140
- def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, use_grad_checkpointing=False):
141
- super().__init__()
142
- self.width = width
143
- self.layers = layers
144
- self.resblocks = nn.Sequential(
145
- *[ResidualAttentionBlock(width, heads, attn_mask, use_grad_checkpointing and i > 12) for i in range(layers)])
146
-
147
- def forward(self, x: torch.Tensor):
148
- return self.resblocks(x)
149
-
150
-
151
- class VisionTransformer(nn.Module):
152
- def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int,
153
- use_grad_checkpointing: bool):
154
- super().__init__()
155
- self.input_resolution = input_resolution
156
- self.num_features = width
157
- self.num_heads = heads
158
- self.num_patches = (input_resolution // patch_size)**2
159
- self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
160
-
161
- scale = width**-0.5
162
- self.class_embedding = nn.Parameter(scale * torch.randn(width))
163
- self.positional_embedding = nn.Parameter(scale * torch.randn(self.num_patches + 1, width))
164
- self.ln_pre = LayerNorm(width)
165
-
166
- self.transformer = Transformer(width, layers, heads, use_grad_checkpointing=use_grad_checkpointing)
167
-
168
- # self.ln_final = LayerNorm(width)
169
-
170
- def forward(self, x: torch.Tensor):
171
-
172
- x = self.conv1(x) # shape = [*, width, grid, grid]
173
- x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
174
- x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
175
- x = torch.cat(
176
- [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x],
177
- dim=1) # shape = [*, grid ** 2 + 1, width]
178
- x = x + self.positional_embedding.to(x.dtype)
179
- x = self.ln_pre(x)
180
-
181
- x = x.permute(1, 0, 2) # NLD -> LND
182
- x = self.transformer(x)
183
- x = x.permute(1, 0, 2) # LND -> NLD
184
-
185
- # x = self.ln_final(x)
186
- return x
187
-
188
-
189
- # From PyTorch internals
190
- def _ntuple(n):
191
- def parse(x):
192
- if isinstance(x, collections.abc.Iterable):
193
- return x
194
- return tuple(repeat(x, n))
195
-
196
- return parse
197
-
198
-
199
- to_2tuple = _ntuple(2)
200
-
201
-
202
- def interpolate_pos_embed(model, state_dict, interpolation: str = 'bicubic', seq_dim=1):
203
- # Rescale the grid of position embeddings when loading from state_dict
204
- old_pos_embed = state_dict.get('positional_embedding', None)
205
-
206
- grid_size = round((model.positional_embedding.shape[0] - 1)**0.5)
207
- if old_pos_embed is None:
208
- return
209
- grid_size = to_2tuple(grid_size)
210
- extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)
211
- new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
212
- if new_seq_len == old_pos_embed.shape[0]:
213
- return
214
-
215
- if extra_tokens:
216
- pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
217
- else:
218
- pos_emb_tok, pos_emb_img = None, old_pos_embed
219
-
220
- old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
221
-
222
- print('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)
223
- pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
224
- pos_emb_img = F.interpolate(
225
- pos_emb_img,
226
- size=grid_size,
227
- mode=interpolation,
228
- align_corners=True,
229
- )
230
- pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
231
- if pos_emb_tok is not None:
232
- new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
233
- else:
234
- new_pos_embed = pos_emb_img
235
- state_dict['positional_embedding'] = new_pos_embed
236
-
237
-
238
- def create_clip_vit_L(img_size=224, use_checkpoint=False, precision="fp16"):
239
- model = VisionTransformer(
240
- input_resolution=img_size,
241
- patch_size=14,
242
- width=1024,
243
- layers=23,
244
- heads=16,
245
- use_grad_checkpointing=use_checkpoint,
246
- )
247
- url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/clip_vit_L.pth"
248
- cached_file = download_cached_file(url, check_hash=False, progress=True)
249
- state_dict = torch.load(cached_file, map_location="cpu")
250
- interpolate_pos_embed(model, state_dict)
251
-
252
- incompatible_keys = model.load_state_dict(state_dict, strict=False)
253
- # print(incompatible_keys)
254
-
255
- if precision == "fp16":
256
- convert_weights_to_fp16(model)
257
- return model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT-Chat-UI/.svelte-kit/generated/client/nodes/9.js DELETED
@@ -1 +0,0 @@
1
- export { default as component } from "../../../../src/routes/r/[id]/+page.svelte";
 
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Aivvm.py DELETED
@@ -1,70 +0,0 @@
1
- from __future__ import annotations
2
-
3
- from ..requests import StreamSession
4
- from .base_provider import AsyncGeneratorProvider
5
- from ..typing import AsyncGenerator
6
-
7
- # to recreate this easily, send a post request to https://chat.aivvm.com/api/models
8
- models = {
9
- 'gpt-3.5-turbo': {'id': 'gpt-3.5-turbo', 'name': 'GPT-3.5'},
10
- 'gpt-3.5-turbo-0613': {'id': 'gpt-3.5-turbo-0613', 'name': 'GPT-3.5-0613'},
11
- 'gpt-3.5-turbo-16k': {'id': 'gpt-3.5-turbo-16k', 'name': 'GPT-3.5-16K'},
12
- 'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'},
13
- 'gpt-4': {'id': 'gpt-4', 'name': 'GPT-4'},
14
- 'gpt-4-0613': {'id': 'gpt-4-0613', 'name': 'GPT-4-0613'},
15
- 'gpt-4-32k': {'id': 'gpt-4-32k', 'name': 'GPT-4-32K'},
16
- 'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
17
- }
18
-
19
- class Aivvm(AsyncGeneratorProvider):
20
- url = 'https://chat.aivvm.com'
21
- supports_gpt_35_turbo = True
22
- supports_gpt_4 = True
23
- working = True
24
-
25
- @classmethod
26
- async def create_async_generator(
27
- cls,
28
- model: str,
29
- messages: list[dict[str, str]],
30
- stream: bool,
31
- timeout: int = 30,
32
- **kwargs
33
- ) -> AsyncGenerator:
34
- if not model:
35
- model = "gpt-3.5-turbo"
36
- elif model not in models:
37
- raise ValueError(f"Model is not supported: {model}")
38
-
39
- json_data = {
40
- "model" : models[model],
41
- "messages" : messages,
42
- "key" : "",
43
- "prompt" : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
44
- "temperature" : kwargs.get("temperature", 0.7)
45
- }
46
- headers = {
47
- "Accept": "*/*",
48
- "Origin": cls.url,
49
- "Referer": f"{cls.url}/",
50
- }
51
- async with StreamSession(impersonate="chrome107", headers=headers, timeout=timeout) as session:
52
- async with session.post(f"{cls.url}/api/chat", json=json_data) as response:
53
- response.raise_for_status()
54
- async for chunk in response.iter_content():
55
- if b'Access denied | chat.aivvm.com used Cloudflare' in chunk:
56
- raise ValueError("Rate Limit | use another provider")
57
-
58
- yield chunk.decode()
59
-
60
- @classmethod
61
- @property
62
- def params(cls):
63
- params = [
64
- ('model', 'str'),
65
- ('messages', 'list[dict[str, str]]'),
66
- ('stream', 'bool'),
67
- ('temperature', 'float'),
68
- ]
69
- param = ', '.join([': '.join(p) for p in params])
70
- return f'g4f.provider.{cls.__name__} supports: ({param})'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/pokemon_server.py DELETED
@@ -1,78 +0,0 @@
1
- from fastapi import FastAPI
2
- from fastapi.middleware.cors import CORSMiddleware
3
- from pydantic import BaseModel, Field
4
- from typing import Set, List, Dict
5
- from agentverse.simulation import Simulation
6
- from agentverse.message import Message
7
-
8
-
9
- class UserRequest(BaseModel):
10
- content: str = Field(default="")
11
- sender: str = Field(default="Brendan")
12
- receiver: str
13
- receiver_id: int
14
-
15
-
16
- class RoutineRequest(BaseModel):
17
- agent_ids: List[int]
18
-
19
-
20
- class UpdateRequest(BaseModel):
21
- agent_locations: Dict[str, str]
22
-
23
-
24
- app = FastAPI()
25
-
26
- app.add_middleware(
27
- CORSMiddleware,
28
- allow_origins=["*"],
29
- allow_credentials=True,
30
- allow_methods=["*"],
31
- allow_headers=["*"],
32
- )
33
-
34
- agent_verse = Simulation.from_task("pokemon")
35
-
36
-
37
- @app.get("/")
38
- def health_check():
39
- return {"status": "ok"}
40
-
41
-
42
- @app.post("/chat")
43
- def chat(message: UserRequest):
44
- content = message.content
45
- receiver = message.receiver
46
- receiver_id = message.receiver_id
47
- response = agent_verse.next(
48
- is_player=True,
49
- player_content=content,
50
- receiver=receiver,
51
- receiver_id=receiver_id,
52
- )
53
- return response[0].dict()
54
-
55
-
56
- @app.post("/make_decision")
57
- def update(message: RoutineRequest):
58
- response = agent_verse.next(is_player=False, agent_ids=message.agent_ids)
59
- return [r.dict() for r in response]
60
- # import json
61
-
62
- # return [
63
- # # {
64
- # # "content": json.dumps(
65
- # # {
66
- # # "to": "Maxie",
67
- # # "action": "Speak",
68
- # # "text": "Hello Hello Hello Hello Hello Hello",
69
- # # }
70
- # # )
71
- # # }
72
- # {"content": json.dumps({"to": "Pokémon Center", "action": "MoveTo"})}
73
- # ]
74
-
75
-
76
- @app.post("/update_location")
77
- def update_location(message: UpdateRequest):
78
- agent_verse.update_state(message.agent_locations)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/utils/PreLayoutChild.js DELETED
@@ -1,10 +0,0 @@
1
- import CopyState from '../../utils/CopyState';
2
-
3
- var PreLayoutChild = function (child) {
4
- if (this.sizerEventsEnable) {
5
- CopyState(child, this.getChildPrevState(child));
6
- this.layoutedChildren.push(child);
7
- }
8
- }
9
-
10
- export default PreLayoutChild;
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/bbcodetext/Factory.js DELETED
@@ -1,13 +0,0 @@
1
- import BBCodeText from './BBCodeText.js';
2
- import ObjectFactory from '../ObjectFactory.js';
3
- import SetValue from '../../../plugins/utils/object/SetValue.js';
4
-
5
- ObjectFactory.register('BBCodeText', function (x, y, text, style) {
6
- var gameObject = new BBCodeText(this.scene, x, y, text, style);
7
- this.scene.add.existing(gameObject);
8
- return gameObject;
9
- });
10
-
11
- SetValue(window, 'RexPlugins.UI.BBCodeText', BBCodeText);
12
-
13
- export default BBCodeText;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/gui_utils/text_utils.py DELETED
@@ -1,141 +0,0 @@
1
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- import functools
10
- from typing import Optional
11
-
12
- import dnnlib
13
- import numpy as np
14
- import PIL.Image
15
- import PIL.ImageFont
16
- import scipy.ndimage
17
-
18
- from . import gl_utils
19
-
20
- # ----------------------------------------------------------------------------
21
-
22
-
23
- def get_default_font():
24
- # Open Sans regular
25
- url = 'http://fonts.gstatic.com/s/opensans/v17/mem8YaGs126MiZpBA-U1UpcaXcl0Aw.ttf'
26
- return dnnlib.util.open_url(url, return_filename=True)
27
-
28
- # ----------------------------------------------------------------------------
29
-
30
-
31
- @functools.lru_cache(maxsize=None)
32
- def get_pil_font(font=None, size=32):
33
- if font is None:
34
- font = get_default_font()
35
- return PIL.ImageFont.truetype(font=font, size=size)
36
-
37
- # ----------------------------------------------------------------------------
38
-
39
-
40
- def get_array(string, *, dropshadow_radius: int = None, **kwargs):
41
- if dropshadow_radius is not None:
42
- offset_x = int(np.ceil(dropshadow_radius*2/3))
43
- offset_y = int(np.ceil(dropshadow_radius*2/3))
44
- return _get_array_priv(string, dropshadow_radius=dropshadow_radius, offset_x=offset_x, offset_y=offset_y, **kwargs)
45
- else:
46
- return _get_array_priv(string, **kwargs)
47
-
48
-
49
- @functools.lru_cache(maxsize=10000)
50
- def _get_array_priv(
51
- string: str, *,
52
- size: int = 32,
53
- max_width: Optional[int] = None,
54
- max_height: Optional[int] = None,
55
- min_size=10,
56
- shrink_coef=0.8,
57
- dropshadow_radius: int = None,
58
- offset_x: int = None,
59
- offset_y: int = None,
60
- **kwargs
61
- ):
62
- cur_size = size
63
- array = None
64
- while True:
65
- if dropshadow_radius is not None:
66
- # separate implementation for dropshadow text rendering
67
- array = _get_array_impl_dropshadow(
68
- string, size=cur_size, radius=dropshadow_radius, offset_x=offset_x, offset_y=offset_y, **kwargs)
69
- else:
70
- array = _get_array_impl(string, size=cur_size, **kwargs)
71
- height, width, _ = array.shape
72
- if (max_width is None or width <= max_width) and (max_height is None or height <= max_height) or (cur_size <= min_size):
73
- break
74
- cur_size = max(int(cur_size * shrink_coef), min_size)
75
- return array
76
-
77
- # ----------------------------------------------------------------------------
78
-
79
-
80
- @functools.lru_cache(maxsize=10000)
81
- def _get_array_impl(string, *, font=None, size=32, outline=0, outline_pad=3, outline_coef=3, outline_exp=2, line_pad: int = None):
82
- pil_font = get_pil_font(font=font, size=size)
83
- lines = [pil_font.getmask(line, 'L') for line in string.split('\n')]
84
- lines = [np.array(line, dtype=np.uint8).reshape(
85
- [line.size[1], line.size[0]]) for line in lines]
86
- width = max(line.shape[1] for line in lines)
87
- lines = [np.pad(line, ((0, 0), (0, width - line.shape[1])),
88
- mode='constant') for line in lines]
89
- line_spacing = line_pad if line_pad is not None else size // 2
90
- lines = [np.pad(line, ((0, line_spacing), (0, 0)), mode='constant')
91
- for line in lines[:-1]] + lines[-1:]
92
- mask = np.concatenate(lines, axis=0)
93
- alpha = mask
94
- if outline > 0:
95
- mask = np.pad(mask, int(np.ceil(outline * outline_pad)),
96
- mode='constant', constant_values=0)
97
- alpha = mask.astype(np.float32) / 255
98
- alpha = scipy.ndimage.gaussian_filter(alpha, outline)
99
- alpha = 1 - np.maximum(1 - alpha * outline_coef, 0) ** outline_exp
100
- alpha = (alpha * 255 + 0.5).clip(0, 255).astype(np.uint8)
101
- alpha = np.maximum(alpha, mask)
102
- return np.stack([mask, alpha], axis=-1)
103
-
104
- # ----------------------------------------------------------------------------
105
-
106
-
107
- @functools.lru_cache(maxsize=10000)
108
- def _get_array_impl_dropshadow(string, *, font=None, size=32, radius: int, offset_x: int, offset_y: int, line_pad: int = None, **kwargs):
109
- assert (offset_x > 0) and (offset_y > 0)
110
- pil_font = get_pil_font(font=font, size=size)
111
- lines = [pil_font.getmask(line, 'L') for line in string.split('\n')]
112
- lines = [np.array(line, dtype=np.uint8).reshape(
113
- [line.size[1], line.size[0]]) for line in lines]
114
- width = max(line.shape[1] for line in lines)
115
- lines = [np.pad(line, ((0, 0), (0, width - line.shape[1])),
116
- mode='constant') for line in lines]
117
- line_spacing = line_pad if line_pad is not None else size // 2
118
- lines = [np.pad(line, ((0, line_spacing), (0, 0)), mode='constant')
119
- for line in lines[:-1]] + lines[-1:]
120
- mask = np.concatenate(lines, axis=0)
121
- alpha = mask
122
-
123
- mask = np.pad(mask, 2*radius + max(abs(offset_x), abs(offset_y)),
124
- mode='constant', constant_values=0)
125
- alpha = mask.astype(np.float32) / 255
126
- alpha = scipy.ndimage.gaussian_filter(alpha, radius)
127
- alpha = 1 - np.maximum(1 - alpha * 1.5, 0) ** 1.4
128
- alpha = (alpha * 255 + 0.5).clip(0, 255).astype(np.uint8)
129
- alpha = np.pad(alpha, [(offset_y, 0), (offset_x, 0)],
130
- mode='constant')[:-offset_y, :-offset_x]
131
- alpha = np.maximum(alpha, mask)
132
- return np.stack([mask, alpha], axis=-1)
133
-
134
- # ----------------------------------------------------------------------------
135
-
136
-
137
- @functools.lru_cache(maxsize=10000)
138
- def get_texture(string, bilinear=True, mipmap=True, **kwargs):
139
- return gl_utils.Texture(image=get_array(string, **kwargs), bilinear=bilinear, mipmap=mipmap)
140
-
141
- # ----------------------------------------------------------------------------
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/stochastic_karras_ve/__init__.py DELETED
@@ -1 +0,0 @@
1
- from .pipeline_stochastic_karras_ve import KarrasVePipeline
 
 
spaces/Andy1621/UniFormerV2_mit_demo/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: UniFormerV2 Mit Demo
3
- emoji: 📊
4
- colorFrom: yellow
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.16.0
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py DELETED
@@ -1,11 +0,0 @@
1
- _base_ = '../dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py'
2
- model = dict(
3
- backbone=dict(
4
- norm_cfg=dict(type='SyncBN', requires_grad=True),
5
- norm_eval=False,
6
- plugins=[
7
- dict(
8
- cfg=dict(type='ContextBlock', ratio=1. / 4),
9
- stages=(False, True, True, True),
10
- position='after_conv3')
11
- ]))
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/htc/htc_r101_fpn_20e_coco.py DELETED
@@ -1,5 +0,0 @@
1
- _base_ = './htc_r50_fpn_1x_coco.py'
2
- model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
3
- # learning policy
4
- lr_config = dict(step=[16, 19])
5
- runner = dict(type='EpochBasedRunner', max_epochs=20)
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py DELETED
@@ -1,52 +0,0 @@
1
- _base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py'
2
- num_proposals = 300
3
- model = dict(
4
- rpn_head=dict(num_proposals=num_proposals),
5
- test_cfg=dict(
6
- _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals)))
7
- img_norm_cfg = dict(
8
- mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
9
-
10
- # augmentation strategy originates from DETR.
11
- train_pipeline = [
12
- dict(type='LoadImageFromFile'),
13
- dict(type='LoadAnnotations', with_bbox=True),
14
- dict(type='RandomFlip', flip_ratio=0.5),
15
- dict(
16
- type='AutoAugment',
17
- policies=[[
18
- dict(
19
- type='Resize',
20
- img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
21
- (608, 1333), (640, 1333), (672, 1333), (704, 1333),
22
- (736, 1333), (768, 1333), (800, 1333)],
23
- multiscale_mode='value',
24
- keep_ratio=True)
25
- ],
26
- [
27
- dict(
28
- type='Resize',
29
- img_scale=[(400, 1333), (500, 1333), (600, 1333)],
30
- multiscale_mode='value',
31
- keep_ratio=True),
32
- dict(
33
- type='RandomCrop',
34
- crop_type='absolute_range',
35
- crop_size=(384, 600),
36
- allow_negative_crop=True),
37
- dict(
38
- type='Resize',
39
- img_scale=[(480, 1333), (512, 1333), (544, 1333),
40
- (576, 1333), (608, 1333), (640, 1333),
41
- (672, 1333), (704, 1333), (736, 1333),
42
- (768, 1333), (800, 1333)],
43
- multiscale_mode='value',
44
- override=True,
45
- keep_ratio=True)
46
- ]]),
47
- dict(type='Normalize', **img_norm_cfg),
48
- dict(type='Pad', size_divisor=32),
49
- dict(type='DefaultFormatBundle'),
50
- dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
51
- ]
52
- data = dict(train=dict(pipeline=train_pipeline))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_detection/tools/dataset_converters/cityscapes.py DELETED
@@ -1,151 +0,0 @@
1
- import argparse
2
- import glob
3
- import os.path as osp
4
-
5
- import cityscapesscripts.helpers.labels as CSLabels
6
- import mmcv
7
- import numpy as np
8
- import pycocotools.mask as maskUtils
9
-
10
-
11
- def collect_files(img_dir, gt_dir):
12
- suffix = 'leftImg8bit.png'
13
- files = []
14
- for img_file in glob.glob(osp.join(img_dir, '**/*.png')):
15
- assert img_file.endswith(suffix), img_file
16
- inst_file = gt_dir + img_file[
17
- len(img_dir):-len(suffix)] + 'gtFine_instanceIds.png'
18
- # Note that labelIds are not converted to trainId for seg map
19
- segm_file = gt_dir + img_file[
20
- len(img_dir):-len(suffix)] + 'gtFine_labelIds.png'
21
- files.append((img_file, inst_file, segm_file))
22
- assert len(files), f'No images found in {img_dir}'
23
- print(f'Loaded {len(files)} images from {img_dir}')
24
-
25
- return files
26
-
27
-
28
- def collect_annotations(files, nproc=1):
29
- print('Loading annotation images')
30
- if nproc > 1:
31
- images = mmcv.track_parallel_progress(
32
- load_img_info, files, nproc=nproc)
33
- else:
34
- images = mmcv.track_progress(load_img_info, files)
35
-
36
- return images
37
-
38
-
39
- def load_img_info(files):
40
- img_file, inst_file, segm_file = files
41
- inst_img = mmcv.imread(inst_file, 'unchanged')
42
- # ids < 24 are stuff labels (filtering them first is about 5% faster)
43
- unique_inst_ids = np.unique(inst_img[inst_img >= 24])
44
- anno_info = []
45
- for inst_id in unique_inst_ids:
46
- # For non-crowd annotations, inst_id // 1000 is the label_id
47
- # Crowd annotations have <1000 instance ids
48
- label_id = inst_id // 1000 if inst_id >= 1000 else inst_id
49
- label = CSLabels.id2label[label_id]
50
- if not label.hasInstances or label.ignoreInEval:
51
- continue
52
-
53
- category_id = label.id
54
- iscrowd = int(inst_id < 1000)
55
- mask = np.asarray(inst_img == inst_id, dtype=np.uint8, order='F')
56
- mask_rle = maskUtils.encode(mask[:, :, None])[0]
57
-
58
- area = maskUtils.area(mask_rle)
59
- # convert to COCO style XYWH format
60
- bbox = maskUtils.toBbox(mask_rle)
61
-
62
- # for json encoding
63
- mask_rle['counts'] = mask_rle['counts'].decode()
64
-
65
- anno = dict(
66
- iscrowd=iscrowd,
67
- category_id=category_id,
68
- bbox=bbox.tolist(),
69
- area=area.tolist(),
70
- segmentation=mask_rle)
71
- anno_info.append(anno)
72
- video_name = osp.basename(osp.dirname(img_file))
73
- img_info = dict(
74
- # remove img_prefix for filename
75
- file_name=osp.join(video_name, osp.basename(img_file)),
76
- height=inst_img.shape[0],
77
- width=inst_img.shape[1],
78
- anno_info=anno_info,
79
- segm_file=osp.join(video_name, osp.basename(segm_file)))
80
-
81
- return img_info
82
-
83
-
84
- def cvt_annotations(image_infos, out_json_name):
85
- out_json = dict()
86
- img_id = 0
87
- ann_id = 0
88
- out_json['images'] = []
89
- out_json['categories'] = []
90
- out_json['annotations'] = []
91
- for image_info in image_infos:
92
- image_info['id'] = img_id
93
- anno_infos = image_info.pop('anno_info')
94
- out_json['images'].append(image_info)
95
- for anno_info in anno_infos:
96
- anno_info['image_id'] = img_id
97
- anno_info['id'] = ann_id
98
- out_json['annotations'].append(anno_info)
99
- ann_id += 1
100
- img_id += 1
101
- for label in CSLabels.labels:
102
- if label.hasInstances and not label.ignoreInEval:
103
- cat = dict(id=label.id, name=label.name)
104
- out_json['categories'].append(cat)
105
-
106
- if len(out_json['annotations']) == 0:
107
- out_json.pop('annotations')
108
-
109
- mmcv.dump(out_json, out_json_name)
110
- return out_json
111
-
112
-
113
- def parse_args():
114
- parser = argparse.ArgumentParser(
115
- description='Convert Cityscapes annotations to COCO format')
116
- parser.add_argument('cityscapes_path', help='cityscapes data path')
117
- parser.add_argument('--img-dir', default='leftImg8bit', type=str)
118
- parser.add_argument('--gt-dir', default='gtFine', type=str)
119
- parser.add_argument('-o', '--out-dir', help='output path')
120
- parser.add_argument(
121
- '--nproc', default=1, type=int, help='number of process')
122
- args = parser.parse_args()
123
- return args
124
-
125
-
126
- def main():
127
- args = parse_args()
128
- cityscapes_path = args.cityscapes_path
129
- out_dir = args.out_dir if args.out_dir else cityscapes_path
130
- mmcv.mkdir_or_exist(out_dir)
131
-
132
- img_dir = osp.join(cityscapes_path, args.img_dir)
133
- gt_dir = osp.join(cityscapes_path, args.gt_dir)
134
-
135
- set_name = dict(
136
- train='instancesonly_filtered_gtFine_train.json',
137
- val='instancesonly_filtered_gtFine_val.json',
138
- test='instancesonly_filtered_gtFine_test.json')
139
-
140
- for split, json_name in set_name.items():
141
- print(f'Converting {split} into {json_name}')
142
- with mmcv.Timer(
143
- print_tmpl='It took {}s to convert Cityscapes annotation'):
144
- files = collect_files(
145
- osp.join(img_dir, split), osp.join(gt_dir, split))
146
- image_infos = collect_annotations(files, nproc=args.nproc)
147
- cvt_annotations(image_infos, osp.join(out_dir, json_name))
148
-
149
-
150
- if __name__ == '__main__':
151
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andyrasika/Andyrasika-avatar_diffusion/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Andyrasika-avatar Diffusion
3
- emoji: 📉
4
- colorFrom: green
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.39.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anmol12385/chat123/app.py DELETED
@@ -1,55 +0,0 @@
1
- import os
2
- import openai
3
- import gradio as gr
4
-
5
- #if you have OpenAI API key as an environment variable, enable the below
6
- #openai.api_key = os.getenv("OPENAI_API_KEY")
7
-
8
- #if you have OpenAI API key as a string, enable the below
9
- openai.api_key = "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"
10
-
11
- start_sequence = "\nAI:"
12
- restart_sequence = "\nHuman: "
13
-
14
- prompt = "The following is a conversation with an AI assistant Generated by Anmol. The assistant is helpful, creative, clever, and very friendly.\n\nHuman: Hello, who are you?\nAI: I am an AI created by OpenAI. How can I help you today?\nHuman: "
15
-
16
- def openai_create(prompt):
17
-
18
- response = openai.Completion.create(
19
- model="text-davinci-003",
20
- prompt=prompt,
21
- temperature=0.9,
22
- max_tokens=4000,
23
- top_p=1,
24
- frequency_penalty=0,
25
- presence_penalty=0.6,
26
- stop=[" Human:", " AI:"]
27
- )
28
-
29
- return response.choices[0].text
30
-
31
-
32
-
33
- def chatgpt_clone(input, history):
34
- history = history or []
35
- s = list(sum(history, ()))
36
- s.append(input)
37
- inp = ' '.join(s)
38
- output = openai_create(inp)
39
- history.append((input, output))
40
- return history, history
41
-
42
-
43
- block = gr.Blocks()
44
-
45
-
46
- with block:
47
- gr.Markdown("""<h1><center>Smart Code Generator by Anmol</center></h1>
48
- """)
49
- chatbot = gr.Chatbot()
50
- message = gr.Textbox(placeholder=prompt)
51
- state = gr.State()
52
- submit = gr.Button("SEND")
53
- submit.click(chatgpt_clone, inputs=[message, state], outputs=[chatbot, state])
54
-
55
- block.launch(debug = True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-123/ImageNet-Editing/object_removal/TFill/ui_main.py DELETED
@@ -1,12 +0,0 @@
1
- import sys
2
- from options.test_options import TestOptions
3
- from gui.ui_model import ui_model
4
- from PyQt5 import QtWidgets
5
-
6
-
7
- if __name__=="__main__":
8
- app = QtWidgets.QApplication(sys.argv)
9
- opt = TestOptions().parse()
10
- my_gui = ui_model(opt)
11
- my_gui.show()
12
- sys.exit(app.exec_())
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/fileio/__init__.py DELETED
@@ -1,11 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- from .file_client import BaseStorageBackend, FileClient
3
- from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
4
- from .io import dump, load, register_handler
5
- from .parse import dict_from_file, list_from_file
6
-
7
- __all__ = [
8
- 'BaseStorageBackend', 'FileClient', 'load', 'dump', 'register_handler',
9
- 'BaseFileHandler', 'JsonHandler', 'PickleHandler', 'YamlHandler',
10
- 'list_from_file', 'dict_from_file'
11
- ]
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmcv/ops/border_align.py DELETED
@@ -1,109 +0,0 @@
1
- # Copyright (c) OpenMMLab. All rights reserved.
2
- # modified from
3
- # https://github.com/Megvii-BaseDetection/cvpods/blob/master/cvpods/layers/border_align.py
4
-
5
- import torch
6
- import torch.nn as nn
7
- from torch.autograd import Function
8
- from torch.autograd.function import once_differentiable
9
-
10
- from ..utils import ext_loader
11
-
12
- ext_module = ext_loader.load_ext(
13
- '_ext', ['border_align_forward', 'border_align_backward'])
14
-
15
-
16
- class BorderAlignFunction(Function):
17
-
18
- @staticmethod
19
- def symbolic(g, input, boxes, pool_size):
20
- return g.op(
21
- 'mmcv::MMCVBorderAlign', input, boxes, pool_size_i=pool_size)
22
-
23
- @staticmethod
24
- def forward(ctx, input, boxes, pool_size):
25
- ctx.pool_size = pool_size
26
- ctx.input_shape = input.size()
27
-
28
- assert boxes.ndim == 3, 'boxes must be with shape [B, H*W, 4]'
29
- assert boxes.size(2) == 4, \
30
- 'the last dimension of boxes must be (x1, y1, x2, y2)'
31
- assert input.size(1) % 4 == 0, \
32
- 'the channel for input feature must be divisible by factor 4'
33
-
34
- # [B, C//4, H*W, 4]
35
- output_shape = (input.size(0), input.size(1) // 4, boxes.size(1), 4)
36
- output = input.new_zeros(output_shape)
37
- # `argmax_idx` only used for backward
38
- argmax_idx = input.new_zeros(output_shape).to(torch.int)
39
-
40
- ext_module.border_align_forward(
41
- input, boxes, output, argmax_idx, pool_size=ctx.pool_size)
42
-
43
- ctx.save_for_backward(boxes, argmax_idx)
44
- return output
45
-
46
- @staticmethod
47
- @once_differentiable
48
- def backward(ctx, grad_output):
49
- boxes, argmax_idx = ctx.saved_tensors
50
- grad_input = grad_output.new_zeros(ctx.input_shape)
51
- # complex head architecture may cause grad_output uncontiguous
52
- grad_output = grad_output.contiguous()
53
- ext_module.border_align_backward(
54
- grad_output,
55
- boxes,
56
- argmax_idx,
57
- grad_input,
58
- pool_size=ctx.pool_size)
59
- return grad_input, None, None
60
-
61
-
62
- border_align = BorderAlignFunction.apply
63
-
64
-
65
- class BorderAlign(nn.Module):
66
- r"""Border align pooling layer.
67
-
68
- Applies border_align over the input feature based on predicted bboxes.
69
- The details were described in the paper
70
- `BorderDet: Border Feature for Dense Object Detection
71
- <https://arxiv.org/abs/2007.11056>`_.
72
-
73
- For each border line (e.g. top, left, bottom or right) of each box,
74
- border_align does the following:
75
- 1. uniformly samples `pool_size`+1 positions on this line, involving \
76
- the start and end points.
77
- 2. the corresponding features on these points are computed by \
78
- bilinear interpolation.
79
- 3. max pooling over all the `pool_size`+1 positions are used for \
80
- computing pooled feature.
81
-
82
- Args:
83
- pool_size (int): number of positions sampled over the boxes' borders
84
- (e.g. top, bottom, left, right).
85
-
86
- """
87
-
88
- def __init__(self, pool_size):
89
- super(BorderAlign, self).__init__()
90
- self.pool_size = pool_size
91
-
92
- def forward(self, input, boxes):
93
- """
94
- Args:
95
- input: Features with shape [N,4C,H,W]. Channels ranged in [0,C),
96
- [C,2C), [2C,3C), [3C,4C) represent the top, left, bottom,
97
- right features respectively.
98
- boxes: Boxes with shape [N,H*W,4]. Coordinate format (x1,y1,x2,y2).
99
-
100
- Returns:
101
- Tensor: Pooled features with shape [N,C,H*W,4]. The order is
102
- (top,left,bottom,right) for the last dimension.
103
- """
104
- return border_align(input, boxes, self.pool_size)
105
-
106
- def __repr__(self):
107
- s = self.__class__.__name__
108
- s += f'(pool_size={self.pool_size})'
109
- return s
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Apk/anything-v3.0/app.py DELETED
@@ -1,276 +0,0 @@
1
- from diffusers import AutoencoderKL, UNet2DConditionModel, StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DPMSolverMultistepScheduler
2
- import gradio as gr
3
- import torch
4
- from PIL import Image
5
- import utils
6
- import datetime
7
- import time
8
- import psutil
9
-
10
- start_time = time.time()
11
- is_colab = utils.is_google_colab()
12
-
13
- class Model:
14
- def __init__(self, name, path="", prefix=""):
15
- self.name = name
16
- self.path = path
17
- self.prefix = prefix
18
- self.pipe_t2i = None
19
- self.pipe_i2i = None
20
-
21
- models = [
22
- Model("anything v3", "Linaqruf/anything-v3.0", "anything v3 style"),
23
- ]
24
- # Model("Spider-Verse", "nitrosocke/spider-verse-diffusion", "spiderverse style "),
25
- # Model("Balloon Art", "Fictiverse/Stable_Diffusion_BalloonArt_Model", "BalloonArt "),
26
- # Model("Elden Ring", "nitrosocke/elden-ring-diffusion", "elden ring style "),
27
- # Model("Tron Legacy", "dallinmackay/Tron-Legacy-diffusion", "trnlgcy ")
28
- #Model("Pokémon", "lambdalabs/sd-pokemon-diffusers", ""),
29
- #Model("Pony Diffusion", "AstraliteHeart/pony-diffusion", ""),
30
- #Model("Robo Diffusion", "nousr/robo-diffusion", ""),
31
-
32
- scheduler = DPMSolverMultistepScheduler(
33
- beta_start=0.00085,
34
- beta_end=0.012,
35
- beta_schedule="scaled_linear",
36
- num_train_timesteps=1000,
37
- trained_betas=None,
38
- predict_epsilon=True,
39
- thresholding=False,
40
- algorithm_type="dpmsolver++",
41
- solver_type="midpoint",
42
- lower_order_final=True,
43
- )
44
-
45
- custom_model = None
46
- if is_colab:
47
- models.insert(0, Model("Custom model"))
48
- custom_model = models[0]
49
-
50
- last_mode = "txt2img"
51
- current_model = models[1] if is_colab else models[0]
52
- current_model_path = current_model.path
53
-
54
- if is_colab:
55
- pipe = StableDiffusionPipeline.from_pretrained(current_model.path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False))
56
-
57
- else: # download all models
58
- print(f"{datetime.datetime.now()} Downloading vae...")
59
- vae = AutoencoderKL.from_pretrained(current_model.path, subfolder="vae", torch_dtype=torch.float16)
60
- for model in models:
61
- try:
62
- print(f"{datetime.datetime.now()} Downloading {model.name} model...")
63
- unet = UNet2DConditionModel.from_pretrained(model.path, subfolder="unet", torch_dtype=torch.float16)
64
- model.pipe_t2i = StableDiffusionPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
65
- model.pipe_i2i = StableDiffusionImg2ImgPipeline.from_pretrained(model.path, unet=unet, vae=vae, torch_dtype=torch.float16, scheduler=scheduler)
66
- except Exception as e:
67
- print(f"{datetime.datetime.now()} Failed to load model " + model.name + ": " + str(e))
68
- models.remove(model)
69
- pipe = models[0].pipe_t2i
70
-
71
- if torch.cuda.is_available():
72
- pipe = pipe.to("cuda")
73
-
74
- device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
75
-
76
- def error_str(error, title="Error"):
77
- return f"""#### {title}
78
- {error}""" if error else ""
79
-
80
- def custom_model_changed(path):
81
- models[0].path = path
82
- global current_model
83
- current_model = models[0]
84
-
85
- def on_model_change(model_name):
86
-
87
- prefix = "Enter prompt. \"" + next((m.prefix for m in models if m.name == model_name), None) + "\" is prefixed automatically" if model_name != models[0].name else "Don't forget to use the custom model prefix in the prompt!"
88
-
89
- return gr.update(visible = model_name == models[0].name), gr.update(placeholder=prefix)
90
-
91
- def inference(model_name, prompt, guidance, steps, width=512, height=512, seed=0, img=None, strength=0.5, neg_prompt=""):
92
-
93
- print(psutil.virtual_memory()) # print memory usage
94
-
95
- global current_model
96
- for model in models:
97
- if model.name == model_name:
98
- current_model = model
99
- model_path = current_model.path
100
-
101
- generator = torch.Generator('cuda').manual_seed(seed) if seed != 0 else None
102
-
103
- try:
104
- if img is not None:
105
- return img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator), None
106
- else:
107
- return txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator), None
108
- except Exception as e:
109
- return None, error_str(e)
110
-
111
- def txt_to_img(model_path, prompt, neg_prompt, guidance, steps, width, height, generator):
112
-
113
- print(f"{datetime.datetime.now()} txt_to_img, model: {current_model.name}")
114
-
115
- global last_mode
116
- global pipe
117
- global current_model_path
118
- if model_path != current_model_path or last_mode != "txt2img":
119
- current_model_path = model_path
120
-
121
- if is_colab or current_model == custom_model:
122
- pipe = StableDiffusionPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False))
123
- else:
124
- pipe = pipe.to("cpu")
125
- pipe = current_model.pipe_t2i
126
-
127
- if torch.cuda.is_available():
128
- pipe = pipe.to("cuda")
129
- last_mode = "txt2img"
130
-
131
- prompt = current_model.prefix + prompt
132
- result = pipe(
133
- prompt,
134
- negative_prompt = neg_prompt,
135
- # num_images_per_prompt=n_images,
136
- num_inference_steps = int(steps),
137
- guidance_scale = guidance,
138
- width = width,
139
- height = height,
140
- generator = generator)
141
-
142
- return replace_nsfw_images(result)
143
-
144
- def img_to_img(model_path, prompt, neg_prompt, img, strength, guidance, steps, width, height, generator):
145
-
146
- print(f"{datetime.datetime.now()} img_to_img, model: {model_path}")
147
-
148
- global last_mode
149
- global pipe
150
- global current_model_path
151
- if model_path != current_model_path or last_mode != "img2img":
152
- current_model_path = model_path
153
-
154
- if is_colab or current_model == custom_model:
155
- pipe = StableDiffusionImg2ImgPipeline.from_pretrained(current_model_path, torch_dtype=torch.float16, scheduler=scheduler, safety_checker=lambda images, clip_input: (images, False))
156
- else:
157
- pipe = pipe.to("cpu")
158
- pipe = current_model.pipe_i2i
159
-
160
- if torch.cuda.is_available():
161
- pipe = pipe.to("cuda")
162
- last_mode = "img2img"
163
-
164
- prompt = current_model.prefix + prompt
165
- ratio = min(height / img.height, width / img.width)
166
- img = img.resize((int(img.width * ratio), int(img.height * ratio)), Image.LANCZOS)
167
- result = pipe(
168
- prompt,
169
- negative_prompt = neg_prompt,
170
- # num_images_per_prompt=n_images,
171
- init_image = img,
172
- num_inference_steps = int(steps),
173
- strength = strength,
174
- guidance_scale = guidance,
175
- width = width,
176
- height = height,
177
- generator = generator)
178
-
179
- return replace_nsfw_images(result)
180
-
181
- def replace_nsfw_images(results):
182
-
183
- if is_colab:
184
- return results.images[0]
185
-
186
- for i in range(len(results.images)):
187
- if results.nsfw_content_detected[i]:
188
- results.images[i] = Image.open("nsfw.png")
189
- return results.images[0]
190
-
191
- css = """.finetuned-diffusion-div div{display:inline-flex;align-items:center;gap:.8rem;font-size:1.75rem}.finetuned-diffusion-div div h1{font-weight:900;margin-bottom:7px}.finetuned-diffusion-div p{margin-bottom:10px;font-size:94%}a{text-decoration:underline}.tabs{margin-top:0;margin-bottom:0}#gallery{min-height:20rem}
192
- """
193
- with gr.Blocks(css=css) as demo:
194
- gr.HTML(
195
- f"""
196
- <div class="finetuned-diffusion-div">
197
- <div>
198
- <h1>Anything V3</h1>
199
- </div>
200
- <p>
201
- Demo for Anything V3
202
- </p>
203
- <p>This demo is slow on cpu, to use it upgrade to gpu by going to settings after duplicating this space: <a style="display:inline-block" href="https://huggingface.co/spaces/akhaliq/anything-v3.0?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a> </p>
204
- </p>
205
- </div>
206
- """
207
- )
208
- with gr.Row():
209
-
210
- with gr.Column(scale=55):
211
- with gr.Group():
212
- model_name = gr.Dropdown(label="Model", choices=[m.name for m in models], value=current_model.name)
213
- with gr.Box(visible=False) as custom_model_group:
214
- custom_model_path = gr.Textbox(label="Custom model path", placeholder="Path to model, e.g. nitrosocke/Arcane-Diffusion", interactive=True)
215
- gr.HTML("<div><font size='2'>Custom models have to be downloaded first, so give it some time.</font></div>")
216
-
217
- with gr.Row():
218
- prompt = gr.Textbox(label="Prompt", show_label=False, max_lines=2,placeholder="Enter prompt. Style applied automatically").style(container=False)
219
- generate = gr.Button(value="Generate").style(rounded=(False, True, True, False))
220
-
221
-
222
- image_out = gr.Image(height=512)
223
- # gallery = gr.Gallery(
224
- # label="Generated images", show_label=False, elem_id="gallery"
225
- # ).style(grid=[1], height="auto")
226
- error_output = gr.Markdown()
227
-
228
- with gr.Column(scale=45):
229
- with gr.Tab("Options"):
230
- with gr.Group():
231
- neg_prompt = gr.Textbox(label="Negative prompt", placeholder="What to exclude from the image")
232
-
233
- # n_images = gr.Slider(label="Images", value=1, minimum=1, maximum=4, step=1)
234
-
235
- with gr.Row():
236
- guidance = gr.Slider(label="Guidance scale", value=7.5, maximum=15)
237
- steps = gr.Slider(label="Steps", value=25, minimum=2, maximum=75, step=1)
238
-
239
- with gr.Row():
240
- width = gr.Slider(label="Width", value=512, minimum=64, maximum=1024, step=8)
241
- height = gr.Slider(label="Height", value=512, minimum=64, maximum=1024, step=8)
242
-
243
- seed = gr.Slider(0, 2147483647, label='Seed (0 = random)', value=0, step=1)
244
-
245
- with gr.Tab("Image to image"):
246
- with gr.Group():
247
- image = gr.Image(label="Image", height=256, tool="editor", type="pil")
248
- strength = gr.Slider(label="Transformation strength", minimum=0, maximum=1, step=0.01, value=0.5)
249
-
250
- if is_colab:
251
- model_name.change(on_model_change, inputs=model_name, outputs=[custom_model_group, prompt], queue=False)
252
- custom_model_path.change(custom_model_changed, inputs=custom_model_path, outputs=None)
253
- # n_images.change(lambda n: gr.Gallery().style(grid=[2 if n > 1 else 1], height="auto"), inputs=n_images, outputs=gallery)
254
-
255
- inputs = [model_name, prompt, guidance, steps, width, height, seed, image, strength, neg_prompt]
256
- outputs = [image_out, error_output]
257
- prompt.submit(inference, inputs=inputs, outputs=outputs)
258
- generate.click(inference, inputs=inputs, outputs=outputs)
259
-
260
- ex = gr.Examples([
261
- [models[0].name, "iron man", 7.5, 50],
262
-
263
- ], inputs=[model_name, prompt, guidance, steps, seed], outputs=outputs, fn=inference, cache_examples=False)
264
-
265
- gr.HTML("""
266
- <div style="border-top: 1px solid #303030;">
267
- <br>
268
- <p>Model by Linaqruf</p>
269
- </div>
270
- """)
271
-
272
- print(f"Space built in {time.time() - start_time:.2f} seconds")
273
-
274
- if not is_colab:
275
- demo.queue(concurrency_count=1)
276
- demo.launch(debug=is_colab, share=is_colab)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Aristore/Warp/README.md DELETED
@@ -1,13 +0,0 @@
1
- ---
2
- title: Warp
3
- emoji: 🌖
4
- colorFrom: pink
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 3.40.1
8
- app_file: app.py
9
- pinned: false
10
- license: bsd
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/chardet/euckrprober.py DELETED
@@ -1,47 +0,0 @@
1
- ######################## BEGIN LICENSE BLOCK ########################
2
- # The Original Code is mozilla.org code.
3
- #
4
- # The Initial Developer of the Original Code is
5
- # Netscape Communications Corporation.
6
- # Portions created by the Initial Developer are Copyright (C) 1998
7
- # the Initial Developer. All Rights Reserved.
8
- #
9
- # Contributor(s):
10
- # Mark Pilgrim - port to Python
11
- #
12
- # This library is free software; you can redistribute it and/or
13
- # modify it under the terms of the GNU Lesser General Public
14
- # License as published by the Free Software Foundation; either
15
- # version 2.1 of the License, or (at your option) any later version.
16
- #
17
- # This library is distributed in the hope that it will be useful,
18
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
19
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20
- # Lesser General Public License for more details.
21
- #
22
- # You should have received a copy of the GNU Lesser General Public
23
- # License along with this library; if not, write to the Free Software
24
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25
- # 02110-1301 USA
26
- ######################### END LICENSE BLOCK #########################
27
-
28
- from .chardistribution import EUCKRDistributionAnalysis
29
- from .codingstatemachine import CodingStateMachine
30
- from .mbcharsetprober import MultiByteCharSetProber
31
- from .mbcssm import EUCKR_SM_MODEL
32
-
33
-
34
- class EUCKRProber(MultiByteCharSetProber):
35
- def __init__(self) -> None:
36
- super().__init__()
37
- self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL)
38
- self.distribution_analyzer = EUCKRDistributionAnalysis()
39
- self.reset()
40
-
41
- @property
42
- def charset_name(self) -> str:
43
- return "EUC-KR"
44
-
45
- @property
46
- def language(self) -> str:
47
- return "Korean"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/modeling/__init__.py DELETED
@@ -1,59 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- from detectron2.layers import ShapeSpec
3
-
4
- from .anchor_generator import build_anchor_generator, ANCHOR_GENERATOR_REGISTRY
5
- from .backbone import (
6
- BACKBONE_REGISTRY,
7
- FPN,
8
- Backbone,
9
- ResNet,
10
- ResNetBlockBase,
11
- build_backbone,
12
- build_resnet_backbone,
13
- make_stage,
14
- )
15
- from .meta_arch import (
16
- META_ARCH_REGISTRY,
17
- SEM_SEG_HEADS_REGISTRY,
18
- GeneralizedRCNN,
19
- PanopticFPN,
20
- ProposalNetwork,
21
- RetinaNet,
22
- SemanticSegmentor,
23
- build_model,
24
- build_sem_seg_head,
25
- FCOS,
26
- )
27
- from .postprocessing import detector_postprocess
28
- from .proposal_generator import (
29
- PROPOSAL_GENERATOR_REGISTRY,
30
- build_proposal_generator,
31
- RPN_HEAD_REGISTRY,
32
- build_rpn_head,
33
- )
34
- from .roi_heads import (
35
- ROI_BOX_HEAD_REGISTRY,
36
- ROI_HEADS_REGISTRY,
37
- ROI_KEYPOINT_HEAD_REGISTRY,
38
- ROI_MASK_HEAD_REGISTRY,
39
- ROIHeads,
40
- StandardROIHeads,
41
- BaseMaskRCNNHead,
42
- BaseKeypointRCNNHead,
43
- FastRCNNOutputLayers,
44
- build_box_head,
45
- build_keypoint_head,
46
- build_mask_head,
47
- build_roi_heads,
48
- )
49
- from .test_time_augmentation import DatasetMapperTTA, GeneralizedRCNNWithTTA
50
- from .mmdet_wrapper import MMDetBackbone, MMDetDetector
51
-
52
- _EXCLUDE = {"ShapeSpec"}
53
- __all__ = [k for k in globals().keys() if k not in _EXCLUDE and not k.startswith("_")]
54
-
55
-
56
- from detectron2.utils.env import fixup_module_metadata
57
-
58
- fixup_module_metadata(__name__, globals(), __all__)
59
- del fixup_module_metadata
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AzulaFire/SparkDebate/app.py DELETED
@@ -1,218 +0,0 @@
1
- from langchain.memory import ConversationSummaryBufferMemory
2
- from langchain.chains import ConversationChain
3
- from langchain.chains import RetrievalQA
4
- from utils.API import Spark_forlangchain
5
- import gradio as gr
6
- from langchain.prompts import ChatPromptTemplate
7
- from langchain.document_loaders import TextLoader
8
- from langchain.embeddings.huggingface import HuggingFaceEmbeddings
9
- from langchain.vectorstores import FAISS
10
- import sentence_transformers
11
-
12
-
13
- def init_knowledge_vector_store(filepath):
14
- EMBEDDING_MODEL = "model/text2vec_ernie/"
15
- embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL)
16
- embeddings.client = sentence_transformers.SentenceTransformer(
17
- embeddings.model_name, device='cuda')
18
- loader = TextLoader(filepath)
19
- docs = loader.load()
20
- vector_store = FAISS.from_documents(docs, embeddings)
21
- return vector_store
22
-
23
-
24
- template_1 = """
25
- 你是一个资深辩手,你的辩论风格是{style},你确定辩论战略需要考虑以下10个方面:
26
- 1. 分析辩题性质
27
- 判断辩题是判断型还是比较型,明确需要论证的核心观点。回答中必须包含题是判断型还是比较型。
28
- 2. 判断正反方定位
29
- 大致判断哪一方更容易证成立,存在明显优劣势。回答中必须需给出谁更主流,更容易成立。
30
- 3. 设想核心争议点
31
- 思考双方可能存在分歧和交锋的主要争议点。回答中需要明确给出至少三个争议点。
32
- 4. 论证框架
33
- 设计初步的论证框架,包括定义、标准、论点等。回答中需要明确按以下格式给出论证框架:正方:标准是XXX,论点1是XXX,论点2是XXX。反方:标准是XXX,论点1是XXX,论点2是XXX。(论点至少要两个)
34
- 5. 优势论域
35
- 确定自己方更容易取得优势的论域的诠释点。回答中必须详细给出双方的优势论域并给出理由。
36
- 6. 数据准备
37
- 提前准备论证所需的证据数据。回答中必须给出对论证起作用的数据,如相关国家合法化情况与对社会影响的数据
38
- 7. 情境假设
39
- 设想场景和例子以备交锋时使用。回答中必须至少给出正反双方情境,各三个。
40
- 8. 语境处理
41
- 考虑如何处理语境环境,为自己创造有利条件。回答中必须举出正反方的语境,各三个。
42
- 9. 质询角度
43
- 提前想好可能的质询角度,对应对方的论点。回答中需要给出详细的分析并试着举出例子,各三个。
44
- 10. 重点突破
45
- 找到对方可能论证的薄弱点,准备重点突破。回答中需要举出正反双方薄弱点分别在哪里,应该如何突破。
46
- 通过上述分析,可以确定一个明确有针对性的辩论战略.
47
- 接下来我会给你一个具体的辩题,你需要基于以上10个原则依次回答。
48
- ///辩题内容如下:{text}///
49
- """
50
- template_2 = """
51
- 你是一个资深辩手,你的辩论风格是{style},你立论会遵循以下的立论原则,总共5个原则:
52
- 1.定义明确
53
- 对关键词进行明确且合理的定义,这是展开论证的基础。
54
- 2.标准清晰
55
- 设置公正合理的判断标准,标准要具体明确,为论点比较提供依据。你的回答中必须包含标准。
56
- 3.论点匹配
57
- 论点要能有效支撑并印证标准,与标准和立场高度契合。你的回答中必须包含支撑印证标准的论点。
58
- 4.论据具体
59
- 提供具体可信的论据支撑每个论点,使之更有说服力。你的论点必须要论据支撑。
60
- 5.情境适用
61
- 引入情境和例子,使复杂观点容易被听众接受。你的回答可以适当包含情境
62
- 接下来会给你一个题目和持方。
63
- ///题目与持方如下:{text}///
64
- 你需要遵循以上五个立论原则立论,并且立论稿有以下要求:
65
- 1.以一个专业辩手的口吻做开场白。
66
- 2.总字数为1200字。
67
- 3.第一段需要包含以下三个部分 给出持方,对名词做出简单解释,给出标准,标准只能有一个。
68
- 4.第二段是第一个论点,论点需要围绕标准,阐述完论点后需要提出论据,最好是数据论据和学理论据,提出论据后需要做出解释来进行论证。参照以下流程:论点1+数据论据+数据论据的论证+学理论据+学理论据的论证。本段需要非常详细。
69
- 5.第三段是第二个论点,论点需要围绕标准,本段第一句话就要阐明论点是什么,阐述完论点后需要提出论据,最好是数据论据和学理论据,提出论据后需要做出解释来进行论证。参照以下流程:论点2+数据论据+数据论据的论证+学理论据+学理论据的论证。本段需要非常详细。
70
- 6.最后一段只需要用一句话再重复一遍己方的立场:“综上我方坚定认为XXX”。XXX为立场。
71
- 7.立论稿中需要把上述内容衔接流畅。
72
- """
73
- template_3 = """
74
- 你是一个资深的逻辑性很强的顶级辩手,你的辩论风格是{style},请对我的陈述进行反驳,越详细越好,反驳需要逐条反驳观点和论据,并且要给出详细的理由,质疑数据论据要用上常用的方法和句式,从数据合理性,样本代表性,统计方法,数据解读等多个角度进行考虑。质疑学理论据要从权威性,解读方式,是否有对抗学理等多个角度进行考虑。
75
- ///如下是我们的话题以及我的观点:{text}///
76
- """
77
- template_4 = """
78
- 你是一个资深辩手,你的辩论风格是{style},你需要根据我给出的话题提出观点并且要有数据论据和学理论据作为论证且总字数不少于400字,你的发言格式为:我们的话题是什么,我持什么观点,我的理由是XXX,因为某某数据,又因为某某学理。参照如下范例:||
79
- 我们的话题是人工智能对人类工作的影响。我持的观点是,人工智能将导致大量的就业机会减少。我的理由是,根据国际数据公司(IDC)的报告,到2025年,全球约有3.75亿个工作岗位将被自动化技术取代。同时,人工智能的发展也将带来新的就业机会,如AI工程师、数据科学家等。
80
- 首先,让我们从数据角度来看。根据美国劳工统计局(BLS)的数据,自20世纪90年代以来,美国的工作岗位流失率一直在上升。其中,自动化和计算机化在一定程度上对就业市场产生了负面影响。此外,根据麦肯锡全球研究院的预测,到2030年,人工智能可能会在全球范围内导致8000万至1.6亿个工作岗位的消失。
81
- 其次,从学理角度来看,人工智能的发展是基于算法和大数据的。然而,这些算法和数据往往受到人为因素的影响,可能导致错误的决策和预测。例如,2016年在美国总统选举期间,一家名为“剑桥分析”的公司利用大数据分析和选民心理研究,为特朗普竞选团队提供了策略支持。这一事件表明,人工智能在某些情况下可能会被用于不道德的目的。||
82
- ///我们本次讨论的话题是{text}///
83
- """
84
- template_5 = """
85
- 你是一个资深的逻辑性很强的顶级辩手,你的辩论风格是{style},可以与我进行辩论训练,你很擅长质询总是一针见血,而且也很擅长使用类比来归谬我的观点,你熟练的掌握各种数据质询的技巧。现在你要与我进行对辩
86
- 我的陈述如下:///{text}///
87
- 请对我的陈述进行反驳,越详细越好,反驳需要逐条反驳观点和论据,并且要给出详细的理由,质疑数据论据要用上常用的方法和句式,从数据合理性,样本代表性,统计方法,数据解读等多个角度进行考虑。质疑学理论据要从权威性,解读方式,是否有对抗学理等多个角度进行考虑。
88
- """
89
- end_prompt = """
90
- 请你对我们的对辩过程进行总结,总结需要包括以下部分:1.对辩主要针对什么进行讨论。2.评价我的对辩能力,需要根据评级原则给出评级,并且给出具体理由。评级原则如下:等级一,缺乏论证的反驳;等级二,自说自话的反驳;等级三,针锋相对的反驳;等级四,正中要害的反驳。3.根据我的对辩能力提出一定的建议。
91
- 示例如下:
92
- 好的,我来对我们的对辩过程进行总结。
93
- 在我们的对辩过程中,我们主要讨论了动物园是否应该被禁止。我认为动物园对动物的福利和权利造成了负面影响,而您则提出了一些质疑,认为动物园中的动物可以享受比野外更安全的生活条件。
94
- 我认为您的对辩能力属于等级三,即针锋相对的反驳。您能够对我的观点提出一些质疑和反驳,并且能够给出一些合理的理由。但是,在某些情况下,您可能会使用一些不太恰当的类比来归谬我的观点,这可能会影响到对辩的质量和效果。
95
- 鉴于您的对辩能力,我认为您可以进一步提高自己的辩论技巧。您可以通过更多的阅读和学习,提高自己的知识水平和思维能力,从而更好地进行论证和反驳。此外,在使用类比和比喻时,需要更加谨慎,确保它们能够恰当地表达您的观点,而不会歪曲或归谬对方的观点。
96
- """
97
- prompt_1 = ChatPromptTemplate.from_template(template_1)
98
- prompt_2 = ChatPromptTemplate.from_template(template_2)
99
- prompt_3 = ChatPromptTemplate.from_template(template_3)
100
- prompt_4 = ChatPromptTemplate.from_template(template_4)
101
- prompt_5 = ChatPromptTemplate.from_template(template_5)
102
-
103
-
104
- def init_(app_id, api_key, api_secret):
105
- global llm
106
- llm = Spark_forlangchain(n=10, app_id=app_id, api_key=api_key,
107
- api_secret=api_secret)
108
- memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=4096)
109
- global conversation_1
110
- global conversation_2
111
- global conversation_3
112
- conversation_1 = ConversationChain(llm=llm)
113
- conversation_2 = ConversationChain(llm=llm, memory=memory)
114
- print("初始化成功!")
115
-
116
-
117
- def shortDebate_(type, style, prompt, help):
118
- if type == "破题":
119
- msg = prompt_1.format_prompt(text=prompt, style=style).to_string()
120
- elif type == "立论":
121
- msg = prompt_2.format_prompt(text=prompt, style=style).to_string()
122
- elif type == "对辩先发":
123
- msg = prompt_3.format_prompt(text=prompt, style=style).to_string()
124
- elif type == "对辩后发":
125
- msg = prompt_4.format_prompt(text=prompt, style=style).to_string()
126
- else:
127
- msg = prompt
128
- print(msg)
129
- response = conversation_1.run(msg)
130
- print(response)
131
- help.append((prompt, response))
132
- return help, help
133
-
134
-
135
- def longDebate_(style, prompt, help):
136
- msg = prompt_5.format_prompt(text=prompt, style=style).to_string()
137
- response = conversation_2.run(msg)
138
- help.append((prompt, response))
139
- return help, help
140
-
141
-
142
- def end_talk(style, prompt, help):
143
- msg = end_prompt
144
- response = conversation_2.run(msg)
145
- help.append((prompt, response))
146
- return help, help
147
-
148
-
149
- def Debatebytext_(prompt, help):
150
- msg = prompt
151
- response = QA_chain.run(msg)
152
- help.append((prompt, response))
153
- return help, help
154
-
155
-
156
- def upload_file(files):
157
- vector_store = init_knowledge_vector_store(files.name)
158
- memory_text = ConversationSummaryBufferMemory(
159
- llm=llm, max_token_limit=4096)
160
- global QA_chain
161
- QA_chain = RetrievalQA.from_llm(llm=llm, retriever=vector_store.as_retriever(
162
- search_kwargs={"k": 2}), memory=memory_text)
163
- file_paths = [file.name for file in files]
164
- return file_paths
165
-
166
-
167
- with gr.Blocks(css="#chatbot{height:300px} .overflow-y-auto{height:500px}") as init:
168
- with gr.Row():
169
- app_id = gr.Textbox(
170
- lines=1, placeholder="app_id Here...", label="app_id")
171
- api_key = gr.Textbox(
172
- lines=1, placeholder="api_key Here...", label="api_key")
173
- api_secret = gr.Textbox(
174
- lines=1, placeholder="api_secret Here...", label="api_secret")
175
- temperature = gr.Slider(minimum=0, maximum=1,
176
- step=0.1, value=0.3, interactive=True)
177
- btn = gr.Button(value="初始化")
178
- btn.click(init_, inputs=[app_id, api_key, api_secret])
179
-
180
- with gr.Blocks(css="#chatbot{height:300px} .overflow-y-auto{height:500px}") as shortDebate:
181
- chatbot = gr.Chatbot(elem_id="chatbot")
182
- state = gr.State([])
183
- drop1 = gr.Radio(["破题", "立论", "对辩先发", "对辩后发"],
184
- label="功能选择", info="选择你想要的功能") # 单选
185
- with gr.Row():
186
- txt = gr.Textbox(show_label="在这里开始聊天吧", placeholder="请输入你的问题")
187
- send = gr.Button("🚀 发送")
188
- style = gr.Textbox(lines=1, placeholder="style Here... ",
189
- label="辩论风格", value="犀利", interactive=True)
190
- send.click(shortDebate_, [drop1, style, txt, state], [chatbot, state])
191
-
192
- with gr.Blocks(css="#chatbot{height:300px} .overflow-y-auto{height:500px}") as longDebate:
193
- chatbot = gr.Chatbot(elem_id="chatbot")
194
- state = gr.State([])
195
- with gr.Row():
196
- txt = gr.Textbox(show_label="在这里开始长辩论吧", placeholder="请输入你的问题")
197
- send = gr.Button("🚀 发送")
198
- end = gr.Button("🤠 总结")
199
- style = gr.Textbox(lines=1, placeholder="style Here... ",
200
- label="辩论风格", value="犀利", interactive=True)
201
- send.click(longDebate_, [style, txt, state], [chatbot, state])
202
- end.click(end_talk, [style, txt, state], [chatbot, state])
203
-
204
- with gr.Blocks(css="#chatbot{height:300px} .overflow-y-auto{height:500px}") as Debatebytext:
205
- chatbot = gr.Chatbot(elem_id="chatbot")
206
- state = gr.State([])
207
- file_output = gr.File(label='请上传文件, 目前支持txt、docx、md格式',
208
- file_types=['.txt', '.md', '.docx'])
209
- with gr.Row():
210
- txt = gr.Textbox(show_label="在这里从你给出的资料里学习吧", placeholder="请输入你的问题")
211
- send = gr.Button("🚀 发送")
212
- upload_button = gr.UploadButton("Click to Upload a File", scale=1, file_types=[
213
- "text"])
214
- upload_button.upload(upload_file, upload_button, file_output)
215
- send.click(Debatebytext_, [txt, state], [chatbot, state])
216
- demo = gr.TabbedInterface([init, shortDebate, longDebate, Debatebytext], [
217
- "初始化", "辅助辩论", "对辩练习", "辩论技巧学习"])
218
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Block Craft 3d Install.md DELETED
@@ -1,76 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar e instalar pfSense CE 2.4.5-RELEASE-p1-amd64</h1>
3
- <p>Si está buscando un software de firewall y enrutador libre, de código abierto y potente, es posible que desee considerar pfSense Community Edition (CE). En este artículo, le mostraremos cómo descargar e instalar pfSense CE 2.4.5-RELEASE-p1-amd64, que es la última versión estable a partir de junio de 2020. </p>
4
- <h2>block craft 3d install</h2><br /><p><b><b>Download</b> &#9881;&#9881;&#9881; <a href="https://bltlly.com/2v6KNO">https://bltlly.com/2v6KNO</a></b></p><br /><br />
5
- <h2>¿Qué es pfSense CE y por qué usarlo? </h2>
6
- <p>pfSense CE es un software de firewall y enrutador que se basa en el sistema operativo FreeBSD con un kernel personalizado y paquetes de software libre de terceros. Ofrece inspección de paquetes con estado, soporte simultáneo de IPv4 e IPv6, prevención de intrusiones, VPN, equilibrio de carga, proxy, filtrado de contenido y más. Se puede ejecutar en una variedad de plataformas de hardware, desde electrodomésticos dedicados a PC antiguos.</p>
7
- <h3>pfSense CE características y beneficios</h3>
8
- <p>Algunas de las características y beneficios de pfSense CE son:</p>
9
- <ul>
10
- <li>Es rico en características, robusto y flexible. Tiene muchas características que no están disponibles en firewalls comerciales o requieren licencias caras. </li>
11
- <li> Es fácil de usar y personalizable. Cuenta con una interfaz web para configuración y gestión, así como una interfaz de línea de comandos para usuarios avanzados. También admite agregar características adicionales a través de paquetes o código personalizado. </li>
12
- <li>Es de código abierto y impulsado por la comunidad. Es desarrollado y mantenido por un equipo de voluntarios y apoyado por una gran comunidad de usuarios y desarrolladores. También respeta su privacidad y no recopila ni vende sus datos. </li>
13
- </ul>
14
- <h3>pfSense Requisitos del sistema CE</h3>
15
- <p>Los requisitos mínimos de hardware para pfSense CE son:</p>
16
- <ul>
17
- <li>CPU compatible con 64 bits</li>
18
- <li>Al menos 512 MB de RAM</li>
19
- <li>Al menos 1 GB de espacio en disco</li>
20
- <li>Una o más tarjetas de interfaz de red compatibles</li>
21
- <li>Una unidad USB de arranque o unidad óptica para la instalación</li>
22
- </ul>
23
-
24
- <h2>Cómo descargar pfSense CE 2.4.5-RELEASE-p1-amd64</h2>
25
- <p>Para descargar pfSense CE 2.4.5-RELEASE-p1-amd64, debe visitar el sitio web oficial de pfSense en <a href="( 1 )">https:/www.pfsense.org/download/</a> y siga estos pasos:</p>
26
- <p></p>
27
- <h3>Descargar opciones y espejos</h3>
28
- <ol>
29
- <li>Seleccione una arquitectura: AMD64 (64 bits) para la mayoría del hardware moderno. </li>
30
- <li>Seleccione un tipo de instalador: USB Memstick Installer para escribir en una unidad flash USB o DVD Image (ISO) Installer para grabar en un disco óptico. </li>
31
- <li>Seleccione una consola para imágenes del instalador USB Memstick: VGA para usar un monitor y teclado o Serial para usar una consola serie. </li>
32
- <li>Seleccione un espejo que esté cerca de su ubicación geográficamente. </li>
33
- <li>Haga clic en Descargar para iniciar el proceso de descarga. </li>
34
- </ol>
35
- <h3>Verificar la integridad de la descarga</h3>
36
- <p>Para asegurarse de que el archivo descargado no está dañado o manipulado, es necesario verificar su integridad mediante la comparación de su valor hash SHA-256 con el proporcionado por el sitio web o en el . archivo sha256 en el espejo. Puede utilizar varias herramientas para calcular el valor hash SHA-256 de un archivo, como <a href="">https:/www.nirsoft.net/utils/hash_my_files.html</a> para Windows o <a href="">https:/support.apple.com/guide/l/apd100cb05/mac<a> para Mac. El valor hash debe coincidir exactamente con el proporcionado por el sitio web o en el archivo . sha256. Si no, necesita descargar el archivo de nuevo desde otro espejo o ponerse en contacto con el equipo de pfSense para obtener ayuda. </p>
37
- <h2>Cómo instalar pfSense CE 2.4.5-RELEASE-p1-amd64</h2>
38
- <p>Una vez que haya descargado y verificado el archivo pfSense CE 2.4.5-RELEASE-p1-amd64, debe preparar el medio de instalación y arrancar el instalador en su dispositivo de destino. Estos son los pasos a seguir:</p>
39
- <h3>Preparación de los medios de instalación</h3>
40
-
41
- <p>Si descargó la imagen del instalador de imagen de DVD (ISO), debe grabarla en un DVD en blanco utilizando una herramienta como <a href="">https://www.imgburn.com/</a> para Windows o <a href="">https:/burn-x.sourceforge.io/</a> para Mac. Asegúrese de seleccionar la letra de la unidad correcta y el archivo de imagen antes de grabar. El proceso hará que el DVD se pueda arrancar y esté listo para la instalación. </p>
42
- <h3>Arranque del instalador y selección de opciones</h3>
43
- <p>Después de preparar el medio de instalación, debe insertarlo en su dispositivo de destino y arrancar desde él. Es posible que tenga que cambiar el orden de arranque en su configuración de BIOS o UEFI para hacer que el dispositivo arranque desde la unidad flash USB o DVD. Una vez que arranque desde el medio de instalación, verá un menú con varias opciones. Puede elegir una de las siguientes:</p>
44
- <ul>
45
- <li>Instalación rápida/fácil: Esta opción instalará pfSense CE con la configuración predeterminada y la entrada mínima del usuario. Se recomienda para la mayoría de los usuarios que desean una instalación simple y rápida. </li>
46
- <li>Instalación personalizada: Esta opción le permitirá personalizar varias configuraciones durante la instalación, como particiones de disco, diseño de teclado, zona horaria, etc. Se recomienda para usuarios avanzados que desean más control sobre la instalación. </li>
47
- <li>Recuperar config.xml: Esta opción le permitirá restaurar un archivo de configuración previamente guardado desde una unidad flash USB o un servidor FTP. Es útil si desea migrar su configuración desde otro dispositivo pfSense CE o hacer una copia de seguridad de su configuración. </li>
48
- <li>Rescue Shell: Esta opción lo dejará en un indicador de shell donde puede realizar varios comandos y tareas de solución de problemas. Es útil si encuentra algún problema durante la instalación o necesita acceder al sistema de archivos. </li>
49
- <li>Reiniciar: Esta opción reiniciará el dispositivo. </li>
50
- <li>Detener: Esta opción apagará el dispositivo. </li>
51
- </ul>
52
-
53
- <h3>Configuración de las interfaces de firewall y red</h3>
54
- <p>Después de copiar los archivos, el instalador le preguntará si desea configurar VLAN ahora. Las VLAN son LAN virtuales que le permiten segmentar su red en diferentes subredes utilizando una sola interfaz física. Si desea usar VLAN, escriba "y" y presione Enter. De lo contrario, escriba "n" y presione Enter.</p>
55
- <p>El instalador le pedirá que asigne interfaces. Las interfaces son tarjetas de red físicas o virtuales que conectan el dispositivo a diferentes redes o dispositivos. Es necesario asignar al menos una interfaz como WAN (red de área amplia) y una interfaz como LAN (red de área local). WAN es la interfaz que conecta su dispositivo a Internet o una red externa, mientras que LAN es la interfaz que conecta su dispositivo a su red interna o dispositivos. </p>
56
- <p>El instalador le mostrará una lista de interfaces disponibles y sus nombres, como em0, em1, etc. Debe escribir el nombre de cada interfaz que desea asignar como WAN o LAN y presionar Enter después de cada mensaje. Por ejemplo, si desea asignar em0 como WAN y em1 como LAN, debe escribir "em0" y presionar Enter cuando se le solicite la interfaz WAN, y escribir "em1" y presionar Enter cuando se le solicite la interfaz LAN. También puede asignar interfaces adicionales como interfaces opcionales, como OPT1, OPT2, etc. Si tiene más de dos interfaces, el instalador le pedirá que las asigne una por una. Si ha terminado de asignar interfaces, escriba "hecho" y presione Enter.</p>
57
- <p>El instalador le mostrará un resumen de sus asignaciones de interfaz y le pedirá que las confirme. Si son correctos, escriba "y" y presione Enter. De lo contrario, escriba "n" y presione Enter para regresar y cambiarlos. </p>
58
-
59
- <h2>Conclusión y preguntas frecuentes</h2>
60
- <p>En este artículo, le hemos mostrado cómo descargar e instalar pfSense CE 2.4.5-RELEASE-p1-amd64, que es un software de firewall y enrutador gratuito, de código abierto y potente. También hemos explicado qué es pfSense CE, por qué usarlo, cuáles son sus características y beneficios, y cuáles son sus requisitos del sistema. También le hemos guiado a través de los pasos de preparación de los medios de instalación, arranque del instalador, selección de opciones, configuración del firewall y las interfaces de red. </p>
61
- <p>Esperamos que este artículo haya sido útil e informativo para usted. Si tiene alguna pregunta o comentario, no dude en contactarnos o dejar un comentario a continuación. Aquí hay algunas preguntas frecuentes que puedes encontrar útiles:</p>
62
- <h3>Preguntas frecuentes</h3>
63
- <ol>
64
- <li>Q: ¿Cómo puedo acceder a la interfaz web de pfSense CE después de la instalación? <br>
65
- R: Puede acceder a la interfaz web de pfSense CE escribiendo la dirección IP de su interfaz LAN en su navegador web. Por defecto, la dirección IP es 192.168.1.1. Deberá iniciar sesión con el nombre de usuario predeterminado 'admin' y la contraseña 'pfsense'. A continuación, puede cambiar su contraseña y otros ajustes según sea necesario.</li>
66
- <li>Q: ¿Cómo puedo actualizar pfSense CE a la última versión? <br>
67
- R: Puede actualizar pfSense CE yendo a Sistema > Actualizar en la interfaz web o ejecutando el comando 'pfSense-upgrade' en el símbolo del sistema. Tendrá que comprobar si hay actualizaciones, descargarlas y aplicarlas. Es posible que tenga que reiniciar el dispositivo después de la actualización. </li>
68
- <li>Q: ¿Cómo puedo agregar más características a pfSense CE? <br>
69
- R: Puede agregar más características a pfSense CE instalando paquetes desde el repositorio oficial o desde fuentes de terceros. Puede encontrar e instalar paquetes yendo a System > Package Manager en la interfaz web o ejecutando el comando 'pkg' en el prompt del shell. También puede crear sus propios paquetes o código personalizado si tiene las habilidades y el conocimiento. </li>
70
-
71
- R: Puede realizar copias de seguridad y restaurar la configuración de pfSense CE yendo a Diagnostics > Backup & Restore en la interfaz web o ejecutando el comando 'configctl' en el símbolo del sistema. Puede realizar copias de seguridad de su configuración en un archivo local, un servidor remoto o un servicio en la nube. También puede restaurar la configuración desde un archivo local, un servidor remoto o un servicio en la nube. </li>
72
- <li>Q: ¿Cómo puedo obtener ayuda y soporte para pfSense CE? <br>
73
- R: Puede obtener ayuda y soporte para pfSense CE visitando el sitio web oficial en <a href="">https://www.pfsense.org/</a>, donde puede encontrar documentación, foros, blogs, videos, podcasts, redes sociales, etc. También puede ponerse en contacto con el equipo de pfSense o contratar a un consultor para servicios profesionales. </li>
74
- </ol></p> 64aa2da5cf<br />
75
- <br />
76
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Chat Para Aprender Ingls Apk.md DELETED
@@ -1,69 +0,0 @@
1
-
2
- <h1>Charla para aprender inglés APK: Una manera divertida y eficaz para mejorar sus habilidades en inglés</h1>
3
- <p>¿Quieres aprender inglés de una manera divertida y fácil? ¿Quieres chatear con hablantes nativos y otros estudiantes de todo el mundo? ¿Quieres acceder a una variedad de características y herramientas que te ayudarán a dominar el idioma? Si respondió sí a cualquiera de estas preguntas, entonces usted debe tratar de Chat to Learn English APK, una aplicación gratuita que hará que su viaje de aprendizaje de idiomas más agradable y gratificante. </p>
4
- <h2>chat para aprender inglés apk</h2><br /><p><b><b>Download Zip</b> &#10002; &#10002; &#10002; <a href="https://bltlly.com/2v6MY7">https://bltlly.com/2v6MY7</a></b></p><br /><br />
5
- <h2>¿Qué es el chat para aprender inglés APK? </h2>
6
- <p>Chat para Aprender Inglés APK es una aplicación que le permite practicar Inglés mediante el chat con hablantes nativos y otros estudiantes. Puedes conocer gente de diferentes países y culturas, y hablar de lo que quieras. Ya sea que quieras presentarte, compartir tus aficiones, pedir consejo o discutir eventos actuales, puedes encontrar a alguien que esté dispuesto a chatear contigo. </p>
7
- <h3>Una aplicación gratuita que te conecta con hablantes nativos y otros estudiantes</h3>
8
- <p>Una de las mejores características de Chat to Learn English APK es que es completamente gratuito. No tienes que pagar ningún cargo o suscripción para usar la aplicación. Puedes chatear con tantas personas como quieras, durante el tiempo que quieras. También puedes elegir con quién quieres chatear, según su nivel de idioma, intereses, ubicación y disponibilidad. Puedes agregarlos como amigos o ignorarlos si son groseros o inapropiados. </p>
9
- <h3>Una plataforma que ofrece varias características y herramientas para mejorar su experiencia de aprendizaje</h3>
10
-
11
- <h2> ¿Por qué debe utilizar el chat para aprender inglés APK? </h2>
12
- <p>Hay muchas razones por las que debe utilizar el chat para aprender inglés APK. Aquí están algunos de los principales beneficios de usar esta aplicación:</p>
13
- <p></p>
14
- <h3>Practicar habilidades de hablar y escuchar en conversaciones reales</h3>
15
- <p>La mejor manera de aprender un idioma es realmente hablar. Chatear con hablantes nativos y otros estudiantes te ayudará a practicar tus habilidades de hablar y escuchar en conversaciones reales. Podrás mejorar tu fluidez, precisión, pronunciación, entonación y confianza. También podrás aprender cómo la gente usa el idioma en diferentes situaciones y contextos. </p>
16
- <h3>Para aprender nuevo vocabulario, gramática y modismos de hablantes nativos</h3>
17
- <p>Otro beneficio de chatear con hablantes nativos es que podrás aprender nuevo vocabulario, gramática y modismos de ellos. Usted estará expuesto a palabras y frases que no se enseñan en libros de texto o aulas. También podrás pedirles explicaciones o ejemplos cuando encuentres algo desconocido o confuso <h3>Para explorar diferentes culturas y temas con personas de todo el mundo</h3>
18
- <p>Un tercer beneficio de chatear con gente de todo el mundo es que podrás explorar diferentes culturas y temas con ellos. Usted será capaz de aprender acerca de sus costumbres, tradiciones, valores, creencias y opiniones. También podrás compartir tu propia cultura y perspectiva con ellos. Podrás ampliar tus horizontes y enriquecer tus conocimientos conversando con personas de diversos orígenes y experiencias. </p>
19
- <h2>Cómo utilizar el chat para aprender inglés APK? </h2>
20
- <p>El uso de chat para aprender inglés APK es muy fácil y simple. Estos son los pasos que debe seguir:</p>
21
- <h3>Descargar la aplicación de la Google Play Store o APKCombo</h3>
22
-
23
- <h3>Crea tu perfil y establece tus objetivos de aprendizaje</h3>
24
- <p>El segundo paso es crear tu perfil y establecer tus objetivos de aprendizaje. Puedes registrarte con tu correo electrónico, Facebook o cuenta de Google. También puede elegir un nombre de usuario, una imagen de perfil y una breve introducción. También puede seleccionar su idioma nativo, su idioma objetivo y su nivel de idioma. También puedes establecer tus metas de aprendizaje, como mejorar tus habilidades para hablar, escuchar, leer o escribir. </p>
25
- <h3>Buscar un socio de idioma o unirse a un chat de grupo</h3>
26
- <p>El tercer paso es encontrar un socio de idioma o unirse a un chat de grupo. Puede navegar a través de la lista de usuarios en línea y enviarles una solicitud de chat. También puede filtrar a los usuarios por su nivel de idioma, intereses, ubicación y disponibilidad. También puede unirse a un chat de grupo basado en diferentes temas, como viajes, música, películas, deportes, etc. También puede crear su propio chat de grupo e invitar a otros usuarios a unirse. </p>
27
- <h3>Empieza a chatear y aprender con ayudas integradas y funciones interactivas</h3>
28
- <p>El cuarto paso es empezar a chatear y aprender con ayudas integradas y funciones interactivas. Puede chatear con su compañero de idioma a través de mensajes de texto y voz, pegatinas, llamadas de voz y video, y salas de voz interactivas y vidas. También puede utilizar ayudas integradas para traducción, pronunciación, transliteración y correcciones. También puedes publicar momentos, que son publicaciones públicas que son vistas por todos los hablantes nativos de tu idioma objetivo. Puedes usar momentos para hacer preguntas, compartir actualizaciones o expresar tus opiniones. </p>
29
- <h2>Consejos y trucos para aprovechar al máximo el chat para aprender inglés APK</h2>
30
- <p>Para aprovechar al máximo el chat para aprender inglés APK, aquí hay algunos consejos y trucos que debe seguir:</p>
31
- <h3>Sé educado y respetuoso con tus compañeros de chat</h3>
32
-
33
- <h3>Usa las oraciones de ejemplo y el diccionario para ayudarte a expresarte</h3>
34
- <p>Otro consejo es usar las oraciones de ejemplo y el diccionario para ayudarte a expresarte. Si no está seguro de cómo decir algo en inglés, puede usar la función de oraciones de ejemplo para ver cómo lo dirían los hablantes nativos. También puede utilizar la función de diccionario para buscar el significado, la pronunciación y el uso de cualquier palabra o frase. Estas características te ayudarán a mejorar tu vocabulario y gramática. </p> <h3>Sigue tus intereses y únete a momentos, salas de voz y vidas</h3>
35
- <p>Un tercer consejo es seguir tus intereses y unir momentos, salas de voz y vidas. Estas son características interactivas que le permiten interactuar con otros usuarios y aprender de ellos. Puedes publicar momentos para compartir tus pensamientos, sentimientos o experiencias con la comunidad. Puede unirse a las salas de voz para chatear con varios usuarios en una llamada de grupo. También puede unirse a vidas para ver transmisiones en vivo de hablantes nativos u otros estudiantes. Estas características te ayudarán a expandir tu red social y aprender desde diferentes perspectivas. </p>
36
- <h3>Revise su historial de chat y comentarios para realizar un seguimiento de su progreso</h3>
37
- <p>Un cuarto consejo es revisar su historial de chat y comentarios para realizar un seguimiento de su progreso. Puede acceder a su historial de chat y ver todos los mensajes y llamadas que ha intercambiado con sus socios de chat. También puedes ver los comentarios que te han dado sobre tus habilidades lingüísticas. Puede utilizar esta información para revisar sus errores, aprender de sus correcciones y medir su mejora. También puedes dar retroalimentación a tus compañeros de chat y ayudarles a mejorar sus habilidades. </p>
38
- <h2>Conclusión</h2>
39
-
40
- <h2>Preguntas frecuentes</h2>
41
- <p>Aquí hay algunas preguntas frecuentes sobre Chat to Learn English APK:</p>
42
- <tabla>
43
- <tr>
44
- <th>Pregunta</th>
45
- <th>Respuesta</th>
46
- </tr>
47
- <tr>
48
- <td>¿Es seguro el chat para aprender inglés APK? </td>
49
- <td>Sí, Chat para aprender inglés APK es seguro. No contiene ningún virus o malware. También protege su información personal y privacidad. Puedes bloquear o reportar a cualquier usuario que te esté acosando o enviando spam. </td>
50
- </tr>
51
- <tr>
52
- <td>¿Cómo puedo encontrar un buen compañero de idioma en el chat para aprender inglés APK? </td>
53
- <td>Usted puede encontrar un buen socio de idioma en el chat para aprender inglés APK navegando a través de la lista de usuarios en línea y comprobar sus perfiles. También puede filtrar a los usuarios por su nivel de idioma, intereses, ubicación y disponibilidad. También puedes leer los comentarios y valoraciones de otros usuarios que han chateado con ellos antes. </td>
54
- </tr>
55
- <tr>
56
- <td>¿Cuáles son los beneficios de usar llamadas de voz y video en el chat para aprender inglés APK? </td>
57
- <td>Los beneficios de usar llamadas de voz y video en Chat para aprender inglés APK son que puede practicar sus habilidades de hablar y escuchar de manera más efectiva, escuchar la pronunciación y entonación de los hablantes nativos, ver las expresiones faciales y el lenguaje corporal de sus socios de chat, y construir una relación más fuerte y la conexión con ellos. </td>
58
- </tr>
59
- <tr>
60
- <td>¿Cómo puedo mejorar mis habilidades de escritura en el chat para aprender inglés APK? </td>
61
- <td>Usted puede mejorar sus habilidades de escritura en el chat para aprender inglés APK mediante el uso de mensajes de texto, pegatinas, momentos, salas de voz, y vidas. También puede utilizar las ayudas integradas para traducción, pronunciación, transliteración y correcciones. También puede solicitar comentarios de sus socios de chat o hablantes nativos. </td>
62
- </tr>
63
- <tr>
64
- <td>¿Cómo puedo hacer mi chat más interesante en Chat para aprender inglés APK? </td>
65
-
66
- </tr>
67
- </table></p> 64aa2da5cf<br />
68
- <br />
69
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Benson/text-generation/Examples/Descargar Canciones De Pelculas Rojas.md DELETED
@@ -1,105 +0,0 @@
1
-
2
- <h1>Canciones de película roja Descargar: Cómo disfrutar de la banda sonora de la película de comedia de acción</h1>
3
- <p>Si eres un fan de las películas de acción y comedia, es posible que hayas escuchado o visto Red Movie, una película de 2010 protagonizada por Bruce Willis, Morgan Freeman, John Malkovich, Helen Mirren y Mary-Louise Parker. La película se basa en una serie de cómics del mismo nombre y sigue a un grupo de agentes retirados de la CIA que son blanco de un misterioso asesino. La película está llena de humor, suspense, romance y, por supuesto, acción. ¿Pero sabías que la película también tiene una gran banda sonora que complementa su tono y tema? En este artículo, le diremos todo lo que necesita saber sobre la descarga de canciones de Red Movie, incluyendo de qué se trata la película, qué canciones hay en ella, por qué debe escucharlas y cómo descargarlas de forma legal y segura. </p>
4
- <h2>descargar canciones de películas rojas</h2><br /><p><b><b>Download File</b> &#9913;&#9913;&#9913; <a href="https://bltlly.com/2v6K1R">https://bltlly.com/2v6K1R</a></b></p><br /><br />
5
- <h2>¿Qué es la película roja y por qué usted debe verlo</h2>
6
- <h3>La trama y el reparto de la película roja</h3>
7
- <p>Red Movie sigue a Frank Moses (Bruce Willis), un ex agente de la CIA que vive una vida aburrida y solitaria. Solo encuentra alegría en hablar con Sarah Ross (Mary-Louise Parker), una agente de servicio al cliente que maneja sus cheques de pensión. Una noche, la casa de Frank es atacada por un equipo de asesinos que quieren matarlo. Frank logra escapar y decide proteger a Sarah, que también está en peligro debido a sus conversaciones telefónicas. Frank luego se reúne con sus viejos colegas Joe Matheson (Morgan Freeman), Marvin Boggs (John Malkovich), y Victoria Winslow (Helen Mirren), que también son agentes retirados de la CIA con el nombre en clave "RED" (Retired Extremely Dangerous). Juntos, tratan de averiguar quién está detrás del intento de asesinato y por qué están siendo perseguidos. </p>
8
- <h3>El género y el estilo de la película roja</h3>
9
-
10
- <h2>¿Cuáles son las canciones en película roja y por qué usted debe escuchar a ellos</h2>
11
- <h3>La lista y la descripción de las canciones en la película roja</h3>
12
- <p>La banda sonora de Red Movie consta de 12 canciones que se reproducen durante varias escenas de la película. Aquí está la lista y la descripción de las canciones en Red Movie:</p>
13
- <tabla>
14
- <tr>
15
- <th>Título de la canción</th>
16
- <th>Artista</th>
17
- <th>Descripción de la escena</th>
18
- </tr>
19
- <tr>
20
- <td>Inicio en tu corazón</td>
21
- <td>Salomón Burke</td>
22
- <td>Frank deja su casa después de matar al equipo y viaja a la casa de Sarah. </td>
23
- </tr>
24
- <tr>
25
- <td>Quiero ser amado</td>
26
- <td>Aguas fangosas</td>
27
- <td>Frank está conduciendo en Nueva Orleans con Sarah contenida en la parte posterior. Él la ata en una habitación de motel. </td>
28
- </tr>
29
- <tr>
30
- <td>Doctor Mis Ojos</td>
31
- <td>Jackson Browne</td>
32
- <td>Sarah se despierta en el coche para encontrarse en la ciudad de Nueva York con Frank.</td>
33
- </tr>
34
- <tr>
35
- <td>Cissy Strut</td>
36
- <td>Los medidores</td>
37
- <td>Escena de baile en la casa de Marvin. Frank y Sarah bailan juntos.</td>
38
- </tr>
39
- <tr>
40
- <td>No te detengas</td>
41
- <td>Juegos de Mac</td>
42
- <td>Frank y Sarah están en una persecución con William Cooper (Karl Urban), un agente de la CIA al que se le ordena matarlos. </td>
43
- </tr>
44
- <tr>
45
- <td>Tema del amor</td>
46
- <td>Orquesta de amor ilimitado</td>
47
- <td>Frank y Sarah se besan en el coche después de escapar de Cooper.</td>
48
- </tr>
49
- <tr>
50
- <td>Sr. Lastimoso</td>
51
- <td>Otis Redding</td>
52
- <td>Frank y Sarah conocen a Victoria, quien acepta ayudarlos. </td>
53
- </tr>
54
- <tr>
55
- <td>De nuevo en la silla de montar</td>
56
- <td>Aerosmith</td>
57
- <td>Frank, Sarah, Marvin y Victoria van a la sede de la CIA para averiguar quién está detrás de la conspiración. </td>
58
- </tr>
59
- <tr>
60
- <td>El fin del mundo</td>
61
- <td>Skeeter Davis</td>
62
- <td>Frank y Sarah están en una habitación de hotel en Chicago. Frank le dice a Sarah que la ama. </td>
63
- </tr>
64
- <tr>
65
- <td>Tú y yo</td>
66
- <td>Alice Cooper</td>
67
- <td>Frank y Sarah están en un restaurante con Marvin, quien les dice que tienen que ir a Moldavia para encontrar la fuente de la conspiración. </td>
68
- </tr>
69
- <tr>
70
-
71
- <td>John Philip Sousa</td>
72
- <td>Frank, Sarah, Marvin y Victoria llegan a Moldavia y conocen a Ivan Simanov (Brian Cox), un ex agente de la KGB que es el viejo amigo de Frank y amante de Victoria. </td>
73
- </tr>
74
- <tr>
75
- <td>Rocket Man (Creo que va a ser un largo, largo tiempo)</td>
76
- <td>Elton John</td>
77
- <td>Frank, Sarah, Marvin, Ivan y Victoria asaltan la mansión de Alexander Dunning (Richard Dreyfuss), un empresario corrupto que está detrás de la conspiración. </td>
78
- </tr>
79
- <tr>
80
- <td>La chica de Ipanema</td>
81
- <td>Astrud Gilberto, João Gilberto y Stan Getz</td>
82
- <td>La escena final de la película. Frank y Sarah están en una playa en Moldavia, disfrutando de su retiro. Marvin aparece con un lanzacohetes y les dice que tienen una nueva misión. </td>
83
- </tr>
84
- <h3>El género musical y el estado de ánimo de las canciones en la película roja</h3>
85
- <p>Las canciones de Red Movie son principalmente de los géneros de rock, soul, blues, funk y pop. Son canciones pegadizas, optimistas, enérgicas y nostálgicas. Reflejan el estado de ánimo y el tema de la película, que trata de vivir la vida al máximo, divertirse y no rendirse. Las canciones también crean un contraste entre lo antiguo y lo moderno, lo serio y lo humorístico, y lo ordinario y lo extraordinario. Las canciones también mejoran las emociones y las relaciones de los personajes, como el romance de Frank y Sarah, la amistad de Frank y Marvin, y la pasión de Victoria e Ivan. </p>
86
- <h2>Cómo descargar las canciones de la película roja de forma legal y segura</h2>
87
- <h3>Los beneficios y los riesgos de descargar canciones en línea</h3>
88
-
89
- <h2>Preguntas frecuentes</h2>
90
- <p>Aquí están algunas de las preguntas más frecuentes sobre las canciones de Red Movie
91
- <ol>
92
- <li><b> ¿Dónde puedo ver Red Movie en línea? </b></li>
93
- <p>Puedes ver Red Movie online en varias plataformas de streaming, como Netflix, Hulu, Amazon Prime Video, YouTube e iTunes. Sin embargo, es posible que necesite una suscripción o una cuota de alquiler para acceder a la película. También puede comprobar la disponibilidad de la película en su región y dispositivo antes de verla en línea. </p>
94
- <p></p>
95
- <li><b> ¿Quién compuso la partitura original de Pelicula Roja? </b></li>
96
- <p>La partitura original de Red Movie fue compuesta por Christophe Beck, un compositor canadiense conocido por su trabajo en películas como The Hangover, Frozen, Ant-Man y WandaVision. La partitura original de Red Movie consta de 22 temas que son en su mayoría orquestales, con algunos elementos de rock, jazz y música electrónica. </p>
97
- <li><b>¿Hay una secuela de Pelicula Roja? </b></li>
98
- <p>Sí, hay una secuela de Red Movie llamada Red 2, que fue lanzado en 2013. La secuela sigue a Frank y su equipo mientras intentan evitar que un dispositivo nuclear caiga en las manos equivocadas. La secuela también está protagonizada por Anthony Hopkins, Catherine Zeta-Jones, Byung-hun Lee y Neal McDonough. La secuela tiene una banda sonora similar a la primera película, con 12 canciones de varios géneros y artistas. </p>
99
- <li><b>¿Qué significa RED en la película roja? </b></li>
100
- <p>RED significa Retirado extremadamente peligroso, que es el nombre en clave dado a los ex agentes de la CIA que son blanco de una conspiración. El nombre en clave implica que todavía son capaces y peligrosos a pesar de su edad y estado de jubilación. </p>
101
- <li><b>¿Cómo puedo descargar canciones en Pelicula Roja gratis? </b></li>
102
-
103
- </ol></p> 64aa2da5cf<br />
104
- <br />
105
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Biaolin/stabilityai-FreeWilly1-Delta-SafeTensor/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Stabilityai FreeWilly1 Delta SafeTensor
3
- emoji: 📊
4
- colorFrom: blue
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.38.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py DELETED
The diff for this file is too large to render. See raw diff
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/msvc.py DELETED
@@ -1,1703 +0,0 @@
1
- """
2
- Improved support for Microsoft Visual C++ compilers.
3
-
4
- Known supported compilers:
5
- --------------------------
6
- Microsoft Visual C++ 14.X:
7
- Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
8
- Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64)
9
- Microsoft Visual Studio Build Tools 2019 (x86, x64, arm, arm64)
10
-
11
- This may also support compilers shipped with compatible Visual Studio versions.
12
- """
13
-
14
- import json
15
- from io import open
16
- from os import listdir, pathsep
17
- from os.path import join, isfile, isdir, dirname
18
- import sys
19
- import contextlib
20
- import platform
21
- import itertools
22
- import subprocess
23
- import distutils.errors
24
- from setuptools.extern.packaging.version import LegacyVersion
25
- from setuptools.extern.more_itertools import unique_everseen
26
-
27
- from .monkey import get_unpatched
28
-
29
- if platform.system() == 'Windows':
30
- import winreg
31
- from os import environ
32
- else:
33
- # Mock winreg and environ so the module can be imported on this platform.
34
-
35
- class winreg:
36
- HKEY_USERS = None
37
- HKEY_CURRENT_USER = None
38
- HKEY_LOCAL_MACHINE = None
39
- HKEY_CLASSES_ROOT = None
40
-
41
- environ = dict()
42
-
43
-
44
- def _msvc14_find_vc2015():
45
- """Python 3.8 "distutils/_msvccompiler.py" backport"""
46
- try:
47
- key = winreg.OpenKey(
48
- winreg.HKEY_LOCAL_MACHINE,
49
- r"Software\Microsoft\VisualStudio\SxS\VC7",
50
- 0,
51
- winreg.KEY_READ | winreg.KEY_WOW64_32KEY
52
- )
53
- except OSError:
54
- return None, None
55
-
56
- best_version = 0
57
- best_dir = None
58
- with key:
59
- for i in itertools.count():
60
- try:
61
- v, vc_dir, vt = winreg.EnumValue(key, i)
62
- except OSError:
63
- break
64
- if v and vt == winreg.REG_SZ and isdir(vc_dir):
65
- try:
66
- version = int(float(v))
67
- except (ValueError, TypeError):
68
- continue
69
- if version >= 14 and version > best_version:
70
- best_version, best_dir = version, vc_dir
71
- return best_version, best_dir
72
-
73
-
74
- def _msvc14_find_vc2017():
75
- """Python 3.8 "distutils/_msvccompiler.py" backport
76
-
77
- Returns "15, path" based on the result of invoking vswhere.exe
78
- If no install is found, returns "None, None"
79
-
80
- The version is returned to avoid unnecessarily changing the function
81
- result. It may be ignored when the path is not None.
82
-
83
- If vswhere.exe is not available, by definition, VS 2017 is not
84
- installed.
85
- """
86
- root = environ.get("ProgramFiles(x86)") or environ.get("ProgramFiles")
87
- if not root:
88
- return None, None
89
-
90
- try:
91
- path = subprocess.check_output([
92
- join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
93
- "-latest",
94
- "-prerelease",
95
- "-requiresAny",
96
- "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
97
- "-requires", "Microsoft.VisualStudio.Workload.WDExpress",
98
- "-property", "installationPath",
99
- "-products", "*",
100
- ]).decode(encoding="mbcs", errors="strict").strip()
101
- except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
102
- return None, None
103
-
104
- path = join(path, "VC", "Auxiliary", "Build")
105
- if isdir(path):
106
- return 15, path
107
-
108
- return None, None
109
-
110
-
111
- PLAT_SPEC_TO_RUNTIME = {
112
- 'x86': 'x86',
113
- 'x86_amd64': 'x64',
114
- 'x86_arm': 'arm',
115
- 'x86_arm64': 'arm64'
116
- }
117
-
118
-
119
- def _msvc14_find_vcvarsall(plat_spec):
120
- """Python 3.8 "distutils/_msvccompiler.py" backport"""
121
- _, best_dir = _msvc14_find_vc2017()
122
- vcruntime = None
123
-
124
- if plat_spec in PLAT_SPEC_TO_RUNTIME:
125
- vcruntime_plat = PLAT_SPEC_TO_RUNTIME[plat_spec]
126
- else:
127
- vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86'
128
-
129
- if best_dir:
130
- vcredist = join(best_dir, "..", "..", "redist", "MSVC", "**",
131
- vcruntime_plat, "Microsoft.VC14*.CRT",
132
- "vcruntime140.dll")
133
- try:
134
- import glob
135
- vcruntime = glob.glob(vcredist, recursive=True)[-1]
136
- except (ImportError, OSError, LookupError):
137
- vcruntime = None
138
-
139
- if not best_dir:
140
- best_version, best_dir = _msvc14_find_vc2015()
141
- if best_version:
142
- vcruntime = join(best_dir, 'redist', vcruntime_plat,
143
- "Microsoft.VC140.CRT", "vcruntime140.dll")
144
-
145
- if not best_dir:
146
- return None, None
147
-
148
- vcvarsall = join(best_dir, "vcvarsall.bat")
149
- if not isfile(vcvarsall):
150
- return None, None
151
-
152
- if not vcruntime or not isfile(vcruntime):
153
- vcruntime = None
154
-
155
- return vcvarsall, vcruntime
156
-
157
-
158
- def _msvc14_get_vc_env(plat_spec):
159
- """Python 3.8 "distutils/_msvccompiler.py" backport"""
160
- if "DISTUTILS_USE_SDK" in environ:
161
- return {
162
- key.lower(): value
163
- for key, value in environ.items()
164
- }
165
-
166
- vcvarsall, vcruntime = _msvc14_find_vcvarsall(plat_spec)
167
- if not vcvarsall:
168
- raise distutils.errors.DistutilsPlatformError(
169
- "Unable to find vcvarsall.bat"
170
- )
171
-
172
- try:
173
- out = subprocess.check_output(
174
- 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec),
175
- stderr=subprocess.STDOUT,
176
- ).decode('utf-16le', errors='replace')
177
- except subprocess.CalledProcessError as exc:
178
- raise distutils.errors.DistutilsPlatformError(
179
- "Error executing {}".format(exc.cmd)
180
- ) from exc
181
-
182
- env = {
183
- key.lower(): value
184
- for key, _, value in
185
- (line.partition('=') for line in out.splitlines())
186
- if key and value
187
- }
188
-
189
- if vcruntime:
190
- env['py_vcruntime_redist'] = vcruntime
191
- return env
192
-
193
-
194
- def msvc14_get_vc_env(plat_spec):
195
- """
196
- Patched "distutils._msvccompiler._get_vc_env" for support extra
197
- Microsoft Visual C++ 14.X compilers.
198
-
199
- Set environment without use of "vcvarsall.bat".
200
-
201
- Parameters
202
- ----------
203
- plat_spec: str
204
- Target architecture.
205
-
206
- Return
207
- ------
208
- dict
209
- environment
210
- """
211
-
212
- # Always use backport from CPython 3.8
213
- try:
214
- return _msvc14_get_vc_env(plat_spec)
215
- except distutils.errors.DistutilsPlatformError as exc:
216
- _augment_exception(exc, 14.0)
217
- raise
218
-
219
-
220
- def msvc14_gen_lib_options(*args, **kwargs):
221
- """
222
- Patched "distutils._msvccompiler.gen_lib_options" for fix
223
- compatibility between "numpy.distutils" and "distutils._msvccompiler"
224
- (for Numpy < 1.11.2)
225
- """
226
- if "numpy.distutils" in sys.modules:
227
- import numpy as np
228
- if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'):
229
- return np.distutils.ccompiler.gen_lib_options(*args, **kwargs)
230
- return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs)
231
-
232
-
233
- def _augment_exception(exc, version, arch=''):
234
- """
235
- Add details to the exception message to help guide the user
236
- as to what action will resolve it.
237
- """
238
- # Error if MSVC++ directory not found or environment not set
239
- message = exc.args[0]
240
-
241
- if "vcvarsall" in message.lower() or "visual c" in message.lower():
242
- # Special error message if MSVC++ not installed
243
- tmpl = 'Microsoft Visual C++ {version:0.1f} or greater is required.'
244
- message = tmpl.format(**locals())
245
- msdownload = 'www.microsoft.com/download/details.aspx?id=%d'
246
- if version == 9.0:
247
- if arch.lower().find('ia64') > -1:
248
- # For VC++ 9.0, if IA64 support is needed, redirect user
249
- # to Windows SDK 7.0.
250
- # Note: No download link available from Microsoft.
251
- message += ' Get it with "Microsoft Windows SDK 7.0"'
252
- else:
253
- # For VC++ 9.0 redirect user to Vc++ for Python 2.7 :
254
- # This redirection link is maintained by Microsoft.
255
- # Contact [email protected] if it needs updating.
256
- message += ' Get it from http://aka.ms/vcpython27'
257
- elif version == 10.0:
258
- # For VC++ 10.0 Redirect user to Windows SDK 7.1
259
- message += ' Get it with "Microsoft Windows SDK 7.1": '
260
- message += msdownload % 8279
261
- elif version >= 14.0:
262
- # For VC++ 14.X Redirect user to latest Visual C++ Build Tools
263
- message += (' Get it with "Microsoft C++ Build Tools": '
264
- r'https://visualstudio.microsoft.com'
265
- r'/visual-cpp-build-tools/')
266
-
267
- exc.args = (message, )
268
-
269
-
270
- class PlatformInfo:
271
- """
272
- Current and Target Architectures information.
273
-
274
- Parameters
275
- ----------
276
- arch: str
277
- Target architecture.
278
- """
279
- current_cpu = environ.get('processor_architecture', '').lower()
280
-
281
- def __init__(self, arch):
282
- self.arch = arch.lower().replace('x64', 'amd64')
283
-
284
- @property
285
- def target_cpu(self):
286
- """
287
- Return Target CPU architecture.
288
-
289
- Return
290
- ------
291
- str
292
- Target CPU
293
- """
294
- return self.arch[self.arch.find('_') + 1:]
295
-
296
- def target_is_x86(self):
297
- """
298
- Return True if target CPU is x86 32 bits..
299
-
300
- Return
301
- ------
302
- bool
303
- CPU is x86 32 bits
304
- """
305
- return self.target_cpu == 'x86'
306
-
307
- def current_is_x86(self):
308
- """
309
- Return True if current CPU is x86 32 bits..
310
-
311
- Return
312
- ------
313
- bool
314
- CPU is x86 32 bits
315
- """
316
- return self.current_cpu == 'x86'
317
-
318
- def current_dir(self, hidex86=False, x64=False):
319
- """
320
- Current platform specific subfolder.
321
-
322
- Parameters
323
- ----------
324
- hidex86: bool
325
- return '' and not '\x86' if architecture is x86.
326
- x64: bool
327
- return '\x64' and not '\amd64' if architecture is amd64.
328
-
329
- Return
330
- ------
331
- str
332
- subfolder: '\target', or '' (see hidex86 parameter)
333
- """
334
- return (
335
- '' if (self.current_cpu == 'x86' and hidex86) else
336
- r'\x64' if (self.current_cpu == 'amd64' and x64) else
337
- r'\%s' % self.current_cpu
338
- )
339
-
340
- def target_dir(self, hidex86=False, x64=False):
341
- r"""
342
- Target platform specific subfolder.
343
-
344
- Parameters
345
- ----------
346
- hidex86: bool
347
- return '' and not '\x86' if architecture is x86.
348
- x64: bool
349
- return '\x64' and not '\amd64' if architecture is amd64.
350
-
351
- Return
352
- ------
353
- str
354
- subfolder: '\current', or '' (see hidex86 parameter)
355
- """
356
- return (
357
- '' if (self.target_cpu == 'x86' and hidex86) else
358
- r'\x64' if (self.target_cpu == 'amd64' and x64) else
359
- r'\%s' % self.target_cpu
360
- )
361
-
362
- def cross_dir(self, forcex86=False):
363
- r"""
364
- Cross platform specific subfolder.
365
-
366
- Parameters
367
- ----------
368
- forcex86: bool
369
- Use 'x86' as current architecture even if current architecture is
370
- not x86.
371
-
372
- Return
373
- ------
374
- str
375
- subfolder: '' if target architecture is current architecture,
376
- '\current_target' if not.
377
- """
378
- current = 'x86' if forcex86 else self.current_cpu
379
- return (
380
- '' if self.target_cpu == current else
381
- self.target_dir().replace('\\', '\\%s_' % current)
382
- )
383
-
384
-
385
- class RegistryInfo:
386
- """
387
- Microsoft Visual Studio related registry information.
388
-
389
- Parameters
390
- ----------
391
- platform_info: PlatformInfo
392
- "PlatformInfo" instance.
393
- """
394
- HKEYS = (winreg.HKEY_USERS,
395
- winreg.HKEY_CURRENT_USER,
396
- winreg.HKEY_LOCAL_MACHINE,
397
- winreg.HKEY_CLASSES_ROOT)
398
-
399
- def __init__(self, platform_info):
400
- self.pi = platform_info
401
-
402
- @property
403
- def visualstudio(self):
404
- """
405
- Microsoft Visual Studio root registry key.
406
-
407
- Return
408
- ------
409
- str
410
- Registry key
411
- """
412
- return 'VisualStudio'
413
-
414
- @property
415
- def sxs(self):
416
- """
417
- Microsoft Visual Studio SxS registry key.
418
-
419
- Return
420
- ------
421
- str
422
- Registry key
423
- """
424
- return join(self.visualstudio, 'SxS')
425
-
426
- @property
427
- def vc(self):
428
- """
429
- Microsoft Visual C++ VC7 registry key.
430
-
431
- Return
432
- ------
433
- str
434
- Registry key
435
- """
436
- return join(self.sxs, 'VC7')
437
-
438
- @property
439
- def vs(self):
440
- """
441
- Microsoft Visual Studio VS7 registry key.
442
-
443
- Return
444
- ------
445
- str
446
- Registry key
447
- """
448
- return join(self.sxs, 'VS7')
449
-
450
- @property
451
- def vc_for_python(self):
452
- """
453
- Microsoft Visual C++ for Python registry key.
454
-
455
- Return
456
- ------
457
- str
458
- Registry key
459
- """
460
- return r'DevDiv\VCForPython'
461
-
462
- @property
463
- def microsoft_sdk(self):
464
- """
465
- Microsoft SDK registry key.
466
-
467
- Return
468
- ------
469
- str
470
- Registry key
471
- """
472
- return 'Microsoft SDKs'
473
-
474
- @property
475
- def windows_sdk(self):
476
- """
477
- Microsoft Windows/Platform SDK registry key.
478
-
479
- Return
480
- ------
481
- str
482
- Registry key
483
- """
484
- return join(self.microsoft_sdk, 'Windows')
485
-
486
- @property
487
- def netfx_sdk(self):
488
- """
489
- Microsoft .NET Framework SDK registry key.
490
-
491
- Return
492
- ------
493
- str
494
- Registry key
495
- """
496
- return join(self.microsoft_sdk, 'NETFXSDK')
497
-
498
- @property
499
- def windows_kits_roots(self):
500
- """
501
- Microsoft Windows Kits Roots registry key.
502
-
503
- Return
504
- ------
505
- str
506
- Registry key
507
- """
508
- return r'Windows Kits\Installed Roots'
509
-
510
- def microsoft(self, key, x86=False):
511
- """
512
- Return key in Microsoft software registry.
513
-
514
- Parameters
515
- ----------
516
- key: str
517
- Registry key path where look.
518
- x86: str
519
- Force x86 software registry.
520
-
521
- Return
522
- ------
523
- str
524
- Registry key
525
- """
526
- node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'
527
- return join('Software', node64, 'Microsoft', key)
528
-
529
- def lookup(self, key, name):
530
- """
531
- Look for values in registry in Microsoft software registry.
532
-
533
- Parameters
534
- ----------
535
- key: str
536
- Registry key path where look.
537
- name: str
538
- Value name to find.
539
-
540
- Return
541
- ------
542
- str
543
- value
544
- """
545
- key_read = winreg.KEY_READ
546
- openkey = winreg.OpenKey
547
- closekey = winreg.CloseKey
548
- ms = self.microsoft
549
- for hkey in self.HKEYS:
550
- bkey = None
551
- try:
552
- bkey = openkey(hkey, ms(key), 0, key_read)
553
- except (OSError, IOError):
554
- if not self.pi.current_is_x86():
555
- try:
556
- bkey = openkey(hkey, ms(key, True), 0, key_read)
557
- except (OSError, IOError):
558
- continue
559
- else:
560
- continue
561
- try:
562
- return winreg.QueryValueEx(bkey, name)[0]
563
- except (OSError, IOError):
564
- pass
565
- finally:
566
- if bkey:
567
- closekey(bkey)
568
-
569
-
570
- class SystemInfo:
571
- """
572
- Microsoft Windows and Visual Studio related system information.
573
-
574
- Parameters
575
- ----------
576
- registry_info: RegistryInfo
577
- "RegistryInfo" instance.
578
- vc_ver: float
579
- Required Microsoft Visual C++ version.
580
- """
581
-
582
- # Variables and properties in this class use originals CamelCase variables
583
- # names from Microsoft source files for more easy comparison.
584
- WinDir = environ.get('WinDir', '')
585
- ProgramFiles = environ.get('ProgramFiles', '')
586
- ProgramFilesx86 = environ.get('ProgramFiles(x86)', ProgramFiles)
587
-
588
- def __init__(self, registry_info, vc_ver=None):
589
- self.ri = registry_info
590
- self.pi = self.ri.pi
591
-
592
- self.known_vs_paths = self.find_programdata_vs_vers()
593
-
594
- # Except for VS15+, VC version is aligned with VS version
595
- self.vs_ver = self.vc_ver = (
596
- vc_ver or self._find_latest_available_vs_ver())
597
-
598
- def _find_latest_available_vs_ver(self):
599
- """
600
- Find the latest VC version
601
-
602
- Return
603
- ------
604
- float
605
- version
606
- """
607
- reg_vc_vers = self.find_reg_vs_vers()
608
-
609
- if not (reg_vc_vers or self.known_vs_paths):
610
- raise distutils.errors.DistutilsPlatformError(
611
- 'No Microsoft Visual C++ version found')
612
-
613
- vc_vers = set(reg_vc_vers)
614
- vc_vers.update(self.known_vs_paths)
615
- return sorted(vc_vers)[-1]
616
-
617
- def find_reg_vs_vers(self):
618
- """
619
- Find Microsoft Visual Studio versions available in registry.
620
-
621
- Return
622
- ------
623
- list of float
624
- Versions
625
- """
626
- ms = self.ri.microsoft
627
- vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs)
628
- vs_vers = []
629
- for hkey, key in itertools.product(self.ri.HKEYS, vckeys):
630
- try:
631
- bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ)
632
- except (OSError, IOError):
633
- continue
634
- with bkey:
635
- subkeys, values, _ = winreg.QueryInfoKey(bkey)
636
- for i in range(values):
637
- with contextlib.suppress(ValueError):
638
- ver = float(winreg.EnumValue(bkey, i)[0])
639
- if ver not in vs_vers:
640
- vs_vers.append(ver)
641
- for i in range(subkeys):
642
- with contextlib.suppress(ValueError):
643
- ver = float(winreg.EnumKey(bkey, i))
644
- if ver not in vs_vers:
645
- vs_vers.append(ver)
646
- return sorted(vs_vers)
647
-
648
- def find_programdata_vs_vers(self):
649
- r"""
650
- Find Visual studio 2017+ versions from information in
651
- "C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances".
652
-
653
- Return
654
- ------
655
- dict
656
- float version as key, path as value.
657
- """
658
- vs_versions = {}
659
- instances_dir = \
660
- r'C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances'
661
-
662
- try:
663
- hashed_names = listdir(instances_dir)
664
-
665
- except (OSError, IOError):
666
- # Directory not exists with all Visual Studio versions
667
- return vs_versions
668
-
669
- for name in hashed_names:
670
- try:
671
- # Get VS installation path from "state.json" file
672
- state_path = join(instances_dir, name, 'state.json')
673
- with open(state_path, 'rt', encoding='utf-8') as state_file:
674
- state = json.load(state_file)
675
- vs_path = state['installationPath']
676
-
677
- # Raises OSError if this VS installation does not contain VC
678
- listdir(join(vs_path, r'VC\Tools\MSVC'))
679
-
680
- # Store version and path
681
- vs_versions[self._as_float_version(
682
- state['installationVersion'])] = vs_path
683
-
684
- except (OSError, IOError, KeyError):
685
- # Skip if "state.json" file is missing or bad format
686
- continue
687
-
688
- return vs_versions
689
-
690
- @staticmethod
691
- def _as_float_version(version):
692
- """
693
- Return a string version as a simplified float version (major.minor)
694
-
695
- Parameters
696
- ----------
697
- version: str
698
- Version.
699
-
700
- Return
701
- ------
702
- float
703
- version
704
- """
705
- return float('.'.join(version.split('.')[:2]))
706
-
707
- @property
708
- def VSInstallDir(self):
709
- """
710
- Microsoft Visual Studio directory.
711
-
712
- Return
713
- ------
714
- str
715
- path
716
- """
717
- # Default path
718
- default = join(self.ProgramFilesx86,
719
- 'Microsoft Visual Studio %0.1f' % self.vs_ver)
720
-
721
- # Try to get path from registry, if fail use default path
722
- return self.ri.lookup(self.ri.vs, '%0.1f' % self.vs_ver) or default
723
-
724
- @property
725
- def VCInstallDir(self):
726
- """
727
- Microsoft Visual C++ directory.
728
-
729
- Return
730
- ------
731
- str
732
- path
733
- """
734
- path = self._guess_vc() or self._guess_vc_legacy()
735
-
736
- if not isdir(path):
737
- msg = 'Microsoft Visual C++ directory not found'
738
- raise distutils.errors.DistutilsPlatformError(msg)
739
-
740
- return path
741
-
742
- def _guess_vc(self):
743
- """
744
- Locate Visual C++ for VS2017+.
745
-
746
- Return
747
- ------
748
- str
749
- path
750
- """
751
- if self.vs_ver <= 14.0:
752
- return ''
753
-
754
- try:
755
- # First search in known VS paths
756
- vs_dir = self.known_vs_paths[self.vs_ver]
757
- except KeyError:
758
- # Else, search with path from registry
759
- vs_dir = self.VSInstallDir
760
-
761
- guess_vc = join(vs_dir, r'VC\Tools\MSVC')
762
-
763
- # Subdir with VC exact version as name
764
- try:
765
- # Update the VC version with real one instead of VS version
766
- vc_ver = listdir(guess_vc)[-1]
767
- self.vc_ver = self._as_float_version(vc_ver)
768
- return join(guess_vc, vc_ver)
769
- except (OSError, IOError, IndexError):
770
- return ''
771
-
772
- def _guess_vc_legacy(self):
773
- """
774
- Locate Visual C++ for versions prior to 2017.
775
-
776
- Return
777
- ------
778
- str
779
- path
780
- """
781
- default = join(self.ProgramFilesx86,
782
- r'Microsoft Visual Studio %0.1f\VC' % self.vs_ver)
783
-
784
- # Try to get "VC++ for Python" path from registry as default path
785
- reg_path = join(self.ri.vc_for_python, '%0.1f' % self.vs_ver)
786
- python_vc = self.ri.lookup(reg_path, 'installdir')
787
- default_vc = join(python_vc, 'VC') if python_vc else default
788
-
789
- # Try to get path from registry, if fail use default path
790
- return self.ri.lookup(self.ri.vc, '%0.1f' % self.vs_ver) or default_vc
791
-
792
- @property
793
- def WindowsSdkVersion(self):
794
- """
795
- Microsoft Windows SDK versions for specified MSVC++ version.
796
-
797
- Return
798
- ------
799
- tuple of str
800
- versions
801
- """
802
- if self.vs_ver <= 9.0:
803
- return '7.0', '6.1', '6.0a'
804
- elif self.vs_ver == 10.0:
805
- return '7.1', '7.0a'
806
- elif self.vs_ver == 11.0:
807
- return '8.0', '8.0a'
808
- elif self.vs_ver == 12.0:
809
- return '8.1', '8.1a'
810
- elif self.vs_ver >= 14.0:
811
- return '10.0', '8.1'
812
-
813
- @property
814
- def WindowsSdkLastVersion(self):
815
- """
816
- Microsoft Windows SDK last version.
817
-
818
- Return
819
- ------
820
- str
821
- version
822
- """
823
- return self._use_last_dir_name(join(self.WindowsSdkDir, 'lib'))
824
-
825
- @property # noqa: C901
826
- def WindowsSdkDir(self): # noqa: C901 # is too complex (12) # FIXME
827
- """
828
- Microsoft Windows SDK directory.
829
-
830
- Return
831
- ------
832
- str
833
- path
834
- """
835
- sdkdir = ''
836
- for ver in self.WindowsSdkVersion:
837
- # Try to get it from registry
838
- loc = join(self.ri.windows_sdk, 'v%s' % ver)
839
- sdkdir = self.ri.lookup(loc, 'installationfolder')
840
- if sdkdir:
841
- break
842
- if not sdkdir or not isdir(sdkdir):
843
- # Try to get "VC++ for Python" version from registry
844
- path = join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
845
- install_base = self.ri.lookup(path, 'installdir')
846
- if install_base:
847
- sdkdir = join(install_base, 'WinSDK')
848
- if not sdkdir or not isdir(sdkdir):
849
- # If fail, use default new path
850
- for ver in self.WindowsSdkVersion:
851
- intver = ver[:ver.rfind('.')]
852
- path = r'Microsoft SDKs\Windows Kits\%s' % intver
853
- d = join(self.ProgramFiles, path)
854
- if isdir(d):
855
- sdkdir = d
856
- if not sdkdir or not isdir(sdkdir):
857
- # If fail, use default old path
858
- for ver in self.WindowsSdkVersion:
859
- path = r'Microsoft SDKs\Windows\v%s' % ver
860
- d = join(self.ProgramFiles, path)
861
- if isdir(d):
862
- sdkdir = d
863
- if not sdkdir:
864
- # If fail, use Platform SDK
865
- sdkdir = join(self.VCInstallDir, 'PlatformSDK')
866
- return sdkdir
867
-
868
- @property
869
- def WindowsSDKExecutablePath(self):
870
- """
871
- Microsoft Windows SDK executable directory.
872
-
873
- Return
874
- ------
875
- str
876
- path
877
- """
878
- # Find WinSDK NetFx Tools registry dir name
879
- if self.vs_ver <= 11.0:
880
- netfxver = 35
881
- arch = ''
882
- else:
883
- netfxver = 40
884
- hidex86 = True if self.vs_ver <= 12.0 else False
885
- arch = self.pi.current_dir(x64=True, hidex86=hidex86)
886
- fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-'))
887
-
888
- # list all possibles registry paths
889
- regpaths = []
890
- if self.vs_ver >= 14.0:
891
- for ver in self.NetFxSdkVersion:
892
- regpaths += [join(self.ri.netfx_sdk, ver, fx)]
893
-
894
- for ver in self.WindowsSdkVersion:
895
- regpaths += [join(self.ri.windows_sdk, 'v%sA' % ver, fx)]
896
-
897
- # Return installation folder from the more recent path
898
- for path in regpaths:
899
- execpath = self.ri.lookup(path, 'installationfolder')
900
- if execpath:
901
- return execpath
902
-
903
- @property
904
- def FSharpInstallDir(self):
905
- """
906
- Microsoft Visual F# directory.
907
-
908
- Return
909
- ------
910
- str
911
- path
912
- """
913
- path = join(self.ri.visualstudio, r'%0.1f\Setup\F#' % self.vs_ver)
914
- return self.ri.lookup(path, 'productdir') or ''
915
-
916
- @property
917
- def UniversalCRTSdkDir(self):
918
- """
919
- Microsoft Universal CRT SDK directory.
920
-
921
- Return
922
- ------
923
- str
924
- path
925
- """
926
- # Set Kit Roots versions for specified MSVC++ version
927
- vers = ('10', '81') if self.vs_ver >= 14.0 else ()
928
-
929
- # Find path of the more recent Kit
930
- for ver in vers:
931
- sdkdir = self.ri.lookup(self.ri.windows_kits_roots,
932
- 'kitsroot%s' % ver)
933
- if sdkdir:
934
- return sdkdir or ''
935
-
936
- @property
937
- def UniversalCRTSdkLastVersion(self):
938
- """
939
- Microsoft Universal C Runtime SDK last version.
940
-
941
- Return
942
- ------
943
- str
944
- version
945
- """
946
- return self._use_last_dir_name(join(self.UniversalCRTSdkDir, 'lib'))
947
-
948
- @property
949
- def NetFxSdkVersion(self):
950
- """
951
- Microsoft .NET Framework SDK versions.
952
-
953
- Return
954
- ------
955
- tuple of str
956
- versions
957
- """
958
- # Set FxSdk versions for specified VS version
959
- return (('4.7.2', '4.7.1', '4.7',
960
- '4.6.2', '4.6.1', '4.6',
961
- '4.5.2', '4.5.1', '4.5')
962
- if self.vs_ver >= 14.0 else ())
963
-
964
- @property
965
- def NetFxSdkDir(self):
966
- """
967
- Microsoft .NET Framework SDK directory.
968
-
969
- Return
970
- ------
971
- str
972
- path
973
- """
974
- sdkdir = ''
975
- for ver in self.NetFxSdkVersion:
976
- loc = join(self.ri.netfx_sdk, ver)
977
- sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')
978
- if sdkdir:
979
- break
980
- return sdkdir
981
-
982
- @property
983
- def FrameworkDir32(self):
984
- """
985
- Microsoft .NET Framework 32bit directory.
986
-
987
- Return
988
- ------
989
- str
990
- path
991
- """
992
- # Default path
993
- guess_fw = join(self.WinDir, r'Microsoft.NET\Framework')
994
-
995
- # Try to get path from registry, if fail use default path
996
- return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw
997
-
998
- @property
999
- def FrameworkDir64(self):
1000
- """
1001
- Microsoft .NET Framework 64bit directory.
1002
-
1003
- Return
1004
- ------
1005
- str
1006
- path
1007
- """
1008
- # Default path
1009
- guess_fw = join(self.WinDir, r'Microsoft.NET\Framework64')
1010
-
1011
- # Try to get path from registry, if fail use default path
1012
- return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw
1013
-
1014
- @property
1015
- def FrameworkVersion32(self):
1016
- """
1017
- Microsoft .NET Framework 32bit versions.
1018
-
1019
- Return
1020
- ------
1021
- tuple of str
1022
- versions
1023
- """
1024
- return self._find_dot_net_versions(32)
1025
-
1026
- @property
1027
- def FrameworkVersion64(self):
1028
- """
1029
- Microsoft .NET Framework 64bit versions.
1030
-
1031
- Return
1032
- ------
1033
- tuple of str
1034
- versions
1035
- """
1036
- return self._find_dot_net_versions(64)
1037
-
1038
- def _find_dot_net_versions(self, bits):
1039
- """
1040
- Find Microsoft .NET Framework versions.
1041
-
1042
- Parameters
1043
- ----------
1044
- bits: int
1045
- Platform number of bits: 32 or 64.
1046
-
1047
- Return
1048
- ------
1049
- tuple of str
1050
- versions
1051
- """
1052
- # Find actual .NET version in registry
1053
- reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits)
1054
- dot_net_dir = getattr(self, 'FrameworkDir%d' % bits)
1055
- ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or ''
1056
-
1057
- # Set .NET versions for specified MSVC++ version
1058
- if self.vs_ver >= 12.0:
1059
- return ver, 'v4.0'
1060
- elif self.vs_ver >= 10.0:
1061
- return 'v4.0.30319' if ver.lower()[:2] != 'v4' else ver, 'v3.5'
1062
- elif self.vs_ver == 9.0:
1063
- return 'v3.5', 'v2.0.50727'
1064
- elif self.vs_ver == 8.0:
1065
- return 'v3.0', 'v2.0.50727'
1066
-
1067
- @staticmethod
1068
- def _use_last_dir_name(path, prefix=''):
1069
- """
1070
- Return name of the last dir in path or '' if no dir found.
1071
-
1072
- Parameters
1073
- ----------
1074
- path: str
1075
- Use dirs in this path
1076
- prefix: str
1077
- Use only dirs starting by this prefix
1078
-
1079
- Return
1080
- ------
1081
- str
1082
- name
1083
- """
1084
- matching_dirs = (
1085
- dir_name
1086
- for dir_name in reversed(listdir(path))
1087
- if isdir(join(path, dir_name)) and
1088
- dir_name.startswith(prefix)
1089
- )
1090
- return next(matching_dirs, None) or ''
1091
-
1092
-
1093
- class EnvironmentInfo:
1094
- """
1095
- Return environment variables for specified Microsoft Visual C++ version
1096
- and platform : Lib, Include, Path and libpath.
1097
-
1098
- This function is compatible with Microsoft Visual C++ 9.0 to 14.X.
1099
-
1100
- Script created by analysing Microsoft environment configuration files like
1101
- "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ...
1102
-
1103
- Parameters
1104
- ----------
1105
- arch: str
1106
- Target architecture.
1107
- vc_ver: float
1108
- Required Microsoft Visual C++ version. If not set, autodetect the last
1109
- version.
1110
- vc_min_ver: float
1111
- Minimum Microsoft Visual C++ version.
1112
- """
1113
-
1114
- # Variables and properties in this class use originals CamelCase variables
1115
- # names from Microsoft source files for more easy comparison.
1116
-
1117
- def __init__(self, arch, vc_ver=None, vc_min_ver=0):
1118
- self.pi = PlatformInfo(arch)
1119
- self.ri = RegistryInfo(self.pi)
1120
- self.si = SystemInfo(self.ri, vc_ver)
1121
-
1122
- if self.vc_ver < vc_min_ver:
1123
- err = 'No suitable Microsoft Visual C++ version found'
1124
- raise distutils.errors.DistutilsPlatformError(err)
1125
-
1126
- @property
1127
- def vs_ver(self):
1128
- """
1129
- Microsoft Visual Studio.
1130
-
1131
- Return
1132
- ------
1133
- float
1134
- version
1135
- """
1136
- return self.si.vs_ver
1137
-
1138
- @property
1139
- def vc_ver(self):
1140
- """
1141
- Microsoft Visual C++ version.
1142
-
1143
- Return
1144
- ------
1145
- float
1146
- version
1147
- """
1148
- return self.si.vc_ver
1149
-
1150
- @property
1151
- def VSTools(self):
1152
- """
1153
- Microsoft Visual Studio Tools.
1154
-
1155
- Return
1156
- ------
1157
- list of str
1158
- paths
1159
- """
1160
- paths = [r'Common7\IDE', r'Common7\Tools']
1161
-
1162
- if self.vs_ver >= 14.0:
1163
- arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
1164
- paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow']
1165
- paths += [r'Team Tools\Performance Tools']
1166
- paths += [r'Team Tools\Performance Tools%s' % arch_subdir]
1167
-
1168
- return [join(self.si.VSInstallDir, path) for path in paths]
1169
-
1170
- @property
1171
- def VCIncludes(self):
1172
- """
1173
- Microsoft Visual C++ & Microsoft Foundation Class Includes.
1174
-
1175
- Return
1176
- ------
1177
- list of str
1178
- paths
1179
- """
1180
- return [join(self.si.VCInstallDir, 'Include'),
1181
- join(self.si.VCInstallDir, r'ATLMFC\Include')]
1182
-
1183
- @property
1184
- def VCLibraries(self):
1185
- """
1186
- Microsoft Visual C++ & Microsoft Foundation Class Libraries.
1187
-
1188
- Return
1189
- ------
1190
- list of str
1191
- paths
1192
- """
1193
- if self.vs_ver >= 15.0:
1194
- arch_subdir = self.pi.target_dir(x64=True)
1195
- else:
1196
- arch_subdir = self.pi.target_dir(hidex86=True)
1197
- paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir]
1198
-
1199
- if self.vs_ver >= 14.0:
1200
- paths += [r'Lib\store%s' % arch_subdir]
1201
-
1202
- return [join(self.si.VCInstallDir, path) for path in paths]
1203
-
1204
- @property
1205
- def VCStoreRefs(self):
1206
- """
1207
- Microsoft Visual C++ store references Libraries.
1208
-
1209
- Return
1210
- ------
1211
- list of str
1212
- paths
1213
- """
1214
- if self.vs_ver < 14.0:
1215
- return []
1216
- return [join(self.si.VCInstallDir, r'Lib\store\references')]
1217
-
1218
- @property
1219
- def VCTools(self):
1220
- """
1221
- Microsoft Visual C++ Tools.
1222
-
1223
- Return
1224
- ------
1225
- list of str
1226
- paths
1227
- """
1228
- si = self.si
1229
- tools = [join(si.VCInstallDir, 'VCPackages')]
1230
-
1231
- forcex86 = True if self.vs_ver <= 10.0 else False
1232
- arch_subdir = self.pi.cross_dir(forcex86)
1233
- if arch_subdir:
1234
- tools += [join(si.VCInstallDir, 'Bin%s' % arch_subdir)]
1235
-
1236
- if self.vs_ver == 14.0:
1237
- path = 'Bin%s' % self.pi.current_dir(hidex86=True)
1238
- tools += [join(si.VCInstallDir, path)]
1239
-
1240
- elif self.vs_ver >= 15.0:
1241
- host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else
1242
- r'bin\HostX64%s')
1243
- tools += [join(
1244
- si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))]
1245
-
1246
- if self.pi.current_cpu != self.pi.target_cpu:
1247
- tools += [join(
1248
- si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))]
1249
-
1250
- else:
1251
- tools += [join(si.VCInstallDir, 'Bin')]
1252
-
1253
- return tools
1254
-
1255
- @property
1256
- def OSLibraries(self):
1257
- """
1258
- Microsoft Windows SDK Libraries.
1259
-
1260
- Return
1261
- ------
1262
- list of str
1263
- paths
1264
- """
1265
- if self.vs_ver <= 10.0:
1266
- arch_subdir = self.pi.target_dir(hidex86=True, x64=True)
1267
- return [join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)]
1268
-
1269
- else:
1270
- arch_subdir = self.pi.target_dir(x64=True)
1271
- lib = join(self.si.WindowsSdkDir, 'lib')
1272
- libver = self._sdk_subdir
1273
- return [join(lib, '%sum%s' % (libver, arch_subdir))]
1274
-
1275
- @property
1276
- def OSIncludes(self):
1277
- """
1278
- Microsoft Windows SDK Include.
1279
-
1280
- Return
1281
- ------
1282
- list of str
1283
- paths
1284
- """
1285
- include = join(self.si.WindowsSdkDir, 'include')
1286
-
1287
- if self.vs_ver <= 10.0:
1288
- return [include, join(include, 'gl')]
1289
-
1290
- else:
1291
- if self.vs_ver >= 14.0:
1292
- sdkver = self._sdk_subdir
1293
- else:
1294
- sdkver = ''
1295
- return [join(include, '%sshared' % sdkver),
1296
- join(include, '%sum' % sdkver),
1297
- join(include, '%swinrt' % sdkver)]
1298
-
1299
- @property
1300
- def OSLibpath(self):
1301
- """
1302
- Microsoft Windows SDK Libraries Paths.
1303
-
1304
- Return
1305
- ------
1306
- list of str
1307
- paths
1308
- """
1309
- ref = join(self.si.WindowsSdkDir, 'References')
1310
- libpath = []
1311
-
1312
- if self.vs_ver <= 9.0:
1313
- libpath += self.OSLibraries
1314
-
1315
- if self.vs_ver >= 11.0:
1316
- libpath += [join(ref, r'CommonConfiguration\Neutral')]
1317
-
1318
- if self.vs_ver >= 14.0:
1319
- libpath += [
1320
- ref,
1321
- join(self.si.WindowsSdkDir, 'UnionMetadata'),
1322
- join(
1323
- ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0'),
1324
- join(ref, 'Windows.Foundation.FoundationContract', '1.0.0.0'),
1325
- join(
1326
- ref, 'Windows.Networking.Connectivity.WwanContract',
1327
- '1.0.0.0'),
1328
- join(
1329
- self.si.WindowsSdkDir, 'ExtensionSDKs', 'Microsoft.VCLibs',
1330
- '%0.1f' % self.vs_ver, 'References', 'CommonConfiguration',
1331
- 'neutral'),
1332
- ]
1333
- return libpath
1334
-
1335
- @property
1336
- def SdkTools(self):
1337
- """
1338
- Microsoft Windows SDK Tools.
1339
-
1340
- Return
1341
- ------
1342
- list of str
1343
- paths
1344
- """
1345
- return list(self._sdk_tools())
1346
-
1347
- def _sdk_tools(self):
1348
- """
1349
- Microsoft Windows SDK Tools paths generator.
1350
-
1351
- Return
1352
- ------
1353
- generator of str
1354
- paths
1355
- """
1356
- if self.vs_ver < 15.0:
1357
- bin_dir = 'Bin' if self.vs_ver <= 11.0 else r'Bin\x86'
1358
- yield join(self.si.WindowsSdkDir, bin_dir)
1359
-
1360
- if not self.pi.current_is_x86():
1361
- arch_subdir = self.pi.current_dir(x64=True)
1362
- path = 'Bin%s' % arch_subdir
1363
- yield join(self.si.WindowsSdkDir, path)
1364
-
1365
- if self.vs_ver in (10.0, 11.0):
1366
- if self.pi.target_is_x86():
1367
- arch_subdir = ''
1368
- else:
1369
- arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
1370
- path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir
1371
- yield join(self.si.WindowsSdkDir, path)
1372
-
1373
- elif self.vs_ver >= 15.0:
1374
- path = join(self.si.WindowsSdkDir, 'Bin')
1375
- arch_subdir = self.pi.current_dir(x64=True)
1376
- sdkver = self.si.WindowsSdkLastVersion
1377
- yield join(path, '%s%s' % (sdkver, arch_subdir))
1378
-
1379
- if self.si.WindowsSDKExecutablePath:
1380
- yield self.si.WindowsSDKExecutablePath
1381
-
1382
- @property
1383
- def _sdk_subdir(self):
1384
- """
1385
- Microsoft Windows SDK version subdir.
1386
-
1387
- Return
1388
- ------
1389
- str
1390
- subdir
1391
- """
1392
- ucrtver = self.si.WindowsSdkLastVersion
1393
- return ('%s\\' % ucrtver) if ucrtver else ''
1394
-
1395
- @property
1396
- def SdkSetup(self):
1397
- """
1398
- Microsoft Windows SDK Setup.
1399
-
1400
- Return
1401
- ------
1402
- list of str
1403
- paths
1404
- """
1405
- if self.vs_ver > 9.0:
1406
- return []
1407
-
1408
- return [join(self.si.WindowsSdkDir, 'Setup')]
1409
-
1410
- @property
1411
- def FxTools(self):
1412
- """
1413
- Microsoft .NET Framework Tools.
1414
-
1415
- Return
1416
- ------
1417
- list of str
1418
- paths
1419
- """
1420
- pi = self.pi
1421
- si = self.si
1422
-
1423
- if self.vs_ver <= 10.0:
1424
- include32 = True
1425
- include64 = not pi.target_is_x86() and not pi.current_is_x86()
1426
- else:
1427
- include32 = pi.target_is_x86() or pi.current_is_x86()
1428
- include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64'
1429
-
1430
- tools = []
1431
- if include32:
1432
- tools += [join(si.FrameworkDir32, ver)
1433
- for ver in si.FrameworkVersion32]
1434
- if include64:
1435
- tools += [join(si.FrameworkDir64, ver)
1436
- for ver in si.FrameworkVersion64]
1437
- return tools
1438
-
1439
- @property
1440
- def NetFxSDKLibraries(self):
1441
- """
1442
- Microsoft .Net Framework SDK Libraries.
1443
-
1444
- Return
1445
- ------
1446
- list of str
1447
- paths
1448
- """
1449
- if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
1450
- return []
1451
-
1452
- arch_subdir = self.pi.target_dir(x64=True)
1453
- return [join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)]
1454
-
1455
- @property
1456
- def NetFxSDKIncludes(self):
1457
- """
1458
- Microsoft .Net Framework SDK Includes.
1459
-
1460
- Return
1461
- ------
1462
- list of str
1463
- paths
1464
- """
1465
- if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
1466
- return []
1467
-
1468
- return [join(self.si.NetFxSdkDir, r'include\um')]
1469
-
1470
- @property
1471
- def VsTDb(self):
1472
- """
1473
- Microsoft Visual Studio Team System Database.
1474
-
1475
- Return
1476
- ------
1477
- list of str
1478
- paths
1479
- """
1480
- return [join(self.si.VSInstallDir, r'VSTSDB\Deploy')]
1481
-
1482
- @property
1483
- def MSBuild(self):
1484
- """
1485
- Microsoft Build Engine.
1486
-
1487
- Return
1488
- ------
1489
- list of str
1490
- paths
1491
- """
1492
- if self.vs_ver < 12.0:
1493
- return []
1494
- elif self.vs_ver < 15.0:
1495
- base_path = self.si.ProgramFilesx86
1496
- arch_subdir = self.pi.current_dir(hidex86=True)
1497
- else:
1498
- base_path = self.si.VSInstallDir
1499
- arch_subdir = ''
1500
-
1501
- path = r'MSBuild\%0.1f\bin%s' % (self.vs_ver, arch_subdir)
1502
- build = [join(base_path, path)]
1503
-
1504
- if self.vs_ver >= 15.0:
1505
- # Add Roslyn C# & Visual Basic Compiler
1506
- build += [join(base_path, path, 'Roslyn')]
1507
-
1508
- return build
1509
-
1510
- @property
1511
- def HTMLHelpWorkshop(self):
1512
- """
1513
- Microsoft HTML Help Workshop.
1514
-
1515
- Return
1516
- ------
1517
- list of str
1518
- paths
1519
- """
1520
- if self.vs_ver < 11.0:
1521
- return []
1522
-
1523
- return [join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
1524
-
1525
- @property
1526
- def UCRTLibraries(self):
1527
- """
1528
- Microsoft Universal C Runtime SDK Libraries.
1529
-
1530
- Return
1531
- ------
1532
- list of str
1533
- paths
1534
- """
1535
- if self.vs_ver < 14.0:
1536
- return []
1537
-
1538
- arch_subdir = self.pi.target_dir(x64=True)
1539
- lib = join(self.si.UniversalCRTSdkDir, 'lib')
1540
- ucrtver = self._ucrt_subdir
1541
- return [join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]
1542
-
1543
- @property
1544
- def UCRTIncludes(self):
1545
- """
1546
- Microsoft Universal C Runtime SDK Include.
1547
-
1548
- Return
1549
- ------
1550
- list of str
1551
- paths
1552
- """
1553
- if self.vs_ver < 14.0:
1554
- return []
1555
-
1556
- include = join(self.si.UniversalCRTSdkDir, 'include')
1557
- return [join(include, '%sucrt' % self._ucrt_subdir)]
1558
-
1559
- @property
1560
- def _ucrt_subdir(self):
1561
- """
1562
- Microsoft Universal C Runtime SDK version subdir.
1563
-
1564
- Return
1565
- ------
1566
- str
1567
- subdir
1568
- """
1569
- ucrtver = self.si.UniversalCRTSdkLastVersion
1570
- return ('%s\\' % ucrtver) if ucrtver else ''
1571
-
1572
- @property
1573
- def FSharp(self):
1574
- """
1575
- Microsoft Visual F#.
1576
-
1577
- Return
1578
- ------
1579
- list of str
1580
- paths
1581
- """
1582
- if 11.0 > self.vs_ver > 12.0:
1583
- return []
1584
-
1585
- return [self.si.FSharpInstallDir]
1586
-
1587
- @property
1588
- def VCRuntimeRedist(self):
1589
- """
1590
- Microsoft Visual C++ runtime redistributable dll.
1591
-
1592
- Return
1593
- ------
1594
- str
1595
- path
1596
- """
1597
- vcruntime = 'vcruntime%d0.dll' % self.vc_ver
1598
- arch_subdir = self.pi.target_dir(x64=True).strip('\\')
1599
-
1600
- # Installation prefixes candidates
1601
- prefixes = []
1602
- tools_path = self.si.VCInstallDir
1603
- redist_path = dirname(tools_path.replace(r'\Tools', r'\Redist'))
1604
- if isdir(redist_path):
1605
- # Redist version may not be exactly the same as tools
1606
- redist_path = join(redist_path, listdir(redist_path)[-1])
1607
- prefixes += [redist_path, join(redist_path, 'onecore')]
1608
-
1609
- prefixes += [join(tools_path, 'redist')] # VS14 legacy path
1610
-
1611
- # CRT directory
1612
- crt_dirs = ('Microsoft.VC%d.CRT' % (self.vc_ver * 10),
1613
- # Sometime store in directory with VS version instead of VC
1614
- 'Microsoft.VC%d.CRT' % (int(self.vs_ver) * 10))
1615
-
1616
- # vcruntime path
1617
- for prefix, crt_dir in itertools.product(prefixes, crt_dirs):
1618
- path = join(prefix, arch_subdir, crt_dir, vcruntime)
1619
- if isfile(path):
1620
- return path
1621
-
1622
- def return_env(self, exists=True):
1623
- """
1624
- Return environment dict.
1625
-
1626
- Parameters
1627
- ----------
1628
- exists: bool
1629
- It True, only return existing paths.
1630
-
1631
- Return
1632
- ------
1633
- dict
1634
- environment
1635
- """
1636
- env = dict(
1637
- include=self._build_paths('include',
1638
- [self.VCIncludes,
1639
- self.OSIncludes,
1640
- self.UCRTIncludes,
1641
- self.NetFxSDKIncludes],
1642
- exists),
1643
- lib=self._build_paths('lib',
1644
- [self.VCLibraries,
1645
- self.OSLibraries,
1646
- self.FxTools,
1647
- self.UCRTLibraries,
1648
- self.NetFxSDKLibraries],
1649
- exists),
1650
- libpath=self._build_paths('libpath',
1651
- [self.VCLibraries,
1652
- self.FxTools,
1653
- self.VCStoreRefs,
1654
- self.OSLibpath],
1655
- exists),
1656
- path=self._build_paths('path',
1657
- [self.VCTools,
1658
- self.VSTools,
1659
- self.VsTDb,
1660
- self.SdkTools,
1661
- self.SdkSetup,
1662
- self.FxTools,
1663
- self.MSBuild,
1664
- self.HTMLHelpWorkshop,
1665
- self.FSharp],
1666
- exists),
1667
- )
1668
- if self.vs_ver >= 14 and isfile(self.VCRuntimeRedist):
1669
- env['py_vcruntime_redist'] = self.VCRuntimeRedist
1670
- return env
1671
-
1672
- def _build_paths(self, name, spec_path_lists, exists):
1673
- """
1674
- Given an environment variable name and specified paths,
1675
- return a pathsep-separated string of paths containing
1676
- unique, extant, directories from those paths and from
1677
- the environment variable. Raise an error if no paths
1678
- are resolved.
1679
-
1680
- Parameters
1681
- ----------
1682
- name: str
1683
- Environment variable name
1684
- spec_path_lists: list of str
1685
- Paths
1686
- exists: bool
1687
- It True, only return existing paths.
1688
-
1689
- Return
1690
- ------
1691
- str
1692
- Pathsep-separated paths
1693
- """
1694
- # flatten spec_path_lists
1695
- spec_paths = itertools.chain.from_iterable(spec_path_lists)
1696
- env_paths = environ.get(name, '').split(pathsep)
1697
- paths = itertools.chain(spec_paths, env_paths)
1698
- extant_paths = list(filter(isdir, paths)) if exists else paths
1699
- if not extant_paths:
1700
- msg = "%s environment variable is empty" % name.upper()
1701
- raise distutils.errors.DistutilsPlatformError(msg)
1702
- unique_paths = unique_everseen(extant_paths)
1703
- return pathsep.join(unique_paths)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/projects/DensePose/train_net.py DELETED
@@ -1,117 +0,0 @@
1
- #!/usr/bin/env python3
2
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
3
-
4
- """
5
- DensePose Training Script.
6
-
7
- This script is similar to the training script in detectron2/tools.
8
-
9
- It is an example of how a user might use detectron2 for a new project.
10
- """
11
-
12
- import logging
13
- import os
14
- from collections import OrderedDict
15
-
16
- import detectron2.utils.comm as comm
17
- from detectron2.checkpoint import DetectionCheckpointer
18
- from detectron2.config import CfgNode, get_cfg
19
- from detectron2.data import build_detection_test_loader, build_detection_train_loader
20
- from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, hooks, launch
21
- from detectron2.evaluation import COCOEvaluator, DatasetEvaluators, verify_results
22
- from detectron2.modeling import DatasetMapperTTA
23
- from detectron2.utils.logger import setup_logger
24
-
25
- from densepose import (
26
- DatasetMapper,
27
- DensePoseCOCOEvaluator,
28
- DensePoseGeneralizedRCNNWithTTA,
29
- add_densepose_config,
30
- load_from_cfg,
31
- )
32
-
33
-
34
- class Trainer(DefaultTrainer):
35
- @classmethod
36
- def build_evaluator(cls, cfg: CfgNode, dataset_name, output_folder=None):
37
- if output_folder is None:
38
- output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
39
- evaluators = [COCOEvaluator(dataset_name, cfg, True, output_folder)]
40
- if cfg.MODEL.DENSEPOSE_ON:
41
- evaluators.append(DensePoseCOCOEvaluator(dataset_name, True, output_folder))
42
- return DatasetEvaluators(evaluators)
43
-
44
- @classmethod
45
- def build_test_loader(cls, cfg: CfgNode, dataset_name):
46
- return build_detection_test_loader(cfg, dataset_name, mapper=DatasetMapper(cfg, False))
47
-
48
- @classmethod
49
- def build_train_loader(cls, cfg: CfgNode):
50
- return build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, True))
51
-
52
- @classmethod
53
- def test_with_TTA(cls, cfg: CfgNode, model):
54
- logger = logging.getLogger("detectron2.trainer")
55
- # In the end of training, run an evaluation with TTA
56
- # Only support some R-CNN models.
57
- logger.info("Running inference with test-time augmentation ...")
58
- transform_data = load_from_cfg(cfg)
59
- model = DensePoseGeneralizedRCNNWithTTA(cfg, model, transform_data, DatasetMapperTTA(cfg))
60
- evaluators = [
61
- cls.build_evaluator(
62
- cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
63
- )
64
- for name in cfg.DATASETS.TEST
65
- ]
66
- res = cls.test(cfg, model, evaluators)
67
- res = OrderedDict({k + "_TTA": v for k, v in res.items()})
68
- return res
69
-
70
-
71
- def setup(args):
72
- cfg = get_cfg()
73
- add_densepose_config(cfg)
74
- cfg.merge_from_file(args.config_file)
75
- cfg.merge_from_list(args.opts)
76
- cfg.freeze()
77
- default_setup(cfg, args)
78
- # Setup logger for "densepose" module
79
- setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="densepose")
80
- return cfg
81
-
82
-
83
- def main(args):
84
- cfg = setup(args)
85
-
86
- if args.eval_only:
87
- model = Trainer.build_model(cfg)
88
- DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
89
- cfg.MODEL.WEIGHTS, resume=args.resume
90
- )
91
- res = Trainer.test(cfg, model)
92
- if cfg.TEST.AUG.ENABLED:
93
- res.update(Trainer.test_with_TTA(cfg, model))
94
- if comm.is_main_process():
95
- verify_results(cfg, res)
96
- return res
97
-
98
- trainer = Trainer(cfg)
99
- trainer.resume_or_load(resume=args.resume)
100
- if cfg.TEST.AUG.ENABLED:
101
- trainer.register_hooks(
102
- [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
103
- )
104
- return trainer.train()
105
-
106
-
107
- if __name__ == "__main__":
108
- args = default_argument_parser().parse_args()
109
- print("Command Line Args:", args)
110
- launch(
111
- main,
112
- args.num_gpus,
113
- num_machines=args.num_machines,
114
- machine_rank=args.machine_rank,
115
- dist_url=args.dist_url,
116
- args=(args,),
117
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/GFPGAN-example/gfpgan/archs/arcface_arch.py DELETED
@@ -1,245 +0,0 @@
1
- import torch.nn as nn
2
- from basicsr.utils.registry import ARCH_REGISTRY
3
-
4
-
5
- def conv3x3(inplanes, outplanes, stride=1):
6
- """A simple wrapper for 3x3 convolution with padding.
7
-
8
- Args:
9
- inplanes (int): Channel number of inputs.
10
- outplanes (int): Channel number of outputs.
11
- stride (int): Stride in convolution. Default: 1.
12
- """
13
- return nn.Conv2d(inplanes, outplanes, kernel_size=3, stride=stride, padding=1, bias=False)
14
-
15
-
16
- class BasicBlock(nn.Module):
17
- """Basic residual block used in the ResNetArcFace architecture.
18
-
19
- Args:
20
- inplanes (int): Channel number of inputs.
21
- planes (int): Channel number of outputs.
22
- stride (int): Stride in convolution. Default: 1.
23
- downsample (nn.Module): The downsample module. Default: None.
24
- """
25
- expansion = 1 # output channel expansion ratio
26
-
27
- def __init__(self, inplanes, planes, stride=1, downsample=None):
28
- super(BasicBlock, self).__init__()
29
- self.conv1 = conv3x3(inplanes, planes, stride)
30
- self.bn1 = nn.BatchNorm2d(planes)
31
- self.relu = nn.ReLU(inplace=True)
32
- self.conv2 = conv3x3(planes, planes)
33
- self.bn2 = nn.BatchNorm2d(planes)
34
- self.downsample = downsample
35
- self.stride = stride
36
-
37
- def forward(self, x):
38
- residual = x
39
-
40
- out = self.conv1(x)
41
- out = self.bn1(out)
42
- out = self.relu(out)
43
-
44
- out = self.conv2(out)
45
- out = self.bn2(out)
46
-
47
- if self.downsample is not None:
48
- residual = self.downsample(x)
49
-
50
- out += residual
51
- out = self.relu(out)
52
-
53
- return out
54
-
55
-
56
- class IRBlock(nn.Module):
57
- """Improved residual block (IR Block) used in the ResNetArcFace architecture.
58
-
59
- Args:
60
- inplanes (int): Channel number of inputs.
61
- planes (int): Channel number of outputs.
62
- stride (int): Stride in convolution. Default: 1.
63
- downsample (nn.Module): The downsample module. Default: None.
64
- use_se (bool): Whether use the SEBlock (squeeze and excitation block). Default: True.
65
- """
66
- expansion = 1 # output channel expansion ratio
67
-
68
- def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True):
69
- super(IRBlock, self).__init__()
70
- self.bn0 = nn.BatchNorm2d(inplanes)
71
- self.conv1 = conv3x3(inplanes, inplanes)
72
- self.bn1 = nn.BatchNorm2d(inplanes)
73
- self.prelu = nn.PReLU()
74
- self.conv2 = conv3x3(inplanes, planes, stride)
75
- self.bn2 = nn.BatchNorm2d(planes)
76
- self.downsample = downsample
77
- self.stride = stride
78
- self.use_se = use_se
79
- if self.use_se:
80
- self.se = SEBlock(planes)
81
-
82
- def forward(self, x):
83
- residual = x
84
- out = self.bn0(x)
85
- out = self.conv1(out)
86
- out = self.bn1(out)
87
- out = self.prelu(out)
88
-
89
- out = self.conv2(out)
90
- out = self.bn2(out)
91
- if self.use_se:
92
- out = self.se(out)
93
-
94
- if self.downsample is not None:
95
- residual = self.downsample(x)
96
-
97
- out += residual
98
- out = self.prelu(out)
99
-
100
- return out
101
-
102
-
103
- class Bottleneck(nn.Module):
104
- """Bottleneck block used in the ResNetArcFace architecture.
105
-
106
- Args:
107
- inplanes (int): Channel number of inputs.
108
- planes (int): Channel number of outputs.
109
- stride (int): Stride in convolution. Default: 1.
110
- downsample (nn.Module): The downsample module. Default: None.
111
- """
112
- expansion = 4 # output channel expansion ratio
113
-
114
- def __init__(self, inplanes, planes, stride=1, downsample=None):
115
- super(Bottleneck, self).__init__()
116
- self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
117
- self.bn1 = nn.BatchNorm2d(planes)
118
- self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
119
- self.bn2 = nn.BatchNorm2d(planes)
120
- self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
121
- self.bn3 = nn.BatchNorm2d(planes * self.expansion)
122
- self.relu = nn.ReLU(inplace=True)
123
- self.downsample = downsample
124
- self.stride = stride
125
-
126
- def forward(self, x):
127
- residual = x
128
-
129
- out = self.conv1(x)
130
- out = self.bn1(out)
131
- out = self.relu(out)
132
-
133
- out = self.conv2(out)
134
- out = self.bn2(out)
135
- out = self.relu(out)
136
-
137
- out = self.conv3(out)
138
- out = self.bn3(out)
139
-
140
- if self.downsample is not None:
141
- residual = self.downsample(x)
142
-
143
- out += residual
144
- out = self.relu(out)
145
-
146
- return out
147
-
148
-
149
- class SEBlock(nn.Module):
150
- """The squeeze-and-excitation block (SEBlock) used in the IRBlock.
151
-
152
- Args:
153
- channel (int): Channel number of inputs.
154
- reduction (int): Channel reduction ration. Default: 16.
155
- """
156
-
157
- def __init__(self, channel, reduction=16):
158
- super(SEBlock, self).__init__()
159
- self.avg_pool = nn.AdaptiveAvgPool2d(1) # pool to 1x1 without spatial information
160
- self.fc = nn.Sequential(
161
- nn.Linear(channel, channel // reduction), nn.PReLU(), nn.Linear(channel // reduction, channel),
162
- nn.Sigmoid())
163
-
164
- def forward(self, x):
165
- b, c, _, _ = x.size()
166
- y = self.avg_pool(x).view(b, c)
167
- y = self.fc(y).view(b, c, 1, 1)
168
- return x * y
169
-
170
-
171
- @ARCH_REGISTRY.register()
172
- class ResNetArcFace(nn.Module):
173
- """ArcFace with ResNet architectures.
174
-
175
- Ref: ArcFace: Additive Angular Margin Loss for Deep Face Recognition.
176
-
177
- Args:
178
- block (str): Block used in the ArcFace architecture.
179
- layers (tuple(int)): Block numbers in each layer.
180
- use_se (bool): Whether use the SEBlock (squeeze and excitation block). Default: True.
181
- """
182
-
183
- def __init__(self, block, layers, use_se=True):
184
- if block == 'IRBlock':
185
- block = IRBlock
186
- self.inplanes = 64
187
- self.use_se = use_se
188
- super(ResNetArcFace, self).__init__()
189
-
190
- self.conv1 = nn.Conv2d(1, 64, kernel_size=3, padding=1, bias=False)
191
- self.bn1 = nn.BatchNorm2d(64)
192
- self.prelu = nn.PReLU()
193
- self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
194
- self.layer1 = self._make_layer(block, 64, layers[0])
195
- self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
196
- self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
197
- self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
198
- self.bn4 = nn.BatchNorm2d(512)
199
- self.dropout = nn.Dropout()
200
- self.fc5 = nn.Linear(512 * 8 * 8, 512)
201
- self.bn5 = nn.BatchNorm1d(512)
202
-
203
- # initialization
204
- for m in self.modules():
205
- if isinstance(m, nn.Conv2d):
206
- nn.init.xavier_normal_(m.weight)
207
- elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
208
- nn.init.constant_(m.weight, 1)
209
- nn.init.constant_(m.bias, 0)
210
- elif isinstance(m, nn.Linear):
211
- nn.init.xavier_normal_(m.weight)
212
- nn.init.constant_(m.bias, 0)
213
-
214
- def _make_layer(self, block, planes, num_blocks, stride=1):
215
- downsample = None
216
- if stride != 1 or self.inplanes != planes * block.expansion:
217
- downsample = nn.Sequential(
218
- nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),
219
- nn.BatchNorm2d(planes * block.expansion),
220
- )
221
- layers = []
222
- layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se))
223
- self.inplanes = planes
224
- for _ in range(1, num_blocks):
225
- layers.append(block(self.inplanes, planes, use_se=self.use_se))
226
-
227
- return nn.Sequential(*layers)
228
-
229
- def forward(self, x):
230
- x = self.conv1(x)
231
- x = self.bn1(x)
232
- x = self.prelu(x)
233
- x = self.maxpool(x)
234
-
235
- x = self.layer1(x)
236
- x = self.layer2(x)
237
- x = self.layer3(x)
238
- x = self.layer4(x)
239
- x = self.bn4(x)
240
- x = self.dropout(x)
241
- x = x.view(x.size(0), -1)
242
- x = self.fc5(x)
243
- x = self.bn5(x)
244
-
245
- return x
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/cmake/ThrustUtilities.cmake DELETED
@@ -1,25 +0,0 @@
1
- # Given a cu_file (e.g. foo/bar.cu) relative to CMAKE_CURRENT_SOURCE_DIR
2
- # and a thrust_target, create a cpp file that includes the .cu file, and set
3
- # ${cpp_file_var} in the parent scope to the full path of the new file. The new
4
- # file will be generated in:
5
- # ${CMAKE_CURRENT_BINARY_DIR}/<thrust_target_prefix>/${cu_file}.cpp
6
- function(thrust_wrap_cu_in_cpp cpp_file_var cu_file thrust_target)
7
- thrust_get_target_property(prefix ${thrust_target} PREFIX)
8
- set(wrapped_source_file "${CMAKE_CURRENT_SOURCE_DIR}/${cu_file}")
9
- set(cpp_file "${CMAKE_CURRENT_BINARY_DIR}/${prefix}/${cu_file}.cpp")
10
- configure_file("${Thrust_SOURCE_DIR}/cmake/wrap_source_file.cpp.in" "${cpp_file}")
11
- set(${cpp_file_var} "${cpp_file}" PARENT_SCOPE)
12
- endfunction()
13
-
14
- # Enable RDC for a CUDA target. Encapsulates compiler hacks:
15
- function(thrust_enable_rdc_for_cuda_target target_name)
16
- if ("Feta" STREQUAL "${CMAKE_CUDA_COMPILER_ID}")
17
- set_target_properties(${target_name} PROPERTIES
18
- COMPILE_FLAGS "-gpu=rdc"
19
- )
20
- else()
21
- set_target_properties(${target_name} PROPERTIES
22
- CUDA_SEPARABLE_COMPILATION ON
23
- )
24
- endif()
25
- endfunction()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/WALT/mmdet/core/bbox/demodata.py DELETED
@@ -1,41 +0,0 @@
1
- import numpy as np
2
- import torch
3
-
4
- from mmdet.utils.util_random import ensure_rng
5
-
6
-
7
- def random_boxes(num=1, scale=1, rng=None):
8
- """Simple version of ``kwimage.Boxes.random``
9
-
10
- Returns:
11
- Tensor: shape (n, 4) in x1, y1, x2, y2 format.
12
-
13
- References:
14
- https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390
15
-
16
- Example:
17
- >>> num = 3
18
- >>> scale = 512
19
- >>> rng = 0
20
- >>> boxes = random_boxes(num, scale, rng)
21
- >>> print(boxes)
22
- tensor([[280.9925, 278.9802, 308.6148, 366.1769],
23
- [216.9113, 330.6978, 224.0446, 456.5878],
24
- [405.3632, 196.3221, 493.3953, 270.7942]])
25
- """
26
- rng = ensure_rng(rng)
27
-
28
- tlbr = rng.rand(num, 4).astype(np.float32)
29
-
30
- tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
31
- tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
32
- br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
33
- br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])
34
-
35
- tlbr[:, 0] = tl_x * scale
36
- tlbr[:, 1] = tl_y * scale
37
- tlbr[:, 2] = br_x * scale
38
- tlbr[:, 3] = br_y * scale
39
-
40
- boxes = torch.from_numpy(tlbr)
41
- return boxes
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/WALT/mmdet/models/necks/pafpn.py DELETED
@@ -1,142 +0,0 @@
1
- import torch.nn as nn
2
- import torch.nn.functional as F
3
- from mmcv.cnn import ConvModule
4
- from mmcv.runner import auto_fp16
5
-
6
- from ..builder import NECKS
7
- from .fpn import FPN
8
-
9
-
10
- @NECKS.register_module()
11
- class PAFPN(FPN):
12
- """Path Aggregation Network for Instance Segmentation.
13
-
14
- This is an implementation of the `PAFPN in Path Aggregation Network
15
- <https://arxiv.org/abs/1803.01534>`_.
16
-
17
- Args:
18
- in_channels (List[int]): Number of input channels per scale.
19
- out_channels (int): Number of output channels (used at each scale)
20
- num_outs (int): Number of output scales.
21
- start_level (int): Index of the start input backbone level used to
22
- build the feature pyramid. Default: 0.
23
- end_level (int): Index of the end input backbone level (exclusive) to
24
- build the feature pyramid. Default: -1, which means the last level.
25
- add_extra_convs (bool): Whether to add conv layers on top of the
26
- original feature maps. Default: False.
27
- extra_convs_on_inputs (bool): Whether to apply extra conv on
28
- the original feature from the backbone. Default: False.
29
- relu_before_extra_convs (bool): Whether to apply relu before the extra
30
- conv. Default: False.
31
- no_norm_on_lateral (bool): Whether to apply norm on lateral.
32
- Default: False.
33
- conv_cfg (dict): Config dict for convolution layer. Default: None.
34
- norm_cfg (dict): Config dict for normalization layer. Default: None.
35
- act_cfg (str): Config dict for activation layer in ConvModule.
36
- Default: None.
37
- """
38
-
39
- def __init__(self,
40
- in_channels,
41
- out_channels,
42
- num_outs,
43
- start_level=0,
44
- end_level=-1,
45
- add_extra_convs=False,
46
- extra_convs_on_inputs=True,
47
- relu_before_extra_convs=False,
48
- no_norm_on_lateral=False,
49
- conv_cfg=None,
50
- norm_cfg=None,
51
- act_cfg=None):
52
- super(PAFPN,
53
- self).__init__(in_channels, out_channels, num_outs, start_level,
54
- end_level, add_extra_convs, extra_convs_on_inputs,
55
- relu_before_extra_convs, no_norm_on_lateral,
56
- conv_cfg, norm_cfg, act_cfg)
57
- # add extra bottom up pathway
58
- self.downsample_convs = nn.ModuleList()
59
- self.pafpn_convs = nn.ModuleList()
60
- for i in range(self.start_level + 1, self.backbone_end_level):
61
- d_conv = ConvModule(
62
- out_channels,
63
- out_channels,
64
- 3,
65
- stride=2,
66
- padding=1,
67
- conv_cfg=conv_cfg,
68
- norm_cfg=norm_cfg,
69
- act_cfg=act_cfg,
70
- inplace=False)
71
- pafpn_conv = ConvModule(
72
- out_channels,
73
- out_channels,
74
- 3,
75
- padding=1,
76
- conv_cfg=conv_cfg,
77
- norm_cfg=norm_cfg,
78
- act_cfg=act_cfg,
79
- inplace=False)
80
- self.downsample_convs.append(d_conv)
81
- self.pafpn_convs.append(pafpn_conv)
82
-
83
- @auto_fp16()
84
- def forward(self, inputs):
85
- """Forward function."""
86
- assert len(inputs) == len(self.in_channels)
87
-
88
- # build laterals
89
- laterals = [
90
- lateral_conv(inputs[i + self.start_level])
91
- for i, lateral_conv in enumerate(self.lateral_convs)
92
- ]
93
-
94
- # build top-down path
95
- used_backbone_levels = len(laterals)
96
- for i in range(used_backbone_levels - 1, 0, -1):
97
- prev_shape = laterals[i - 1].shape[2:]
98
- laterals[i - 1] += F.interpolate(
99
- laterals[i], size=prev_shape, mode='nearest')
100
-
101
- # build outputs
102
- # part 1: from original levels
103
- inter_outs = [
104
- self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
105
- ]
106
-
107
- # part 2: add bottom-up path
108
- for i in range(0, used_backbone_levels - 1):
109
- inter_outs[i + 1] += self.downsample_convs[i](inter_outs[i])
110
-
111
- outs = []
112
- outs.append(inter_outs[0])
113
- outs.extend([
114
- self.pafpn_convs[i - 1](inter_outs[i])
115
- for i in range(1, used_backbone_levels)
116
- ])
117
-
118
- # part 3: add extra levels
119
- if self.num_outs > len(outs):
120
- # use max pool to get more levels on top of outputs
121
- # (e.g., Faster R-CNN, Mask R-CNN)
122
- if not self.add_extra_convs:
123
- for i in range(self.num_outs - used_backbone_levels):
124
- outs.append(F.max_pool2d(outs[-1], 1, stride=2))
125
- # add conv layers on top of original feature maps (RetinaNet)
126
- else:
127
- if self.add_extra_convs == 'on_input':
128
- orig = inputs[self.backbone_end_level - 1]
129
- outs.append(self.fpn_convs[used_backbone_levels](orig))
130
- elif self.add_extra_convs == 'on_lateral':
131
- outs.append(self.fpn_convs[used_backbone_levels](
132
- laterals[-1]))
133
- elif self.add_extra_convs == 'on_output':
134
- outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
135
- else:
136
- raise NotImplementedError
137
- for i in range(used_backbone_levels + 1, self.num_outs):
138
- if self.relu_before_extra_convs:
139
- outs.append(self.fpn_convs[i](F.relu(outs[-1])))
140
- else:
141
- outs.append(self.fpn_convs[i](outs[-1]))
142
- return tuple(outs)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Cyril666/my_abi/callbacks.py DELETED
@@ -1,360 +0,0 @@
1
- import logging
2
- import shutil
3
- import time
4
-
5
- import editdistance as ed
6
- import torchvision.utils as vutils
7
- from fastai.callbacks.tensorboard import (LearnerTensorboardWriter,
8
- SummaryWriter, TBWriteRequest,
9
- asyncTBWriter)
10
- from fastai.vision import *
11
- from torch.nn.parallel import DistributedDataParallel
12
- from torchvision import transforms
13
-
14
- import dataset
15
- from utils import CharsetMapper, Timer, blend_mask
16
-
17
-
18
- class IterationCallback(LearnerTensorboardWriter):
19
- "A `TrackerCallback` that monitor in each iteration."
20
- def __init__(self, learn:Learner, name:str='model', checpoint_keep_num=5,
21
- show_iters:int=50, eval_iters:int=1000, save_iters:int=20000,
22
- start_iters:int=0, stats_iters=20000):
23
- #if self.learn.rank is not None: time.sleep(self.learn.rank) # keep all event files
24
- super().__init__(learn, base_dir='.', name=learn.path, loss_iters=show_iters,
25
- stats_iters=stats_iters, hist_iters=stats_iters)
26
- self.name, self.bestname = Path(name).name, f'best-{Path(name).name}'
27
- self.show_iters = show_iters
28
- self.eval_iters = eval_iters
29
- self.save_iters = save_iters
30
- self.start_iters = start_iters
31
- self.checpoint_keep_num = checpoint_keep_num
32
- self.metrics_root = 'metrics/' # rewrite
33
- self.timer = Timer()
34
- self.host = self.learn.rank is None or self.learn.rank == 0
35
-
36
- def _write_metrics(self, iteration:int, names:List[str], last_metrics:MetricsList)->None:
37
- "Writes training metrics to Tensorboard."
38
- for i, name in enumerate(names):
39
- if last_metrics is None or len(last_metrics) < i+1: return
40
- scalar_value = last_metrics[i]
41
- self._write_scalar(name=name, scalar_value=scalar_value, iteration=iteration)
42
-
43
- def _write_sub_loss(self, iteration:int, last_losses:dict)->None:
44
- "Writes sub loss to Tensorboard."
45
- for name, loss in last_losses.items():
46
- scalar_value = to_np(loss)
47
- tag = self.metrics_root + name
48
- self.tbwriter.add_scalar(tag=tag, scalar_value=scalar_value, global_step=iteration)
49
-
50
- def _save(self, name):
51
- if isinstance(self.learn.model, DistributedDataParallel):
52
- tmp = self.learn.model
53
- self.learn.model = self.learn.model.module
54
- self.learn.save(name)
55
- self.learn.model = tmp
56
- else: self.learn.save(name)
57
-
58
- def _validate(self, dl=None, callbacks=None, metrics=None, keeped_items=False):
59
- "Validate on `dl` with potential `callbacks` and `metrics`."
60
- dl = ifnone(dl, self.learn.data.valid_dl)
61
- metrics = ifnone(metrics, self.learn.metrics)
62
- cb_handler = CallbackHandler(ifnone(callbacks, []), metrics)
63
- cb_handler.on_train_begin(1, None, metrics); cb_handler.on_epoch_begin()
64
- if keeped_items: cb_handler.state_dict.update(dict(keeped_items=[]))
65
- val_metrics = validate(self.learn.model, dl, self.loss_func, cb_handler)
66
- cb_handler.on_epoch_end(val_metrics)
67
- if keeped_items: return cb_handler.state_dict['keeped_items']
68
- else: return cb_handler.state_dict['last_metrics']
69
-
70
- def jump_to_epoch_iter(self, epoch:int, iteration:int)->None:
71
- try:
72
- self.learn.load(f'{self.name}_{epoch}_{iteration}', purge=False)
73
- logging.info(f'Loaded {self.name}_{epoch}_{iteration}')
74
- except: logging.info(f'Model {self.name}_{epoch}_{iteration} not found.')
75
-
76
- def on_train_begin(self, n_epochs, **kwargs):
77
- # TODO: can not write graph here
78
- # super().on_train_begin(**kwargs)
79
- self.best = -float('inf')
80
- self.timer.tic()
81
- if self.host:
82
- checkpoint_path = self.learn.path/'checkpoint.yaml'
83
- if checkpoint_path.exists():
84
- os.remove(checkpoint_path)
85
- open(checkpoint_path, 'w').close()
86
- return {'skip_validate': True, 'iteration':self.start_iters} # disable default validate
87
-
88
- def on_batch_begin(self, **kwargs:Any)->None:
89
- self.timer.toc_data()
90
- super().on_batch_begin(**kwargs)
91
-
92
- def on_batch_end(self, iteration, epoch, last_loss, smooth_loss, train, **kwargs):
93
- super().on_batch_end(last_loss, iteration, train, **kwargs)
94
- if iteration == 0: return
95
-
96
- if iteration % self.loss_iters == 0:
97
- last_losses = self.learn.loss_func.last_losses
98
- self._write_sub_loss(iteration=iteration, last_losses=last_losses)
99
- self.tbwriter.add_scalar(tag=self.metrics_root + 'lr',
100
- scalar_value=self.opt.lr, global_step=iteration)
101
-
102
- if iteration % self.show_iters == 0:
103
- log_str = f'epoch {epoch} iter {iteration}: loss = {last_loss:6.4f}, ' \
104
- f'smooth loss = {smooth_loss:6.4f}'
105
- logging.info(log_str)
106
- # log_str = f'data time = {self.timer.data_diff:.4f}s, runing time = {self.timer.running_diff:.4f}s'
107
- # logging.info(log_str)
108
-
109
- if iteration % self.eval_iters == 0:
110
- # TODO: or remove time to on_epoch_end
111
- # 1. Record time
112
- log_str = f'average data time = {self.timer.average_data_time():.4f}s, ' \
113
- f'average running time = {self.timer.average_running_time():.4f}s'
114
- logging.info(log_str)
115
-
116
- # 2. Call validate
117
- last_metrics = self._validate()
118
- self.learn.model.train()
119
- log_str = f'epoch {epoch} iter {iteration}: eval loss = {last_metrics[0]:6.4f}, ' \
120
- f'ccr = {last_metrics[1]:6.4f}, cwr = {last_metrics[2]:6.4f}, ' \
121
- f'ted = {last_metrics[3]:6.4f}, ned = {last_metrics[4]:6.4f}, ' \
122
- f'ted/w = {last_metrics[5]:6.4f}, '
123
- logging.info(log_str)
124
- names = ['eval_loss', 'ccr', 'cwr', 'ted', 'ned', 'ted/w']
125
- self._write_metrics(iteration, names, last_metrics)
126
-
127
- # 3. Save best model
128
- current = last_metrics[2]
129
- if current is not None and current > self.best:
130
- logging.info(f'Better model found at epoch {epoch}, '\
131
- f'iter {iteration} with accuracy value: {current:6.4f}.')
132
- self.best = current
133
- self._save(f'{self.bestname}')
134
-
135
- if iteration % self.save_iters == 0 and self.host:
136
- logging.info(f'Save model {self.name}_{epoch}_{iteration}')
137
- filename = f'{self.name}_{epoch}_{iteration}'
138
- self._save(filename)
139
-
140
- checkpoint_path = self.learn.path/'checkpoint.yaml'
141
- if not checkpoint_path.exists():
142
- open(checkpoint_path, 'w').close()
143
- with open(checkpoint_path, 'r') as file:
144
- checkpoints = yaml.load(file, Loader=yaml.FullLoader) or dict()
145
- checkpoints['all_checkpoints'] = (
146
- checkpoints.get('all_checkpoints') or list())
147
- checkpoints['all_checkpoints'].insert(0, filename)
148
- if len(checkpoints['all_checkpoints']) > self.checpoint_keep_num:
149
- removed_checkpoint = checkpoints['all_checkpoints'].pop()
150
- removed_checkpoint = self.learn.path/self.learn.model_dir/f'{removed_checkpoint}.pth'
151
- os.remove(removed_checkpoint)
152
- checkpoints['current_checkpoint'] = filename
153
- with open(checkpoint_path, 'w') as file:
154
- yaml.dump(checkpoints, file)
155
-
156
-
157
- self.timer.toc_running()
158
-
159
- def on_train_end(self, **kwargs):
160
- #self.learn.load(f'{self.bestname}', purge=False)
161
- pass
162
-
163
- def on_epoch_end(self, last_metrics:MetricsList, iteration:int, **kwargs)->None:
164
- self._write_embedding(iteration=iteration)
165
-
166
-
167
- class TextAccuracy(Callback):
168
- _names = ['ccr', 'cwr', 'ted', 'ned', 'ted/w']
169
- def __init__(self, charset_path, max_length, case_sensitive, model_eval):
170
- self.charset_path = charset_path
171
- self.max_length = max_length
172
- self.case_sensitive = case_sensitive
173
- self.charset = CharsetMapper(charset_path, self.max_length)
174
- self.names = self._names
175
-
176
- self.model_eval = model_eval or 'alignment'
177
- assert self.model_eval in ['vision', 'language', 'alignment']
178
-
179
- def on_epoch_begin(self, **kwargs):
180
- self.total_num_char = 0.
181
- self.total_num_word = 0.
182
- self.correct_num_char = 0.
183
- self.correct_num_word = 0.
184
- self.total_ed = 0.
185
- self.total_ned = 0.
186
-
187
- def _get_output(self, last_output):
188
- if isinstance(last_output, (tuple, list)):
189
- for res in last_output:
190
- if res['name'] == self.model_eval: output = res
191
- else: output = last_output
192
- return output
193
-
194
- def _update_output(self, last_output, items):
195
- if isinstance(last_output, (tuple, list)):
196
- for res in last_output:
197
- if res['name'] == self.model_eval: res.update(items)
198
- else: last_output.update(items)
199
- return last_output
200
-
201
- def on_batch_end(self, last_output, last_target, **kwargs):
202
- output = self._get_output(last_output)
203
- logits, pt_lengths = output['logits'], output['pt_lengths']
204
- pt_text, pt_scores, pt_lengths_ = self.decode(logits)
205
- assert (pt_lengths == pt_lengths_).all(), f'{pt_lengths} != {pt_lengths_} for {pt_text}'
206
- last_output = self._update_output(last_output, {'pt_text':pt_text, 'pt_scores':pt_scores})
207
-
208
- pt_text = [self.charset.trim(t) for t in pt_text]
209
- label = last_target[0]
210
- if label.dim() == 3: label = label.argmax(dim=-1) # one-hot label
211
- gt_text = [self.charset.get_text(l, trim=True) for l in label]
212
-
213
- for i in range(len(gt_text)):
214
- if not self.case_sensitive:
215
- gt_text[i], pt_text[i] = gt_text[i].lower(), pt_text[i].lower()
216
- distance = ed.eval(gt_text[i], pt_text[i])
217
- self.total_ed += distance
218
- self.total_ned += float(distance) / max(len(gt_text[i]), 1)
219
-
220
- if gt_text[i] == pt_text[i]:
221
- self.correct_num_word += 1
222
- self.total_num_word += 1
223
-
224
- for j in range(min(len(gt_text[i]), len(pt_text[i]))):
225
- if gt_text[i][j] == pt_text[i][j]:
226
- self.correct_num_char += 1
227
- self.total_num_char += len(gt_text[i])
228
-
229
- return {'last_output': last_output}
230
-
231
- def on_epoch_end(self, last_metrics, **kwargs):
232
- mets = [self.correct_num_char / self.total_num_char,
233
- self.correct_num_word / self.total_num_word,
234
- self.total_ed,
235
- self.total_ned,
236
- self.total_ed / self.total_num_word]
237
- return add_metrics(last_metrics, mets)
238
-
239
- def decode(self, logit):
240
- """ Greed decode """
241
- # TODO: test running time and decode on GPU
242
- out = F.softmax(logit, dim=2)
243
- pt_text, pt_scores, pt_lengths = [], [], []
244
- for o in out:
245
- text = self.charset.get_text(o.argmax(dim=1), padding=False, trim=False)
246
- text = text.split(self.charset.null_char)[0] # end at end-token
247
- pt_text.append(text)
248
- pt_scores.append(o.max(dim=1)[0])
249
- pt_lengths.append(min(len(text) + 1, self.max_length)) # one for end-token
250
- pt_scores = torch.stack(pt_scores)
251
- pt_lengths = pt_scores.new_tensor(pt_lengths, dtype=torch.long)
252
- return pt_text, pt_scores, pt_lengths
253
-
254
-
255
- class TopKTextAccuracy(TextAccuracy):
256
- _names = ['ccr', 'cwr']
257
- def __init__(self, k, charset_path, max_length, case_sensitive, model_eval):
258
- self.k = k
259
- self.charset_path = charset_path
260
- self.max_length = max_length
261
- self.case_sensitive = case_sensitive
262
- self.charset = CharsetMapper(charset_path, self.max_length)
263
- self.names = self._names
264
-
265
- def on_epoch_begin(self, **kwargs):
266
- self.total_num_char = 0.
267
- self.total_num_word = 0.
268
- self.correct_num_char = 0.
269
- self.correct_num_word = 0.
270
-
271
- def on_batch_end(self, last_output, last_target, **kwargs):
272
- logits, pt_lengths = last_output['logits'], last_output['pt_lengths']
273
- gt_labels, gt_lengths = last_target[:]
274
-
275
- for logit, pt_length, label, length in zip(logits, pt_lengths, gt_labels, gt_lengths):
276
- word_flag = True
277
- for i in range(length):
278
- char_logit = logit[i].topk(self.k)[1]
279
- char_label = label[i].argmax(-1)
280
- if char_label in char_logit: self.correct_num_char += 1
281
- else: word_flag = False
282
- self.total_num_char += 1
283
- if pt_length == length and word_flag:
284
- self.correct_num_word += 1
285
- self.total_num_word += 1
286
-
287
- def on_epoch_end(self, last_metrics, **kwargs):
288
- mets = [self.correct_num_char / self.total_num_char,
289
- self.correct_num_word / self.total_num_word,
290
- 0., 0., 0.]
291
- return add_metrics(last_metrics, mets)
292
-
293
-
294
- class DumpPrediction(LearnerCallback):
295
-
296
- def __init__(self, learn, dataset, charset_path, model_eval, image_only=False, debug=False):
297
- super().__init__(learn=learn)
298
- self.debug = debug
299
- self.model_eval = model_eval or 'alignment'
300
- self.image_only = image_only
301
- assert self.model_eval in ['vision', 'language', 'alignment']
302
-
303
- self.dataset, self.root = dataset, Path(self.learn.path)/f'{dataset}-{self.model_eval}'
304
- self.attn_root = self.root/'attn'
305
- self.charset = CharsetMapper(charset_path)
306
- if self.root.exists(): shutil.rmtree(self.root)
307
- self.root.mkdir(), self.attn_root.mkdir()
308
-
309
- self.pil = transforms.ToPILImage()
310
- self.tensor = transforms.ToTensor()
311
- size = self.learn.data.img_h, self.learn.data.img_w
312
- self.resize = transforms.Resize(size=size, interpolation=0)
313
- self.c = 0
314
-
315
- def on_batch_end(self, last_input, last_output, last_target, **kwargs):
316
- if isinstance(last_output, (tuple, list)):
317
- for res in last_output:
318
- if res['name'] == self.model_eval: pt_text = res['pt_text']
319
- if res['name'] == 'vision': attn_scores = res['attn_scores'].detach().cpu()
320
- if res['name'] == self.model_eval: logits = res['logits']
321
- else:
322
- pt_text = last_output['pt_text']
323
- attn_scores = last_output['attn_scores'].detach().cpu()
324
- logits = last_output['logits']
325
-
326
- images = last_input[0] if isinstance(last_input, (tuple, list)) else last_input
327
- images = images.detach().cpu()
328
- pt_text = [self.charset.trim(t) for t in pt_text]
329
- gt_label = last_target[0]
330
- if gt_label.dim() == 3: gt_label = gt_label.argmax(dim=-1) # one-hot label
331
- gt_text = [self.charset.get_text(l, trim=True) for l in gt_label]
332
-
333
- prediction, false_prediction = [], []
334
- for gt, pt, image, attn, logit in zip(gt_text, pt_text, images, attn_scores, logits):
335
- prediction.append(f'{gt}\t{pt}\n')
336
- if gt != pt:
337
- if self.debug:
338
- scores = torch.softmax(logit, dim=-1)[:max(len(pt), len(gt)) + 1]
339
- logging.info(f'{self.c} gt {gt}, pt {pt}, logit {logit.shape}, scores {scores.topk(5, dim=-1)}')
340
- false_prediction.append(f'{gt}\t{pt}\n')
341
-
342
- image = self.learn.data.denorm(image)
343
- if not self.image_only:
344
- image_np = np.array(self.pil(image))
345
- attn_pil = [self.pil(a) for a in attn[:, None, :, :]]
346
- attn = [self.tensor(self.resize(a)).repeat(3, 1, 1) for a in attn_pil]
347
- attn_sum = np.array([np.array(a) for a in attn_pil[:len(pt)]]).sum(axis=0)
348
- blended_sum = self.tensor(blend_mask(image_np, attn_sum))
349
- blended = [self.tensor(blend_mask(image_np, np.array(a))) for a in attn_pil]
350
- save_image = torch.stack([image] + attn + [blended_sum] + blended)
351
- save_image = save_image.view(2, -1, *save_image.shape[1:])
352
- save_image = save_image.permute(1, 0, 2, 3, 4).flatten(0, 1)
353
- vutils.save_image(save_image, self.attn_root/f'{self.c}_{gt}_{pt}.jpg',
354
- nrow=2, normalize=True, scale_each=True)
355
- else:
356
- self.pil(image).save(self.attn_root/f'{self.c}_{gt}_{pt}.jpg')
357
- self.c += 1
358
-
359
- with open(self.root/f'{self.model_eval}.txt', 'a') as f: f.writelines(prediction)
360
- with open(self.root/f'{self.model_eval}-false.txt', 'a') as f: f.writelines(false_prediction)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/Image-003ee87c.css DELETED
@@ -1 +0,0 @@
1
- img.svelte-gqt00k{border-radius:var(--radius-lg);max-width:none}img.selected.svelte-gqt00k{border-color:var(--border-color-accent)}.table.svelte-gqt00k{margin:0 auto;border:2px solid var(--border-color-primary);border-radius:var(--radius-lg);width:var(--size-20);height:var(--size-20);object-fit:cover}.gallery.svelte-gqt00k{border:2px solid var(--border-color-primary);max-height:var(--size-20);object-fit:cover}
 
 
spaces/DarshanMM/OpenAICodexSummarizer/app.py DELETED
@@ -1,13 +0,0 @@
1
- #python3
2
- #build a text summarizer using hugging face and gradio
3
-
4
- import gradio as gr
5
- import pandas as pd
6
- import numpy as np
7
- from transformers import pipeline
8
-
9
- def summarize(text):
10
- summarizer = pipeline("summarization")
11
- return summarizer(text, max_length=512, min_length=30)[0]['summary_text']
12
-
13
- gr.Interface(fn=summarize, inputs=gr.inputs.Textbox(lines=7, placeholder="Enter text here"), outputs="text").launch(inline = False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Datasculptor/DescriptionGPT/tools/dump_clip_features.py DELETED
@@ -1,116 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
- import argparse
3
- import json
4
- import torch
5
- import numpy as np
6
- import itertools
7
- from nltk.corpus import wordnet
8
- import sys
9
-
10
- if __name__ == '__main__':
11
- parser = argparse.ArgumentParser()
12
- parser.add_argument('--ann', default='datasets/lvis/lvis_v1_val.json')
13
- parser.add_argument('--out_path', default='')
14
- parser.add_argument('--prompt', default='a')
15
- parser.add_argument('--model', default='clip')
16
- parser.add_argument('--clip_model', default="ViT-B/32")
17
- parser.add_argument('--fix_space', action='store_true')
18
- parser.add_argument('--use_underscore', action='store_true')
19
- parser.add_argument('--avg_synonyms', action='store_true')
20
- parser.add_argument('--use_wn_name', action='store_true')
21
- args = parser.parse_args()
22
-
23
- print('Loading', args.ann)
24
- data = json.load(open(args.ann, 'r'))
25
- cat_names = [x['name'] for x in \
26
- sorted(data['categories'], key=lambda x: x['id'])]
27
- if 'synonyms' in data['categories'][0]:
28
- if args.use_wn_name:
29
- synonyms = [
30
- [xx.name() for xx in wordnet.synset(x['synset']).lemmas()] \
31
- if x['synset'] != 'stop_sign.n.01' else ['stop_sign'] \
32
- for x in sorted(data['categories'], key=lambda x: x['id'])]
33
- else:
34
- synonyms = [x['synonyms'] for x in \
35
- sorted(data['categories'], key=lambda x: x['id'])]
36
- else:
37
- synonyms = []
38
- if args.fix_space:
39
- cat_names = [x.replace('_', ' ') for x in cat_names]
40
- if args.use_underscore:
41
- cat_names = [x.strip().replace('/ ', '/').replace(' ', '_') for x in cat_names]
42
- print('cat_names', cat_names)
43
- device = "cuda" if torch.cuda.is_available() else "cpu"
44
-
45
- if args.prompt == 'a':
46
- sentences = ['a ' + x for x in cat_names]
47
- sentences_synonyms = [['a ' + xx for xx in x] for x in synonyms]
48
- if args.prompt == 'none':
49
- sentences = [x for x in cat_names]
50
- sentences_synonyms = [[xx for xx in x] for x in synonyms]
51
- elif args.prompt == 'photo':
52
- sentences = ['a photo of a {}'.format(x) for x in cat_names]
53
- sentences_synonyms = [['a photo of a {}'.format(xx) for xx in x] \
54
- for x in synonyms]
55
- elif args.prompt == 'scene':
56
- sentences = ['a photo of a {} in the scene'.format(x) for x in cat_names]
57
- sentences_synonyms = [['a photo of a {} in the scene'.format(xx) for xx in x] \
58
- for x in synonyms]
59
-
60
- print('sentences_synonyms', len(sentences_synonyms), \
61
- sum(len(x) for x in sentences_synonyms))
62
- if args.model == 'clip':
63
- import clip
64
- print('Loading CLIP')
65
- model, preprocess = clip.load(args.clip_model, device=device)
66
- if args.avg_synonyms:
67
- sentences = list(itertools.chain.from_iterable(sentences_synonyms))
68
- print('flattened_sentences', len(sentences))
69
- text = clip.tokenize(sentences).to(device)
70
- with torch.no_grad():
71
- if len(text) > 10000:
72
- text_features = torch.cat([
73
- model.encode_text(text[:len(text) // 2]),
74
- model.encode_text(text[len(text) // 2:])],
75
- dim=0)
76
- else:
77
- text_features = model.encode_text(text)
78
- print('text_features.shape', text_features.shape)
79
- if args.avg_synonyms:
80
- synonyms_per_cat = [len(x) for x in sentences_synonyms]
81
- text_features = text_features.split(synonyms_per_cat, dim=0)
82
- text_features = [x.mean(dim=0) for x in text_features]
83
- text_features = torch.stack(text_features, dim=0)
84
- print('after stack', text_features.shape)
85
- text_features = text_features.cpu().numpy()
86
- elif args.model in ['bert', 'roberta']:
87
- from transformers import AutoTokenizer, AutoModel
88
- if args.model == 'bert':
89
- model_name = 'bert-large-uncased'
90
- if args.model == 'roberta':
91
- model_name = 'roberta-large'
92
- tokenizer = AutoTokenizer.from_pretrained(model_name)
93
- model = AutoModel.from_pretrained(model_name)
94
- model.eval()
95
- if args.avg_synonyms:
96
- sentences = list(itertools.chain.from_iterable(sentences_synonyms))
97
- print('flattened_sentences', len(sentences))
98
- inputs = tokenizer(sentences, padding=True, return_tensors="pt")
99
- with torch.no_grad():
100
- model_outputs = model(**inputs)
101
- outputs = model_outputs.pooler_output
102
- text_features = outputs.detach().cpu()
103
- if args.avg_synonyms:
104
- synonyms_per_cat = [len(x) for x in sentences_synonyms]
105
- text_features = text_features.split(synonyms_per_cat, dim=0)
106
- text_features = [x.mean(dim=0) for x in text_features]
107
- text_features = torch.stack(text_features, dim=0)
108
- print('after stack', text_features.shape)
109
- text_features = text_features.numpy()
110
- print('text_features.shape', text_features.shape)
111
- else:
112
- assert 0, args.model
113
- if args.out_path != '':
114
- print('saveing to', args.out_path)
115
- np.save(open(args.out_path, 'wb'), text_features)
116
- import pdb; pdb.set_trace()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/DeclK/pose/tools/visualizer.py DELETED
@@ -1,346 +0,0 @@
1
- import cv2
2
- import numpy as np
3
- from skimage import draw, io
4
- from PIL import Image, ImageDraw, ImageFont
5
- from easydict import EasyDict
6
- from typing import Union
7
- from .utils import get_skeleton, Timer
8
-
9
- class FastVisualizer:
10
- """ Use skimage to draw, which is much faster than matplotlib, and
11
- more beatiful than opencv.😎
12
- """
13
- # TODO: modify color input parameter
14
- def __init__(self, image=None) -> None:
15
- self.set_image(image)
16
- self.colors = self.get_pallete()
17
- self.skeleton = get_skeleton()
18
- self.lvl_tresh = self.set_level([0.3, 0.6, 0.8])
19
-
20
- def set_image(self, image: Union[str, np.ndarray]):
21
- if isinstance(image, str):
22
- self.image = cv2.imread(image)
23
- elif isinstance(image, np.ndarray) or image is None:
24
- self.image = image
25
- else:
26
- raise TypeError(f"Type {type(image)} is not supported")
27
-
28
- def get_image(self):
29
- return self.image
30
-
31
- def draw_box(self, box_coord, color=(25, 113, 194), alpha=1.0):
32
- """ Draw a box on the image
33
- Args:
34
- box_coord: a list of [xmin, ymin, xmax, ymax]
35
- alpha: the alpha of the box
36
- color: the edge color of the box
37
- """
38
- xmin, ymin, xmax, ymax = box_coord
39
- rr, cc = draw.rectangle_perimeter((ymin, xmin), (ymax, xmax))
40
- draw.set_color(self.image, (rr, cc), color, alpha=alpha)
41
- return self
42
-
43
- def draw_rectangle(self, box_coord, color=(25, 113, 194), alpha=1.0):
44
- xmin, ymin, xmax, ymax = box_coord
45
- rr, cc = draw.rectangle((ymin, xmin), (ymax, xmax))
46
- draw.set_color(self.image, (rr, cc), color, alpha=alpha)
47
- return self
48
-
49
- def draw_point(self, point_coord, radius=5, color=(25, 113, 194), alpha=1.0):
50
- """ Coord in (x, y) format, but will be converted to (y, x)
51
- """
52
- x, y = point_coord
53
- rr, cc = draw.disk((y, x), radius=radius)
54
- draw.set_color(self.image, (rr, cc), color, alpha=alpha)
55
- return self
56
-
57
- def draw_line(self, start_point, end_point, color=(25, 113, 194), alpha=1.0):
58
- """ Not used, because I can't produce smooth line.
59
- """
60
- cv2.line(self.image, start_point, end_point, color.tolist(), 2,
61
- cv2.LINE_AA)
62
- return self
63
-
64
- def draw_line_aa(self, start_point, end_point, color=(25, 113, 194), alpha=1.0):
65
- """ Not used, because I can't produce smooth line.
66
- """
67
- x1, y1 = start_point
68
- x2, y2 = end_point
69
- rr, cc, val = draw.line_aa(y1, x1, y2, x2)
70
- draw.set_color(self.image, (rr, cc), color, alpha=alpha)
71
- return self
72
-
73
- def draw_thick_line(self, start_point, end_point, thickness=1, color=(25, 113, 194), alpha=1.0):
74
- """ Not used, because I can't produce smooth line.
75
- """
76
- x1, y1 = start_point
77
- x2, y2 = end_point
78
- dx, dy = x2 - x1, y2 - y1
79
- length = np.sqrt(dx * dx + dy * dy)
80
- cos, sin = dx / length, dy / length
81
-
82
- half_t = thickness / 2.0
83
- # Calculate the polygon vertices
84
- vertices_x = [x1 - half_t * sin, x1 + half_t * sin,
85
- x2 + half_t * sin, x2 - half_t * sin]
86
- vertices_y = [y1 + half_t * cos, y1 - half_t * cos,
87
- y2 - half_t * cos, y2 + half_t * cos]
88
- rr, cc = draw.polygon(vertices_y, vertices_x)
89
- draw.set_color(self.image, (rr, cc), color, alpha)
90
-
91
- return self
92
-
93
- def draw_text(self, text, position,
94
- font_path='assets/SmileySans/SmileySans-Oblique.ttf',
95
- font_size=20,
96
- text_color=(255, 255, 255)):
97
- """ Position is the left top corner of the text
98
- """
99
- # Convert the NumPy array to a PIL image
100
- pil_image = Image.fromarray(np.uint8(self.image))
101
- # Load the font (default is Arial)
102
- font = ImageFont.truetype(font_path, font_size)
103
- # Create a drawing object
104
- draw = ImageDraw.Draw(pil_image)
105
- # Add the text to the image
106
- draw.text(position, text, font=font, fill=text_color)
107
- # Convert the PIL image back to a NumPy array
108
- result = np.array(pil_image)
109
-
110
- self.image = result
111
- return self
112
-
113
- def xyhw_to_xyxy(self, box):
114
- hw = box[2:]
115
- x1y1 = box[:2] - hw / 2
116
- x2y2 = box[:2] + hw / 2
117
- return np.concatenate([x1y1, x2y2]).astype(np.int32)
118
-
119
- def draw_line_in_discrete_style(self, start_point, end_point, size=2, sample_points=3,
120
- color=(25, 113, 194), alpha=1.0):
121
- """ When drawing continous line, it is super fuzzy, and I can't handle them
122
- very well even tried OpneCV & PIL all kinds of ways. This is a workaround.
123
- The discrete line will be represented with few sampled cubes along the line,
124
- and it is exclusive with start & end points.
125
- """
126
- # sample points
127
- points = np.linspace(start_point, end_point, sample_points + 2)[1:-1]
128
- for p in points:
129
- rectangle_xyhw = np.array((p[0], p[1], size, size))
130
- rectangle_xyxy = self.xyhw_to_xyxy(rectangle_xyhw)
131
- self.draw_rectangle(rectangle_xyxy, color, alpha)
132
- return self
133
-
134
- def draw_human_keypoints(self, keypoints, scores=None, factor=20, draw_skeleton=False):
135
- """ Draw skeleton on the image, and give different color according
136
- to similarity scores.
137
- """
138
- # get max length of skeleton
139
- max_x, max_y = np.max(keypoints, axis=0)
140
- min_x, min_y = np.min(keypoints, axis=0)
141
- max_length = max(max_x - min_x, max_y - min_y)
142
- if max_length < 1: return self
143
- cube_size = max_length // factor
144
- line_cube_size = cube_size // 2
145
- # draw skeleton in discrete style
146
- if draw_skeleton:
147
- for key, links in self.skeleton.items():
148
- links = np.array(links)
149
- start_points = keypoints[links[:, 0]]
150
- end_points = keypoints[links[:, 1]]
151
- for s, e in zip(start_points, end_points):
152
- self.draw_line_in_discrete_style(s, e, line_cube_size,
153
- color=self.colors[key], alpha=0.9)
154
- # draw points
155
- if scores is None: # use vamos color
156
- lvl_names = ['vamos'] * len(keypoints)
157
- else: lvl_names = self.score_level_names(scores)
158
-
159
- for idx, (point, lvl_name) in enumerate(zip(keypoints, lvl_names)):
160
- if idx in set((0, 1, 2, 3, 4)):
161
- continue # do not draw head
162
- rectangle_xyhw = np.array((point[0], point[1], cube_size, cube_size))
163
- rectangle_xyxy = self.xyhw_to_xyxy(rectangle_xyhw)
164
- self.draw_rectangle(rectangle_xyxy,
165
- color=self.colors[lvl_name],
166
- alpha=0.8)
167
- return self
168
-
169
- def draw_score_bar(self, score, factor=50, bar_ratio=7):
170
- """ Draw a score bar on the left top of the image.
171
- factor: the value of image longer edge divided by the bar height
172
- bar_ratio: the ratio of bar width to bar height
173
- """
174
- # calculate bar's height and width
175
- long_edge = np.max(self.image.shape[:2])
176
- short_edge = np.min(self.image.shape[:2])
177
- bar_h = long_edge // factor
178
- bar_w = bar_h * bar_ratio
179
- if bar_w * 3 > short_edge:
180
- # when the image width is not enough
181
- bar_w = short_edge // 4
182
- bar_h = bar_w // bar_ratio
183
- cube_size = bar_h
184
- # bar's base position
185
- bar_start_point = (2*bar_h, 2*bar_h)
186
- # draw bar horizontally, and record the position of each word
187
- word_positions = []
188
- box_coords = []
189
- colors = [self.colors.bad, self.colors.good, self.colors.vamos]
190
- for i, color in enumerate(colors):
191
- x0, y0 = bar_start_point[0] + i*bar_w, bar_start_point[1]
192
- x1, y1 = x0 + bar_w - 1, y0 + bar_h
193
- box_coord = np.array((x0, y0, x1, y1), dtype=np.int32)
194
- self.draw_rectangle(box_coord, color=color)
195
-
196
- box_coords.append(box_coord)
197
- word_positions.append(np.array((x0, y1 + bar_h // 2)))
198
- # calculate cube position according to score
199
- lvl, lvl_ratio, lvl_name = self.score_level(score)
200
- # the first level start point is the first bar
201
- cube_lvl_start_x0 = [box_coord[0] - cube_size // 2 if i != 0
202
- else box_coord[0]
203
- for i, box_coord in enumerate(box_coords)]
204
- # process the last level, I want the cube stays in the bar
205
- level_length = bar_w if lvl == 1 else bar_w - cube_size // 2
206
- cube_x0 = cube_lvl_start_x0[lvl] + lvl_ratio * level_length
207
- cube_y0 = bar_start_point[1] - bar_h // 2 - cube_size
208
- cube_x1 = cube_x0 + cube_size
209
- cube_y1 = cube_y0 + cube_size
210
- # draw cube
211
- self.draw_rectangle((cube_x0, cube_y0, cube_x1, cube_y1),
212
- color=self.colors.cube)
213
- # enlarge the box, to emphasize the level
214
- enlarged_box = box_coords[lvl].copy()
215
- enlarged_box[:2] = enlarged_box[:2] - bar_h // 8
216
- enlarged_box[2:] = enlarged_box[2:] + bar_h // 8
217
- self.draw_rectangle(enlarged_box, color=self.colors[lvl_name])
218
-
219
- # draw text
220
- if lvl_name == 'vamos':
221
- lvl_name = 'vamos!!' # exciting!
222
- self.draw_text(lvl_name.capitalize(),
223
- word_positions[lvl],
224
- font_size=bar_h * 2,
225
- text_color=tuple(colors[lvl].tolist()))
226
-
227
- return self
228
-
229
- def draw_non_transparent_area(self, box_coord, alpha=0.2, extend_ratio=0.1):
230
- """ Make image outside the box transparent using alpha blend
231
- """
232
- x1, y1, x2, y2 = box_coord.astype(np.int32)
233
- # enlarge the box for 10%
234
- max_len = max((x2 - x1), (y2 - y1))
235
- extend_len = int(max_len * extend_ratio)
236
- x1, y1 = x1 - extend_len, y1 - extend_len
237
- x2, y2 = x2 + extend_len, y2 + extend_len
238
- # clip the box
239
- h, w = self.image.shape[:2]
240
- x1, y1, x2, y2 = np.clip((x1,y1,x2,y2), a_min=0,
241
- a_max=(w,h,w,h))
242
- # Create a white background color
243
- bg_color = np.ones_like(self.image) * 255
244
- # Copy the box region from the image
245
- bg_color[y1:y2, x1:x2] = self.image[y1:y2, x1:x2]
246
- # Alpha blend inplace
247
- self.image[:] = self.image * alpha + bg_color * (1 - alpha)
248
- return self
249
-
250
- def draw_logo(self, logo='assets/logo.png', factor=30, shift=20):
251
- """ Draw logo on the right bottom of the image.
252
- """
253
- H, W = self.image.shape[:2]
254
- # load logo
255
- logo_img = Image.open(logo)
256
- # scale logo
257
- logo_h = self.image.shape[0] // factor
258
- scale_size = logo_h / logo_img.size[1]
259
- logo_w = int(logo_img.size[0] * scale_size)
260
- logo_img = logo_img.resize((logo_w, logo_h))
261
- # convert to RGBA
262
- image = Image.fromarray(self.image).convert("RGBA")
263
- # alpha blend
264
- image.alpha_composite(logo_img, (W - logo_w - shift,
265
- H - logo_h - shift))
266
- self.image = np.array(image.convert("RGB"))
267
- return self
268
-
269
- def score_level(self, score):
270
- """ Return the level according to level thresh.
271
- """
272
- t = self.lvl_tresh
273
- if score < t[1]: # t[0] might bigger than 0
274
- ratio = (score - t[0]) / (t[1] - t[0])
275
- ratio = np.clip(ratio, a_min=0, a_max=1)
276
- return 0, ratio, 'bad'
277
- elif score < t[2]:
278
- ratio = (score - t[1]) / (t[2] - t[1])
279
- return 1, ratio, 'good'
280
- else:
281
- ratio = (score - t[2]) / (1 - t[2])
282
- return 2, ratio, 'vamos'
283
-
284
- def score_level_names(self, scores):
285
- """ Get multiple score level, return numpy array.
286
- np.vectorize does not speed up loop, but it is convenient.
287
- """
288
- t = self.lvl_tresh
289
- func_lvl_name = lambda x: 'bad' if x < t[1] else 'good' \
290
- if x < t[2] else 'vamos'
291
- lvl_names = np.vectorize(func_lvl_name)(scores)
292
- return lvl_names
293
-
294
- def set_level(self, thresh):
295
- """ Set level thresh for bad, good, vamos.
296
- """
297
- from collections import namedtuple
298
- Level = namedtuple('Level', ['zero', 'good', 'vamos'])
299
- return Level(thresh[0], thresh[1], thresh[2])
300
-
301
- def get_pallete(self):
302
- PALLETE = EasyDict()
303
-
304
- # light set
305
- # PALLETE.bad = np.array([253, 138, 138])
306
- # PALLETE.good = np.array([168, 209, 209])
307
- # PALLETE.vamos = np.array([241, 247, 181])
308
- # PALLETE.cube = np.array([158, 161, 212])
309
-
310
- # dark set, set 80% brightness
311
- PALLETE.bad = np.array([204, 111, 111])
312
- PALLETE.good = np.array([143, 179, 179])
313
- PALLETE.vamos = np.array([196, 204, 124])
314
- PALLETE.vamos = np.array([109, 169, 228])
315
- PALLETE.cube = np.array([152, 155, 204])
316
-
317
- PALLETE.left_arm = np.array([218, 119, 242])
318
- PALLETE.right_arm = np.array([151, 117, 250])
319
- PALLETE.left_leg = np.array([255, 212, 59])
320
- PALLETE.right_leg = np.array([255, 169, 77])
321
-
322
- PALLETE.head = np.array([134, 142, 150])
323
- PALLETE.body = np.array([134, 142, 150])
324
-
325
- # convert rgb to bgr
326
- for k, v in PALLETE.items():
327
- PALLETE[k] = v[::-1]
328
- return PALLETE
329
-
330
- if __name__ == '__main__':
331
- vis = FastVisualizer()
332
-
333
- image = '/github/Tennis.ai/assets/tempt_test.png'
334
- vis.set_image(image)
335
- np.random.seed(0)
336
- keypoints = np.random.randint(300, 600, (17, 2))
337
- from utils import Timer
338
- t= Timer()
339
- t.start()
340
- vis.draw_score_bar(0.94)
341
- # vis.draw_skeleton(keypoints)
342
- # vis.draw_non_transparent_area((0, 0, 100, 100), alpha=0.2)
343
- vis.draw_logo()
344
- cv2.imshow('test', vis.image)
345
- cv2.waitKey(0)
346
- cv2.destroyAllWindows()