parquet-converter committed
Commit 98cd895
1 Parent(s): 9d551d1

Update parquet files (step 63 of 397)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/1111u/oai-reverse-proxy/README.md +0 -10
  2. spaces/1gistliPinn/ChatGPT4/Examples/AdobeIllustratorCC2018v2203264BitFullwithCrackrar [2021].md +0 -13
  3. spaces/1gistliPinn/ChatGPT4/Examples/Disk Digger Serial.md +0 -25
  4. spaces/1phancelerku/anime-remove-background/Download My Talking Tom Friends The Ultimate Virtual Pet Game.md +0 -95
  5. spaces/1phancelerku/anime-remove-background/Download Ship Simulator for Mac - Enjoy the Realistic Graphics and Sounds of Ship Driving.md +0 -173
  6. spaces/1toTree/lora_test/ppdiffusers/pipelines/pndm/pipeline_pndm.py +0 -94
  7. spaces/7thHeaven/GPT2WordPress/app.py +0 -109
  8. spaces/801artistry/RVC801/lib/infer_pack/modules.py +0 -522
  9. spaces/801artistry/RVC801/venv.sh +0 -1
  10. spaces/AIFILMS/generate_human_motion/VQ-Trans/options/option_transformer.py +0 -68
  11. spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/paramUtil.py +0 -63
  12. spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/factory.py +0 -257
  13. spaces/AIZeroToHero/05-RealtimeStreamlitASR/app.py +0 -119
  14. spaces/AUBADA-ALARABI/poetry202/app.py +0 -53
  15. spaces/Abdllh/poetry202/README.md +0 -13
  16. spaces/AchyuthGamer/OpenGPT/client/js/change-language.js +0 -47
  17. spaces/AdithyaSNair/Medical_price_prediction/README.md +0 -12
  18. spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/basic.py +0 -27
  19. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/alphamaskimage/AlphaMaskImage.js +0 -2
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/filechooser/Factory.d.ts +0 -5
  21. spaces/Ameaou/academic-chatgpt3.1/crazy_functions/批量Markdown翻译.py +0 -161
  22. spaces/Andy1621/uniformer_image_detection/configs/_base_/models/mask_rcnn_uniformer_fpn.py +0 -121
  23. spaces/Andy1621/uniformer_image_detection/configs/fast_rcnn/fast_rcnn_r101_fpn_1x_coco.py +0 -2
  24. spaces/Andy1621/uniformer_image_detection/configs/legacy_1.x/retinanet_r50_caffe_fpn_1x_coco_v1.py +0 -37
  25. spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py +0 -37
  26. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/bbox_heads/sabl_head.py +0 -572
  27. spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py +0 -2
  28. spaces/Andyrasika/Andyrasika-dreamshaper-sdxl-1.0/README.md +0 -12
  29. spaces/Anthony7906/MengHuiMXD_GPT/modules/utils.py +0 -548
  30. spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/utils/model_list.py +0 -6
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/__init__.py +0 -24
  32. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/Makefile +0 -19
  33. spaces/Awiny/Image2Paragraph/models/segment_models/configs/__init__.py +0 -1
  34. spaces/Awiny/Image2Paragraph/models/segment_models/semgent_anything_model.py +0 -29
  35. spaces/Benson/text-generation/Examples/ .md +0 -63
  36. spaces/Benson/text-generation/Examples/101 Yzbir Okey Plus Apk.md +0 -80
  37. spaces/Benson/text-generation/Examples/Call Of Duty Black Ops 2 Descarga Mvil.md +0 -102
  38. spaces/Benson/text-generation/Examples/Cmo Descargar Hill Climb Racing 2 En PC.md +0 -57
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/eventstream.py +0 -633
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/register.py +0 -319
  41. spaces/BigSalmon/BackTranslation/README.md +0 -12
  42. spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/env.py +0 -105
  43. spaces/CVPR/regionclip-demo/detectron2/evaluation/coco_evaluation.py +0 -610
  44. spaces/CVPR/regionclip-demo/detectron2/export/caffe2_export.py +0 -207
  45. spaces/Cobalt337/lambdalabs-sd-pokemon-diffusers/README.md +0 -12
  46. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/roi_heads/boundary_head/inference.py +0 -207
  47. spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/builders/instruct_builder.py +0 -78
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/cu2qu/cu2qu.c +0 -0
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/clear_button.py +0 -70
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates.py +0 -574
spaces/1111u/oai-reverse-proxy/README.md DELETED
@@ -1,10 +0,0 @@
- ---
- title: Oai Reverse Proxy
- emoji: 🏃
- colorFrom: indigo
- colorTo: yellow
- sdk: docker
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/1gistliPinn/ChatGPT4/Examples/AdobeIllustratorCC2018v2203264BitFullwithCrackrar [2021].md DELETED
@@ -1,13 +0,0 @@
- <h2>AdobeIllustratorCC2018v2203264BitFullwithCrackrar</h2><br /><p><b><b>Download Zip</b> &#10027; <a href="https://imgfil.com/2uxXPS">https://imgfil.com/2uxXPS</a></b></p><br /><br />
- <br />
- none none
- none
- Title: Collection of books Series: Fiction, fantasy, mysticism
- Download for free without registration a collection of books fb 2.
- Collection of books in the series.
- Download free book - collection - a collection of new books.
- Download the book Collection of new books.
- none 8a78ff9644<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Disk Digger Serial.md DELETED
@@ -1,25 +0,0 @@
- <br />
- <h1>How to Recover Lost Files with DiskDigger Serial</h1>
- <p>Have you ever accidentally deleted some important files from your computer, memory card, or USB drive? Or have you ever formatted your camera's memory card and lost all your photos and videos? If so, you might be interested in a tool that can help you recover your lost files. That tool is called DiskDigger.</p>
- <h2>disk digger serial</h2><br /><p><b><b>DOWNLOAD</b> &#187;&#187;&#187; <a href="https://imgfil.com/2uy1e2">https://imgfil.com/2uy1e2</a></b></p><br /><br />
- <p>DiskDigger is a software that can undelete and recover lost files from any media that your PC can read, including hard disks, flash drives, memory cards, and more. It can recover files from various file systems, such as FAT, NTFS, exFAT, HFS+, and ext4. It can also recover files of various types, such as photos, videos, music, documents, and more.</p>
- <p>However, DiskDigger is not a free software. You need to purchase a license key to unlock its full features and functionality. A license key costs $19.99 for a single user license, or $49.99 for a site license that allows unlimited installations on multiple PCs. If you don't have a license key, you can only use DiskDigger in "preview" mode, which lets you see the recoverable files but not save them.</p>
- <p>So, how can you get a DiskDigger serial for free? Well, there are some websites that claim to offer DiskDigger serials, cracks, or keygens that can generate valid license keys for DiskDigger. However, these websites are not trustworthy and may contain malware, viruses, or other harmful programs that can damage your PC or steal your personal information. Moreover, using a cracked or pirated version of DiskDigger is illegal and unethical.</p>
- <p>The best way to get a DiskDigger serial is to buy it from the official website of DiskDigger. By doing so, you will support the developers of this useful software and ensure that you get the latest updates and bug fixes. You will also get a 30-day money-back guarantee if you are not satisfied with the product.</p>
- <p>To buy a DiskDigger serial, go to <a href="https://www.diskdigger.org/buy">https://www.diskdigger.org/buy</a> and choose the license type that suits your needs. You can pay with PayPal or credit card. After completing the payment process, you will receive an email with your license key and instructions on how to activate DiskDigger.</p>
- <p></p>
- <p>Once you have your DiskDigger serial, you can download the latest version of DiskDigger from <a href="https://www.diskdigger.org/download">https://www.diskdigger.org/download</a> and install it on your PC. Then run DiskDigger and enter your license key when prompted. You will then be able to use DiskDigger in full mode and recover your lost files with ease.</p>
- <p>DiskDigger is a powerful and reliable tool that can help you recover your lost files from any media. Don't waste your time and money on fake or illegal DiskDigger serials. Buy a genuine license key from the official website of DiskDigger and enjoy its benefits.</p>
-
- <p>How to Use DiskDigger to Recover Lost Files</p>
- <p>Now that you have a DiskDigger serial and have activated DiskDigger on your PC, you can start using it to recover your lost files. Here are the steps to follow:</p>
- <ol>
- <li>Launch DiskDigger and select the drive or device that you want to scan for lost files. You can also choose a specific folder or file type to narrow down the search.</li>
- <li>Choose the scan mode that you want to use. DiskDigger offers two scan modes: "Dig Deep" and "Dig Deeper". The "Dig Deep" mode scans the file system for deleted files and recovers them with their original names and paths. The "Dig Deeper" mode scans the entire disk surface for traces of files and recovers them based on their signatures. The "Dig Deeper" mode is more thorough but may take longer and recover more files than you need.</li>
- <li>Click "Next" and wait for DiskDigger to scan the selected drive or device. You will see a list of recoverable files as they are found. You can preview the files by clicking on them or filter them by name, size, date, or type.</li>
- <li>Select the files that you want to recover and click "Recover". You can choose to save the files to a different location on your PC, upload them to an FTP server, or send them as email attachments.</li>
- <li>Review the recovered files and make sure they are intact and usable. If some files are corrupted or incomplete, you can try scanning again with different settings or using another recovery software.</li>
- </ol>
- <p>DiskDigger is a simple and effective tool that can help you recover your lost files from any media. With a DiskDigger serial, you can unlock its full features and functionality and recover your files with ease. Don't hesitate to buy a DiskDigger serial from the official website of DiskDigger and enjoy its benefits.</p> d5da3c52bf<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download My Talking Tom Friends The Ultimate Virtual Pet Game.md DELETED
@@ -1,95 +0,0 @@
- <br />
- <h1>Download My Talking Tom and Friends: A World of Friendship and Fun</h1>
- <p>Do you love virtual pets? Do you enjoy simulation games? Do you like to customize your own characters? If you answered yes to any of these questions, then you should download My Talking Tom and Friends, the best new virtual pet game from Outfit7 Limited. In this game, you can take care of six adorable characters: Tom, Angela, Hank, Ginger, Ben, and Becca. You can interact with them, play with them, dress them up, feed them, and watch them grow. You can also explore their house, go to town, and discover new mini games and surprises. My Talking Tom and Friends is a world of friendship and fun waiting for you.</p>
- <h2>download my talking tom and friends</h2><br /><p><b><b>Download</b> &#10042; <a href="https://jinyurl.com/2uNMCG">https://jinyurl.com/2uNMCG</a></b></p><br /><br />
- <h2>What is My Talking Tom and Friends?</h2>
- <p>My Talking Tom and Friends is a virtual pet game that lets you take care of six different characters at once. Each character has their own personality, preferences, and hobbies. You can learn more about them by talking to them, playing with them, and watching their reactions. You can also customize their appearance by choosing from a closet full of fun fashions. You can even mix and match outfits to create your own unique style.</p>
- <h3>A simulation game with various activities and mini games</h3>
- <p>My Talking Tom and Friends is also a simulation game that lets you experience various activities with your pet friends. You can cook for them, clean for them, take them to the bathroom, put them to bed, and more. You can also enjoy creative and sporty activities with them, such as painting, gardening, dancing, skateboarding, and more. You can also play mini games with them, such as puzzles, arcade games, racing games, and more. You can earn coins by playing mini games, which you can use to buy more outfits, toys, stickers, and other items.</p>
- <h3>A customization game with outfits, toys, stickers, and coins</h3>
- <p>My Talking Tom and Friends is also a customization game that lets you personalize your pet friends' house. You can decorate their rooms with different wallpapers, furniture, accessories, and more. You can also collect toys for them to play with, such as balls, dolls, cars, robots, and more. You can also collect stickers for them to stick on their walls or albums. You can also collect coins for them to spend on more items or surprises.</p>
- <h2>Why should you download My Talking Tom and Friends?</h2>
- <p>There are many reasons why you should download My Talking Tom and Friends. Here are some of them:</p>
- <h3>It is free and easy to play</h3>
- <p>My Talking Tom and Friends is a free game that you can download from the Google Play Store or the App Store. It is also easy to play, as it has simple controls and intuitive features. You just need to tap, swipe, drag, or tilt your device to interact with your pet friends. You can also use voice commands or text messages to talk to them.</p>
- <h3>It is fun and engaging for all ages</h3>
- <p>My Talking Tom and Friends is a fun game that can entertain anyone from kids to adults. It has colorful graphics, cute animations, funny sounds, and lively music. It also has diverse content that can appeal to different tastes and interests. Whether you like cute animals, fashion trends, creative arts, or exciting games, you will find something to enjoy in My Talking Tom and Friends.</p>
- <h3>It is creative and interactive for all personalities</h3>
- <p>My Talking Tom and Friends is a creative game that lets you express yourself through your pet friends. You can choose how they look, act, and sound. You can also choose how they spend their time, what they do, and where they go. You can also interact with them in various ways, such as tickling them, poking them, hugging them, and more. You can also make them repeat what you say or sing along with you.</p>
- <h2>How can you download My Talking Tom and Friends?</h2>
- <p>Downloading My Talking Tom and Friends is easy and fast. You just need to follow these steps:</p>
- <h3>For Android devices</h3>
- <p>If you have an Android device, you can download My Talking Tom and Friends from the Google Play Store. Here is how:</p>
- <p>How to download my talking tom friends on android<br />
- My talking tom friends free download for pc<br />
- My talking tom friends mod apk unlimited money and stars<br />
- My talking tom friends game play online<br />
- My talking tom friends outfits and accessories<br />
- My talking tom friends latest version update<br />
- My talking tom friends tips and tricks<br />
- My talking tom friends best mini games<br />
- My talking tom friends review and rating<br />
- My talking tom friends fun activities and challenges<br />
- Download my talking tom friends from google play store<br />
- Download my talking tom friends from app store<br />
- Download my talking tom friends for windows 10<br />
- Download my talking tom friends for mac<br />
- Download my talking tom friends for fire tablet<br />
- Download my talking tom friends hack version<br />
- Download my talking tom friends without ads<br />
- Download my talking tom friends with all characters unlocked<br />
- Download my talking tom friends offline mode<br />
- Download my talking tom friends new features and events<br />
- Why you should download my talking tom friends<br />
- Benefits of downloading my talking tom friends<br />
- How to install and run my talking tom friends<br />
- How to uninstall and delete my talking tom friends<br />
- How to backup and restore my talking tom friends data<br />
- How to connect and share my talking tom friends with friends<br />
- How to watch and subscribe to my talking tom friends youtube channel<br />
- How to contact and get support for my talking tom friends<br />
- How to customize and personalize my talking tom friends<br />
- How to earn and spend coins and bus tokens in my talking tom friends</p>
- <ol>
- <li>Open the Google Play Store app on your device.</li>
- <li>Search for "My Talking Tom and Friends" in the search bar.</li>
- <li>Select the game from the list of results and tap on "Install".</li>
- <li>Wait for the game to download and install on your device.</li>
- <li>Tap on "Open" to launch the game and start playing.</li>
- </ol>
- <h3>For iOS devices</h3>
- <p>If you have an iOS device, you can download My Talking Tom and Friends from the App Store. Here is how:</p>
- <ol>
- <li>Open the App Store app on your device.</li>
- <li>Search for "My Talking Tom and Friends" in the search bar.</li>
- <li>Select the game from the list of results and tap on "Get".</li>
- <li>Enter your Apple ID password or use Touch ID or Face ID to confirm.</li>
- <li>Wait for the game to download and install on your device.</li>
- <li>Tap on the game icon to launch the game and start playing.</li>
- </ol>
- <h3>For YouTube videos</h3>
- <p>If you want to watch YouTube videos of My Talking Tom and Friends, you can visit the official YouTube channel of Outfit7 Limited. Here is how:</p>
- <ol>
- <li>Open the YouTube app or website on your device.</li>
- <li>Search for "Outfit7 Limited" in the search bar.</li>
- <li>Select the channel from the list of results and tap on "Subscribe".</li>
- <li>Browse through the videos of My Talking Tom and Friends and other games from Outfit7 Limited.</li>
- <li>Select a video that you want to watch and tap on "Play".</li>
- <li>Enjoy watching the video and leave a comment or a like if you want.</li>
- </ol>
- <h2>Conclusion</h2>
- <p>My Talking Tom and Friends is a wonderful game that you should download today. It is a virtual pet game, a simulation game, and a customization game all in one. It is free, easy, fun, engaging, creative, and interactive. It is suitable for all ages and personalities. It is a world of friendship and fun that you can enjoy with your pet friends. Download My Talking Tom and Friends now and join the millions of players who love this game.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions about My Talking Tom and Friends:</p>
- <h4>Q: How can I update My Talking Tom and Friends?</h4>
- <p>A: To update My Talking Tom and Friends, you need to go to the Google Play Store or the App Store and check if there is a new version available. If there is, you can tap on "Update" to download and install the latest version of the game.</p>
- <h4>Q: How can I backup or restore my progress in My Talking Tom and Friends?</h4>
- <p>A: To backup or restore your progress in My Talking Tom and Friends, you need to connect your game to your Google Play Games account or your iCloud account. This way, you can save your progress online and access it from any device. You can also sync your progress across different games from Outfit7 Limited.</p>
- <h4>Q: How can I contact the support team of My Talking Tom and Friends?</h4>
- <p>A: To contact the support team of My Talking Tom and Friends, you need to go to the settings menu of the game and tap on "Support". You can then fill out a form with your name, email address, subject, message, and screenshots if needed. You can also visit the official website of Outfit7 Limited at https://outfit7.com/ for more information.</p>
- <h4>Q: How can I share my feedback or suggestions for My Talking Tom and Friends?</h4>
- <p>A: To share your feedback or suggestions for My Talking Tom and Friends, you need to go to the settings menu of the game and tap on "Feedback". You can then rate the game with stars, write a review, or send an email. You can also leave a comment or a review on the Google Play Store or the App Store. You can also follow the social media accounts of Outfit7 Limited on Facebook, Twitter, Instagram, and more.</p>
- <h4>Q: How can I get more coins in My Talking Tom and Friends?</h4>
- <p>A: To get more coins in My Talking Tom and Friends, you can play more mini games, complete more tasks, watch more ads, or buy more coins with real money. You can also get free coins by logging in daily, inviting friends, or joining events.</p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Download Ship Simulator for Mac - Enjoy the Realistic Graphics and Sounds of Ship Driving.md DELETED
@@ -1,173 +0,0 @@
- <br />
- <h1>Ship Simulator Games for Mac: Free Alternatives to Try</h1>
- <p>Ship simulator games are a type of simulation games that allow you to control various types of ships and experience realistic maritime scenarios. They can be fun, educational, and challenging, depending on the game mode, difficulty, and features.</p>
- <p>However, not all ship simulator games are free to download. Some of them require you to purchase the game or pay a subscription fee to access the full content. This can be a problem for some Mac users who want to enjoy ship simulation without spending any money.</p>
- <h2>ship simulator mac free download</h2><br /><p><b><b>Download Zip</b> &bull;&bull;&bull; <a href="https://jinyurl.com/2uNTZU">https://jinyurl.com/2uNTZU</a></b></p><br /><br />
- <p>Fortunately, there are some free alternatives that you can try if you are looking for ship simulator games for Mac. In this article, we will review three of them: Ship Handling Simulator, The Ship Simulator 2022, and NAUTIS Home - Ship Simulator. We will compare their features, pros and cons, and how to download them for Mac users.</p>
- <h2>Ship Handling Simulator</h2>
- <p>Ship Handling Simulator is a realistic ship simulator game that lets you control different types of ships, such as tugboats, container ships, cruise ships, and more. You can choose from various locations, such as New York, Rotterdam, Hong Kong, and others. You can also adjust the weather conditions, such as wind, waves, fog, and rain. The game has a sandbox mode where you can freely explore the environment and practice your skills. You can also take on missions and challenges that test your ship handling abilities.</p>
- <h3>Features</h3>
- <ul>
- <li>Realistic physics and graphics</li>
- <li>Various ships and locations</li>
- <li>Weather effects and day/night cycle</li>
- <li>Sandbox mode and missions</li>
- <li>Online leaderboards and achievements</li>
- </ul>
- <h3>Pros and Cons</h3>
- <table>
- <tr>
- <th>Pros</th>
- <th>Cons</th>
- </tr>
- <tr>
- <td>Good graphics and sound effects</td>
- <td>Limited locations and scenarios</td>
- </tr>
- <tr>
- <td>Easy controls and interface</td>
- <td>Expensive price ($10.99)</td>
- </tr>
- <tr>
- <td>Frequent updates and improvements</td>
- <td>No online multiplayer mode</td>
- </tr>
- <tr>
- <td>Fun and educational gameplay</td>
- <td>No customization options for ships or settings</td>
- </tr>
- </table>
- <h3>How to Download</h3>
- <p>To download Ship Handling Simulator for Mac, you need to visit the App Store and search for the game. You can also use this link: [Ship Handling Simulator]. The game costs $10.99 and requires macOS 10.9 or later. The game size is 1.6 GB and the current version is 1.4.1.</p>
- <h2>The Ship Simulator 2022</h2>
- <p>The Ship Simulator 2022 is an open world ship simulator game that lets you explore a huge map with various ports, islands, and landmarks. You can choose from a variety of ships, such as cargo ships, cruise ships, fishing boats, yachts, and more. You can also take on different missions, such as transporting goods, rescuing people, racing against other ships, and more. The game has stunning graphics and realistic physics that make you feel like you are really sailing on the sea.</p>
- <h3>Features</h3>
- <ul>
- <li>Open world map with diverse locations</li>
- <li>Variety of ships and missions</li>
- <li>Realistic physics and graphics</li>
- <li>Free to play with in-app purchases</li>
- <li>Online multiplayer mode and chat system</li>
- </ul>
- <h3>Pros and Cons</h3>
- <table>
- <tr>
- <th>Pros</th>
- <th>Cons</th>
- </tr>
- <tr>
- <td>Immersive gameplay and environment</td>
- <td>In-app purchases can be expensive or intrusive</td>
- </tr>
- <tr>
- <td>Stunning graphics and sound effects</td>
- <td>Bugs and glitches can affect the performance or experience</td>
- </tr>
- <tr>
- <td>Frequent updates and new content</td>
- <td>No offline mode or save option</td></tr><tr><td>Social features and interaction with other players </td>
- <td>No customization options for ships or settings</td>
- </tr>
- </table>
- <h3>How to Download</h3>
- <p>To download The Ship Simulator 2022 for Mac, you need to visit the App Store and search for the game. You can also use this link: [The Ship Simulator 2022]. The game is free to play but offers in-app purchases for extra content and features. The game requires iOS 10 or later. The game size is 1.1 GB and the current version is 1.0.2.</p>
- <h2>NAUTIS Home - Ship Simulator</h2>
- <p>NAUTIS Home - Ship Simulator is a realistic maritime simulation game that lets you experience various scenarios and situations that occur in the real world of shipping. You can choose from famous ports and locations, such as Rotterdam, Hamburg, Singapore, and more. You can also select from different types of ships, such as container ships, bulk carriers, ferries, and more. The game has an online multiplayer mode where you can join other players and compete or cooperate in various missions and challenges.</p>
- <h3>Features</h3>
- <ul>
- <li>Realistic maritime simulation with high standard of safety</li>
- <li>Famous ports and locations with accurate models and data</li>
- <li>Different types of ships with realistic controls and behavior</li>
- <li>Online multiplayer mode with voice chat and leaderboards</li>
- <li>Reduced costs, enhanced performance, fast learning process, objective assessment, flexibility, etc.</li>
- </ul>
- <h3>Pros and Cons</h3>
- <table>
- <tr>
- <th>Pros</th>
- <th>Cons</th>
- </tr>
- <tr>
- <td>High quality graphics and sound effects</td>
- <td>Subscription fee required ($9.99 per month or $99 per year)</td>
- </tr>
- <tr>
- <td>Educational and professional gameplay</td>
- <td>Limited free trial period (14 days)</td>
- </tr>
- <tr>
- <td>Frequent updates and new content</td>
- <td>No offline mode or save option</td></tr><tr><td>Social features and interaction with other players </td>
- <td>No customization options for ships or settings</td>
- </tr>
- </table>
- <h3>How to Download</h3>
- <p>To download NAUTIS Home - Ship Simulator for Mac, you need to visit the VSTEP LXP website and search for the game. You can also use this link: [NAUTIS Home - Ship Simulator]. The game requires a subscription fee of $9.99 per month or $99 per year to access the full content and features. The game also requires a minimum system requirement of macOS 10.13 or later. The game size is 2.5 GB and the current version is 1.0.0.</p>
- <p>ship handling simulator mac download<br />
- ship simulator 2022 for mac free<br />
- ship captain simulator mac free<br />
- ship simulator extremes mac download<br />
- ship simulator 2008 mac free download<br />
- ship simulator games for mac free<br />
- ship simulator titanic mac download<br />
- ship simulator world war 2 mac free<br />
- ship simulator sandbox mode mac download<br />
- ship simulator cruise liner mac free<br />
- ship simulator naval warfare mac download<br />
- ship simulator steam ships mac free<br />
- ship simulator aircraft carrier mac download<br />
- ship simulator battleships mac free<br />
- ship simulator cargo ships mac download<br />
- ship simulator sailing ships mac free<br />
- ship simulator tugboats mac download<br />
- ship simulator ferry boats mac free<br />
- ship simulator realistic physics mac download<br />
- ship simulator weather effects mac free<br />
- ship simulator historical ships mac download<br />
- ship simulator modern ships mac free<br />
- ship simulator port cities mac download<br />
- ship simulator open world map mac free<br />
- ship simulator mooring to a pier mac download<br />
- ship simulator maneuvering and docking mac free<br />
- ship simulator single and multi-screw vessels mac download<br />
- ship simulator azimuth propulsors mac free<br />
- ship simulator nuclear powered ships mac download<br />
- ship simulator electric propulsion ships mac free<br />
- ship simulator dynamic positioning system mac download<br />
- ship simulator bow and stern thrusters mac free<br />
- ship simulator rudder and propeller control mac download<br />
- ship simulator engine and speed control mac free<br />
- ship simulator helm and steering wheel mac download<br />
- ship simulator mini map and compass mac free<br />
- ship simulator multiple camera views mac download<br />
- ship simulator realistic sounds and graphics mac free<br />
- ship simulator challenging missions and levels mac download<br />
- ship simulator time and fuel management mac free<br />
- ship simulator collision and damage system mac download<br />
- ship simulator emergency situations and alarms mac free<br />
- ship simulator walk around the ship and add passengers mac download<br />
- ship simulator shoot guns on battleships and aircraft carriers mac free<br />
- ship simulator add planes that can fly and shoot on aircraft carriers mac download <br />
- ship simulator make funnels fall and split in half on sinking ships mac free <br />
- ship simulator add terrain in the sandbox mode mac download <br />
- ship simulator add real ports and landmarks in the open world map mac free <br />
- ship simulator add more real cruise ships and luxury liners mac download <br />
- ship simulator add more variety of horns for modern and classic ships mac free</p>
- <h1>Conclusion</h1>
- <p>In conclusion, ship simulator games are a great way to experience the thrill and challenge of sailing on the sea. They can also help you learn more about the maritime industry and improve your skills and knowledge. However, not all ship simulator games are free to download for Mac users. Some of them require you to pay a certain amount of money or subscribe to a service to enjoy the full content and features.</p>
- <p>However, there are also some free alternatives that you can try if you are looking for ship simulator games for Mac. We have reviewed three of them in this article: Ship Handling Simulator, The Ship Simulator 2022, and NAUTIS Home - Ship Simulator. We have compared their features, pros and cons, and how to download them for Mac users. We hope that this article has helped you find the best ship simulator game for your Mac device.</p>
- <h2>FAQs</h2>
- <ol>
- <li>What are ship simulator games?</li>
- <p>Ship simulator games are a type of simulation games that allow you to control various types of ships and experience realistic maritime scenarios.</p>
- <li>Why are ship simulator games popular?</li>
- <p>Ship simulator games are popular because they can be fun, educational, and challenging, depending on the game mode, difficulty, and features.</p>
- <li>Are all ship simulator games free to download for Mac users?</li>
- <p>No, not all ship simulator games are free to download for Mac users. Some of them require you to purchase the game or pay a subscription fee to access the full content.</p>
- <li>What are some free alternatives for ship simulator games for Mac users?</li>
- <p>Some free alternatives for ship simulator games for Mac users are Ship Handling Simulator, The Ship Simulator 2022, and NAUTIS Home - Ship Simulator.</p>
- <li>How can I download ship simulator games for Mac users?</li>
- <p>You can download ship simulator games for Mac users from the App Store or from the official websites of the developers.</p>
- </ol></p> 401be4b1e0<br />
- <br />
- <br />
spaces/1toTree/lora_test/ppdiffusers/pipelines/pndm/pipeline_pndm.py DELETED
@@ -1,94 +0,0 @@
- # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
- # Copyright 2022 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from typing import List, Optional, Tuple, Union
-
- import paddle
-
- from ...models import UNet2DModel
- from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
- from ...schedulers import PNDMScheduler
-
-
- class PNDMPipeline(DiffusionPipeline):
-     r"""
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular xxxx, etc.)
-
-     Parameters:
-         unet (`UNet2DModel`): U-Net architecture to denoise the encoded image latents.
-         scheduler ([`SchedulerMixin`]):
-             The `PNDMScheduler` to be used in combination with `unet` to denoise the encoded image.
-     """
-
-     unet: UNet2DModel
-     scheduler: PNDMScheduler
-
-     def __init__(self, unet: UNet2DModel, scheduler: PNDMScheduler):
-         super().__init__()
-         self.register_modules(unet=unet, scheduler=scheduler)
-
-     @paddle.no_grad()
-     def __call__(
-         self,
-         batch_size: int = 1,
-         num_inference_steps: int = 50,
-         generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-         **kwargs,
-     ) -> Union[ImagePipelineOutput, Tuple]:
-         r"""
-         Args:
-             batch_size (`int`, `optional`, defaults to 1): The number of images to generate.
-             num_inference_steps (`int`, `optional`, defaults to 50):
-                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                 expense of slower inference.
-             generator (`paddle.Generator`, `optional`): A [paddle
-                 generator](to make generation deterministic.
-             output_type (`str`, `optional`, defaults to `"pil"`): The output format of the generate image. Choose
-                 between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-             return_dict (`bool`, `optional`, defaults to `True`): Whether or not to return a
-                 [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
-
-         Returns:
-             [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
-             `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the
-             generated images.
-         """
-         # For more information on the sampling method you can take a look at Algorithm 2 of
-         # the official paper: https://arxiv.org/pdf/2202.09778.pdf
-
-         # Sample gaussian noise to begin loop
-         image = paddle.randn(
-             (batch_size, self.unet.in_channels, self.unet.sample_size, self.unet.sample_size),
-             generator=generator,
-         )
-
-         self.scheduler.set_timesteps(num_inference_steps)
-         for t in self.progress_bar(self.scheduler.timesteps):
-             model_output = self.unet(image, t).sample
-
-             image = self.scheduler.step(model_output, t, image).prev_sample
-
-         image = (image / 2 + 0.5).clip(0, 1)
-         image = image.transpose([0, 2, 3, 1]).numpy()
-         if output_type == "pil":
-             image = self.numpy_to_pil(image)
-
-         if not return_dict:
-             return (image,)
-
-         return ImagePipelineOutput(images=image)
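
For orientation, here is a minimal usage sketch of the pipeline deleted above (not part of the commit). The checkpoint id is a placeholder, and the call arguments follow the `__call__` docstring:

```python
# Sketch only: "org/pndm-checkpoint" is a hypothetical model id, and this
# assumes ppdiffusers' standard DiffusionPipeline.from_pretrained loader.
from ppdiffusers import PNDMPipeline

pipe = PNDMPipeline.from_pretrained("org/pndm-checkpoint")
out = pipe(batch_size=1, num_inference_steps=50, output_type="pil")
out.images[0].save("pndm_sample.png")  # ImagePipelineOutput holds a list of PIL images
```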
spaces/7thHeaven/GPT2WordPress/app.py DELETED
@@ -1,109 +0,0 @@
- import streamlit as st
- import requests
- from wordpress_xmlrpc import Client, WordPressPost
- from wordpress_xmlrpc.methods.posts import NewPost
- import os
- from dotenv import load_dotenv
-
- load_dotenv()
- openai_api_key = os.getenv("OPENAI_API_KEY")
- wp_url = f"{os.getenv('WP_URL')}/xmlrpc.php"
- wp_username = os.getenv("WP_USERNAME")
- wp_password = os.getenv("WP_PASSWORD")
-
- if openai_api_key:
-
-     def get_filetext(filename, cache={}):
-         if filename not in cache:
-             if not os.path.exists(filename):
-                 raise ValueError(f"ファイル '{filename}' が見つかりませんでした")
-             with open(filename, "r") as f:
-                 cache[filename] = f.read()
-         return cache[filename]
-
-     def generate_blog_post(prompt):
-         constraints = get_filetext(filename="constraints.md")
-
-         data = {
-             "model": "gpt-4",
-             "messages": [
-                 {"role": "system", "content": constraints},
-                 {"role": "user", "content": prompt},
-             ],
-             "max_tokens": 1024,
-             "n": 1,
-             "stop": None,
-             "temperature": 0.7,
-         }
-
-         response = requests.post(
-             "https://api.openai.com/v1/chat/completions",
-             headers={
-                 "Content-Type": "application/json",
-                 "Authorization": f"Bearer {openai_api_key}"
-             },
-             json=data
-         )
-
-         response.raise_for_status()
-         choice = response.json()['choices'][0]
-         blog_text = choice['message']['content'].strip()
-         return blog_text
-
-     def post_to_wordpress(title, content):
-         client = Client(wp_url, wp_username, wp_password)
-         post = WordPressPost()
-         post.title = title
-         post.content = content
-         post.post_status = "publish"
-         post_id = client.call(NewPost(post))
-         return post_id
-
-     st.title("ChatGPTによるブログ記事生成")
-     prompt = st.text_input("記事のタイトルを入力してください:")
-
-     generated_post = st.session_state.get("generated_post", None)
-
-     if st.button("記事生成"):
-         generated_post = generate_blog_post(prompt)
-         st.session_state.generated_post = generated_post
-         st.write("生成されたブログ記事:")
-         st.write(generated_post)
-
-     if generated_post:
-         if st.button("投稿"):
-             post_id = post_to_wordpress(prompt, generated_post)
-             st.write(f"ブログ記事が投稿されました。記事ID: {post_id}")
-
- else:
-     st.write("サービスを利用するためには、このスペースを複製し、以下の環境変数を定義してください。設定方法はosenv_setting_tips.txtを参照してください。")
-     st.write("OPENAI_API_KEY, WP_URL, WP_USERNAME, WP_PASSWORD")
-
- st.markdown(
-     """
-     <h3>注意事項</h3>
-     <ol>
-         <li style="font-size: small;">投稿前に記事の内容をよく確認してください。</li>
-         <li style="font-size: small;">OpenAIのAPIキーや、WordPressのURL、ユーザーID、パスワードはシステム設定にて設定します。詳しくはosenv_setting_tips.txtを参照ください。</li>
-         <li style="font-size: small;">constraints.mdを修正すると、生成される記事の内容、雰囲気をカスタマイズすることが可能です。</li>
-         <li style="font-size: small;">当サービスでは、OpenAI社のChatGPT APIのgpt-4を使用しております。</li>
-         <li style="font-size: small;">当サービスで生成されたコンテンツは、OpenAI が提供する人工知能によるものであり、当サービスやOpenAI がその正確性や信頼性を保証するものではありません。</li>
-         <li style="font-size: small;"><a href="https://platform.openai.com/docs/usage-policies">OpenAI の利用規約</a>に従い、データ保持しない方針です(ただし諸般の事情によっては変更する可能性はございます)。
-         <li style="font-size: small;">当サービスで生成されたコンテンツは事実確認をした上で、コンテンツ生成者およびコンテンツ利用者の責任において利用してください。</li>
-         <li style="font-size: small;">当サービスでの使用により発生したいかなる損害についても、当社は一切の責任を負いません。</li>
-         <li style="font-size: small;">当サービスはβ版のため、予告なくサービスを終了する場合がございます。</li>
-     </ol>
-     <h3>謝辞</h3>
-     <ol>
-         <li style="font-size: small;">このサービスは<a href="https://huggingface.co/spaces/shigel/aiemo" target="_blank">aiemo</a>を参考に作成しました。大変感謝しております!特に、性格設定のアイデアは秀逸です。ありがとうございました!</li>
-     </ol>
-     """,
-     unsafe_allow_html=True,
- )
-
- st.markdown(
-     f'<a href="https://huggingface.co/spaces/7thHeaven/GPT2WordPress?duplicate=true">'
-     f'<img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>',
-     unsafe_allow_html=True,
- )
-
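
The app above reads four environment variables via python-dotenv (OPENAI_API_KEY, WP_URL, WP_USERNAME, WP_PASSWORD). A small preflight check, sketched below and not part of the original Space, makes missing configuration fail fast:

```python
# Sketch: verify the variables the deleted app.py expects before launching.
import os
from dotenv import load_dotenv

load_dotenv()
required = ("OPENAI_API_KEY", "WP_URL", "WP_USERNAME", "WP_PASSWORD")
missing = [name for name in required if not os.getenv(name)]
if missing:
    raise SystemExit(f"Missing environment variables: {', '.join(missing)}")
```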
spaces/801artistry/RVC801/lib/infer_pack/modules.py DELETED
@@ -1,522 +0,0 @@
1
- import copy
2
- import math
3
- import numpy as np
4
- import scipy
5
- import torch
6
- from torch import nn
7
- from torch.nn import functional as F
8
-
9
- from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
10
- from torch.nn.utils import weight_norm, remove_weight_norm
11
-
12
- from lib.infer_pack import commons
13
- from lib.infer_pack.commons import init_weights, get_padding
14
- from lib.infer_pack.transforms import piecewise_rational_quadratic_transform
15
-
16
-
17
- LRELU_SLOPE = 0.1
18
-
19
-
20
- class LayerNorm(nn.Module):
21
- def __init__(self, channels, eps=1e-5):
22
- super().__init__()
23
- self.channels = channels
24
- self.eps = eps
25
-
26
- self.gamma = nn.Parameter(torch.ones(channels))
27
- self.beta = nn.Parameter(torch.zeros(channels))
28
-
29
- def forward(self, x):
30
- x = x.transpose(1, -1)
31
- x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
32
- return x.transpose(1, -1)
33
-
34
-
35
- class ConvReluNorm(nn.Module):
36
- def __init__(
37
- self,
38
- in_channels,
39
- hidden_channels,
40
- out_channels,
41
- kernel_size,
42
- n_layers,
43
- p_dropout,
44
- ):
45
- super().__init__()
46
- self.in_channels = in_channels
47
- self.hidden_channels = hidden_channels
48
- self.out_channels = out_channels
49
- self.kernel_size = kernel_size
50
- self.n_layers = n_layers
51
- self.p_dropout = p_dropout
52
- assert n_layers > 1, "Number of layers should be larger than 0."
53
-
54
- self.conv_layers = nn.ModuleList()
55
- self.norm_layers = nn.ModuleList()
56
- self.conv_layers.append(
57
- nn.Conv1d(
58
- in_channels, hidden_channels, kernel_size, padding=kernel_size // 2
59
- )
60
- )
61
- self.norm_layers.append(LayerNorm(hidden_channels))
62
- self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
63
- for _ in range(n_layers - 1):
64
- self.conv_layers.append(
65
- nn.Conv1d(
66
- hidden_channels,
67
- hidden_channels,
68
- kernel_size,
69
- padding=kernel_size // 2,
70
- )
71
- )
72
- self.norm_layers.append(LayerNorm(hidden_channels))
73
- self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
74
- self.proj.weight.data.zero_()
75
- self.proj.bias.data.zero_()
76
-
77
- def forward(self, x, x_mask):
78
- x_org = x
79
- for i in range(self.n_layers):
80
- x = self.conv_layers[i](x * x_mask)
81
- x = self.norm_layers[i](x)
82
- x = self.relu_drop(x)
83
- x = x_org + self.proj(x)
84
- return x * x_mask
85
-
86
-
87
- class DDSConv(nn.Module):
88
- """
89
- Dialted and Depth-Separable Convolution
90
- """
91
-
92
- def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
93
- super().__init__()
94
- self.channels = channels
95
- self.kernel_size = kernel_size
96
- self.n_layers = n_layers
97
- self.p_dropout = p_dropout
98
-
99
- self.drop = nn.Dropout(p_dropout)
100
- self.convs_sep = nn.ModuleList()
101
- self.convs_1x1 = nn.ModuleList()
102
- self.norms_1 = nn.ModuleList()
103
- self.norms_2 = nn.ModuleList()
104
- for i in range(n_layers):
105
- dilation = kernel_size**i
106
- padding = (kernel_size * dilation - dilation) // 2
107
- self.convs_sep.append(
108
- nn.Conv1d(
109
- channels,
110
- channels,
111
- kernel_size,
112
- groups=channels,
113
- dilation=dilation,
114
- padding=padding,
115
- )
116
- )
117
- self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
118
- self.norms_1.append(LayerNorm(channels))
119
- self.norms_2.append(LayerNorm(channels))
120
-
121
- def forward(self, x, x_mask, g=None):
122
- if g is not None:
123
- x = x + g
124
- for i in range(self.n_layers):
125
- y = self.convs_sep[i](x * x_mask)
126
- y = self.norms_1[i](y)
127
- y = F.gelu(y)
128
- y = self.convs_1x1[i](y)
129
- y = self.norms_2[i](y)
130
- y = F.gelu(y)
131
- y = self.drop(y)
132
- x = x + y
133
- return x * x_mask
134
-
135
-
136
- class WN(torch.nn.Module):
137
- def __init__(
138
- self,
139
- hidden_channels,
140
- kernel_size,
141
- dilation_rate,
142
- n_layers,
143
- gin_channels=0,
144
- p_dropout=0,
145
- ):
146
- super(WN, self).__init__()
147
- assert kernel_size % 2 == 1
148
- self.hidden_channels = hidden_channels
149
- self.kernel_size = (kernel_size,)
150
- self.dilation_rate = dilation_rate
151
- self.n_layers = n_layers
152
- self.gin_channels = gin_channels
153
- self.p_dropout = p_dropout
154
-
155
- self.in_layers = torch.nn.ModuleList()
156
- self.res_skip_layers = torch.nn.ModuleList()
157
- self.drop = nn.Dropout(p_dropout)
158
-
159
- if gin_channels != 0:
160
- cond_layer = torch.nn.Conv1d(
161
- gin_channels, 2 * hidden_channels * n_layers, 1
162
- )
163
- self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight")
164
-
165
- for i in range(n_layers):
166
- dilation = dilation_rate**i
167
- padding = int((kernel_size * dilation - dilation) / 2)
168
- in_layer = torch.nn.Conv1d(
169
- hidden_channels,
170
- 2 * hidden_channels,
171
- kernel_size,
172
- dilation=dilation,
173
- padding=padding,
174
- )
175
- in_layer = torch.nn.utils.weight_norm(in_layer, name="weight")
176
- self.in_layers.append(in_layer)
177
-
178
- # last one is not necessary
179
- if i < n_layers - 1:
180
- res_skip_channels = 2 * hidden_channels
181
- else:
182
- res_skip_channels = hidden_channels
183
-
184
- res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
185
- res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight")
186
- self.res_skip_layers.append(res_skip_layer)
187
-
188
- def forward(self, x, x_mask, g=None, **kwargs):
189
- output = torch.zeros_like(x)
190
- n_channels_tensor = torch.IntTensor([self.hidden_channels])
191
-
192
- if g is not None:
193
- g = self.cond_layer(g)
194
-
195
- for i in range(self.n_layers):
196
- x_in = self.in_layers[i](x)
197
- if g is not None:
198
- cond_offset = i * 2 * self.hidden_channels
199
- g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
200
- else:
201
- g_l = torch.zeros_like(x_in)
202
-
203
- acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
204
- acts = self.drop(acts)
205
-
206
- res_skip_acts = self.res_skip_layers[i](acts)
207
- if i < self.n_layers - 1:
208
- res_acts = res_skip_acts[:, : self.hidden_channels, :]
209
- x = (x + res_acts) * x_mask
210
- output = output + res_skip_acts[:, self.hidden_channels :, :]
211
- else:
212
- output = output + res_skip_acts
213
- return output * x_mask
214
-
215
- def remove_weight_norm(self):
216
- if self.gin_channels != 0:
217
- torch.nn.utils.remove_weight_norm(self.cond_layer)
218
- for l in self.in_layers:
219
- torch.nn.utils.remove_weight_norm(l)
220
- for l in self.res_skip_layers:
221
- torch.nn.utils.remove_weight_norm(l)
222
-
223
-
224
- class ResBlock1(torch.nn.Module):
225
- def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
226
- super(ResBlock1, self).__init__()
227
- self.convs1 = nn.ModuleList(
228
- [
229
- weight_norm(
230
- Conv1d(
231
- channels,
232
- channels,
233
- kernel_size,
234
- 1,
235
- dilation=dilation[0],
236
- padding=get_padding(kernel_size, dilation[0]),
237
- )
238
- ),
239
- weight_norm(
240
- Conv1d(
241
- channels,
242
- channels,
243
- kernel_size,
244
- 1,
245
- dilation=dilation[1],
246
- padding=get_padding(kernel_size, dilation[1]),
247
- )
248
- ),
249
- weight_norm(
250
- Conv1d(
251
- channels,
252
- channels,
253
- kernel_size,
254
- 1,
255
- dilation=dilation[2],
256
- padding=get_padding(kernel_size, dilation[2]),
257
- )
258
- ),
259
- ]
260
- )
261
- self.convs1.apply(init_weights)
262
-
263
- self.convs2 = nn.ModuleList(
264
- [
265
- weight_norm(
266
- Conv1d(
267
- channels,
268
- channels,
269
- kernel_size,
270
- 1,
271
- dilation=1,
272
- padding=get_padding(kernel_size, 1),
273
- )
274
- ),
275
- weight_norm(
276
- Conv1d(
277
- channels,
278
- channels,
279
- kernel_size,
280
- 1,
281
- dilation=1,
282
- padding=get_padding(kernel_size, 1),
283
- )
284
- ),
285
- weight_norm(
286
- Conv1d(
287
- channels,
288
- channels,
289
- kernel_size,
290
- 1,
291
- dilation=1,
292
- padding=get_padding(kernel_size, 1),
293
- )
294
- ),
295
- ]
296
- )
297
- self.convs2.apply(init_weights)
298
-
299
- def forward(self, x, x_mask=None):
300
- for c1, c2 in zip(self.convs1, self.convs2):
301
- xt = F.leaky_relu(x, LRELU_SLOPE)
302
- if x_mask is not None:
303
- xt = xt * x_mask
304
- xt = c1(xt)
305
- xt = F.leaky_relu(xt, LRELU_SLOPE)
306
- if x_mask is not None:
307
- xt = xt * x_mask
308
- xt = c2(xt)
309
- x = xt + x
310
- if x_mask is not None:
311
- x = x * x_mask
312
- return x
313
-
314
- def remove_weight_norm(self):
315
- for l in self.convs1:
316
- remove_weight_norm(l)
317
- for l in self.convs2:
318
- remove_weight_norm(l)
319
-
320
-
321
- class ResBlock2(torch.nn.Module):
322
- def __init__(self, channels, kernel_size=3, dilation=(1, 3)):
323
- super(ResBlock2, self).__init__()
324
- self.convs = nn.ModuleList(
325
- [
326
- weight_norm(
327
- Conv1d(
328
- channels,
329
- channels,
330
- kernel_size,
331
- 1,
332
- dilation=dilation[0],
333
- padding=get_padding(kernel_size, dilation[0]),
334
- )
335
- ),
336
- weight_norm(
337
- Conv1d(
338
- channels,
339
- channels,
340
- kernel_size,
341
- 1,
342
- dilation=dilation[1],
343
- padding=get_padding(kernel_size, dilation[1]),
344
- )
345
- ),
346
- ]
347
- )
348
- self.convs.apply(init_weights)
349
-
350
- def forward(self, x, x_mask=None):
351
- for c in self.convs:
352
- xt = F.leaky_relu(x, LRELU_SLOPE)
353
- if x_mask is not None:
354
- xt = xt * x_mask
355
- xt = c(xt)
356
- x = xt + x
357
- if x_mask is not None:
358
- x = x * x_mask
359
- return x
360
-
361
- def remove_weight_norm(self):
362
- for l in self.convs:
363
- remove_weight_norm(l)
364
-
365
-
366
- class Log(nn.Module):
367
- def forward(self, x, x_mask, reverse=False, **kwargs):
368
- if not reverse:
369
- y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
370
- logdet = torch.sum(-y, [1, 2])
371
- return y, logdet
372
- else:
373
- x = torch.exp(x) * x_mask
374
- return x
375
-
376
-
377
- class Flip(nn.Module):
378
- def forward(self, x, *args, reverse=False, **kwargs):
379
- x = torch.flip(x, [1])
380
- if not reverse:
381
- logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
382
- return x, logdet
383
- else:
384
- return x
385
-
386
-
387
- class ElementwiseAffine(nn.Module):
388
- def __init__(self, channels):
389
- super().__init__()
390
- self.channels = channels
391
- self.m = nn.Parameter(torch.zeros(channels, 1))
392
- self.logs = nn.Parameter(torch.zeros(channels, 1))
393
-
394
- def forward(self, x, x_mask, reverse=False, **kwargs):
395
- if not reverse:
396
- y = self.m + torch.exp(self.logs) * x
397
- y = y * x_mask
398
- logdet = torch.sum(self.logs * x_mask, [1, 2])
399
- return y, logdet
400
- else:
401
- x = (x - self.m) * torch.exp(-self.logs) * x_mask
402
- return x
403
-
-
- class ResidualCouplingLayer(nn.Module):
-     def __init__(
-         self,
-         channels,
-         hidden_channels,
-         kernel_size,
-         dilation_rate,
-         n_layers,
-         p_dropout=0,
-         gin_channels=0,
-         mean_only=False,
-     ):
-         assert channels % 2 == 0, "channels should be divisible by 2"
-         super().__init__()
-         self.channels = channels
-         self.hidden_channels = hidden_channels
-         self.kernel_size = kernel_size
-         self.dilation_rate = dilation_rate
-         self.n_layers = n_layers
-         self.half_channels = channels // 2
-         self.mean_only = mean_only
-
-         self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
-         self.enc = WN(
-             hidden_channels,
-             kernel_size,
-             dilation_rate,
-             n_layers,
-             p_dropout=p_dropout,
-             gin_channels=gin_channels,
-         )
-         self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
-         self.post.weight.data.zero_()
-         self.post.bias.data.zero_()
-
-     def forward(self, x, x_mask, g=None, reverse=False):
-         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
-         h = self.pre(x0) * x_mask
-         h = self.enc(h, x_mask, g=g)
-         stats = self.post(h) * x_mask
-         if not self.mean_only:
-             m, logs = torch.split(stats, [self.half_channels] * 2, 1)
-         else:
-             m = stats
-             logs = torch.zeros_like(m)
-
-         if not reverse:
-             x1 = m + x1 * torch.exp(logs) * x_mask
-             x = torch.cat([x0, x1], 1)
-             logdet = torch.sum(logs, [1, 2])
-             return x, logdet
-         else:
-             x1 = (x1 - m) * torch.exp(-logs) * x_mask
-             x = torch.cat([x0, x1], 1)
-             return x
-
-     def remove_weight_norm(self):
-         self.enc.remove_weight_norm()
-
-
- class ConvFlow(nn.Module):
-     def __init__(
-         self,
-         in_channels,
-         filter_channels,
-         kernel_size,
-         n_layers,
-         num_bins=10,
-         tail_bound=5.0,
-     ):
-         super().__init__()
-         self.in_channels = in_channels
-         self.filter_channels = filter_channels
-         self.kernel_size = kernel_size
-         self.n_layers = n_layers
-         self.num_bins = num_bins
-         self.tail_bound = tail_bound
-         self.half_channels = in_channels // 2
-
-         self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
-         self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
-         self.proj = nn.Conv1d(
-             filter_channels, self.half_channels * (num_bins * 3 - 1), 1
-         )
-         self.proj.weight.data.zero_()
-         self.proj.bias.data.zero_()
-
-     def forward(self, x, x_mask, g=None, reverse=False):
-         x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
-         h = self.pre(x0)
-         h = self.convs(h, x_mask, g=g)
-         h = self.proj(h) * x_mask
-
-         b, c, t = x0.shape
-         h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c*?, t] -> [b, c, t, ?]
-
-         unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
-         unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(
-             self.filter_channels
-         )
-         unnormalized_derivatives = h[..., 2 * self.num_bins :]
-
-         x1, logabsdet = piecewise_rational_quadratic_transform(
-             x1,
-             unnormalized_widths,
-             unnormalized_heights,
-             unnormalized_derivatives,
-             inverse=reverse,
-             tails="linear",
-             tail_bound=self.tail_bound,
-         )
-
-         x = torch.cat([x0, x1], 1) * x_mask
-         logdet = torch.sum(logabsdet * x_mask, [1, 2])
-         if not reverse:
-             return x, logdet
-         else:
-             return x
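Both classes above are invertible coupling layers: half of the channels parameterize a transform of the other half, and `reverse=True` applies the exact inverse. A minimal self-contained sketch of that round-trip property in plain PyTorch (the toy `Conv1d` stands in for the `WN` encoder; all sizes are arbitrary):

import torch
import torch.nn as nn

class ToyCoupling(nn.Module):
    """Affine coupling, the same pattern as ResidualCouplingLayer above."""
    def __init__(self, channels=4):
        super().__init__()
        self.half = channels // 2
        self.net = nn.Conv1d(self.half, self.half * 2, 1)  # predicts (m, logs)

    def forward(self, x, reverse=False):
        x0, x1 = torch.split(x, [self.half] * 2, 1)
        m, logs = torch.split(self.net(x0), [self.half] * 2, 1)
        x1 = (x1 - m) * torch.exp(-logs) if reverse else m + x1 * torch.exp(logs)
        return torch.cat([x0, x1], 1)

layer = ToyCoupling()
x = torch.randn(2, 4, 16)  # [batch, channels, time]
y = layer(x)
assert torch.allclose(layer(y, reverse=True), x, atol=1e-5)  # exact inverse

Note that the original layer zero-initializes its final convolution, so the flow starts out as the identity map, which keeps early training stable.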
 
 
spaces/801artistry/RVC801/venv.sh DELETED
@@ -1 +0,0 @@
- python3.8 -m venv .venv
 
 
spaces/AIFILMS/generate_human_motion/VQ-Trans/options/option_transformer.py DELETED
@@ -1,68 +0,0 @@
- import argparse
-
- def get_args_parser():
-     parser = argparse.ArgumentParser(description='Optimal Transport AutoEncoder training for Amass',
-                                      add_help=True,
-                                      formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-
-     ## dataloader
-     parser.add_argument('--dataname', type=str, default='kit', help='dataset name')
-     parser.add_argument('--batch-size', default=128, type=int, help='batch size')
-     parser.add_argument('--fps', default=[20], nargs="+", type=int, help='frames per second')
-     parser.add_argument('--seq-len', type=int, default=64, help='training motion length')
-
-     ## optimization
-     parser.add_argument('--total-iter', default=100000, type=int, help='number of total iterations to run')
-     parser.add_argument('--warm-up-iter', default=1000, type=int, help='number of total iterations for warmup')
-     parser.add_argument('--lr', default=2e-4, type=float, help='max learning rate')
-     parser.add_argument('--lr-scheduler', default=[60000], nargs="+", type=int, help='learning rate schedule (iterations)')
-     parser.add_argument('--gamma', default=0.05, type=float, help='learning rate decay')
-
-     parser.add_argument('--weight-decay', default=1e-6, type=float, help='weight decay')
-     parser.add_argument('--decay-option', default='all', type=str, choices=['all', 'noVQ'], help='disable weight decay on codebook')
-     parser.add_argument('--optimizer', default='adamw', type=str, choices=['adam', 'adamw'], help='optimizer to use')
-
-     ## vqvae arch
-     parser.add_argument('--code-dim', type=int, default=512, help='embedding dimension')
-     parser.add_argument('--nb-code', type=int, default=512, help='number of embeddings')
-     parser.add_argument('--mu', type=float, default=0.99, help='exponential moving average to update the codebook')
-     parser.add_argument('--down-t', type=int, default=3, help='downsampling rate')
-     parser.add_argument('--stride-t', type=int, default=2, help='stride size')
-     parser.add_argument('--width', type=int, default=512, help='width of the network')
-     parser.add_argument('--depth', type=int, default=3, help='depth of the network')
-     parser.add_argument('--dilation-growth-rate', type=int, default=3, help='dilation growth rate')
-     parser.add_argument('--output-emb-width', type=int, default=512, help='output embedding width')
-     parser.add_argument('--vq-act', type=str, default='relu', choices=['relu', 'silu', 'gelu'], help='activation function')
-
-     ## gpt arch
-     parser.add_argument('--block-size', type=int, default=25, help='seq len')
-     parser.add_argument('--embed-dim-gpt', type=int, default=512, help='embedding dimension')
-     parser.add_argument('--clip-dim', type=int, default=512, help='latent dimension in the clip feature')
-     parser.add_argument('--num-layers', type=int, default=2, help='number of transformer layers')
-     parser.add_argument('--n-head-gpt', type=int, default=8, help='number of heads')
-     parser.add_argument('--ff-rate', type=int, default=4, help='feedforward size')
-     parser.add_argument('--drop-out-rate', type=float, default=0.1, help='dropout ratio in the pos encoding')
-
-     ## quantizer
-     parser.add_argument('--quantizer', type=str, default='ema_reset', choices=['ema', 'orig', 'ema_reset', 'reset'], help='quantizer variant')
-     parser.add_argument('--quantbeta', type=float, default=1.0, help='quantizer beta')
-
-     ## resume
-     parser.add_argument('--resume-pth', type=str, default=None, help='resume vq pth')
-     parser.add_argument('--resume-trans', type=str, default=None, help='resume gpt pth')
-
-     ## output directory
-     parser.add_argument('--out-dir', type=str, default='output_GPT_Final/', help='output directory')
-     parser.add_argument('--exp-name', type=str, default='exp_debug', help='name of the experiment, will create a file inside out-dir')
-     parser.add_argument('--vq-name', type=str, default='exp_debug', help='name of the generated dataset .npy, will create a file inside out-dir')
-
-     ## other
-     parser.add_argument('--print-iter', default=200, type=int, help='print frequency')
-     parser.add_argument('--eval-iter', default=5000, type=int, help='evaluation frequency')
-     parser.add_argument('--seed', default=123, type=int, help='seed for initializing training')
-     parser.add_argument('--if-maxtest', action='store_true', help='test in max')
-     parser.add_argument('--pkeep', type=float, default=1.0, help='keep rate for gpt training')
-
-     return parser.parse_args()
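Since `get_args_parser()` calls `parse_args()` itself instead of returning the parser, callers override these defaults through argv. A hedged usage sketch (the import path follows the repo layout above; the script name is arbitrary):

import sys
from options.option_transformer import get_args_parser

# get_args_parser() reads sys.argv directly, so simulate a command line:
sys.argv = ['train.py', '--dataname', 't2m', '--batch-size', '64', '--lr', '1e-4']
args = get_args_parser()
print(args.dataname, args.batch_size, args.lr)  # -> t2m 64 0.0001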
 
 
spaces/AIFILMS/generate_human_motion/VQ-Trans/utils/paramUtil.py DELETED
@@ -1,63 +0,0 @@
- import numpy as np
-
- # Define a kinematic tree for the skeletal structure
- kit_kinematic_chain = [[0, 11, 12, 13, 14, 15], [0, 16, 17, 18, 19, 20], [0, 1, 2, 3, 4], [3, 5, 6, 7], [3, 8, 9, 10]]
-
- kit_raw_offsets = np.array(
-     [
-         [0, 0, 0],
-         [0, 1, 0],
-         [0, 1, 0],
-         [0, 1, 0],
-         [0, 1, 0],
-         [1, 0, 0],
-         [0, -1, 0],
-         [0, -1, 0],
-         [-1, 0, 0],
-         [0, -1, 0],
-         [0, -1, 0],
-         [1, 0, 0],
-         [0, -1, 0],
-         [0, -1, 0],
-         [0, 0, 1],
-         [0, 0, 1],
-         [-1, 0, 0],
-         [0, -1, 0],
-         [0, -1, 0],
-         [0, 0, 1],
-         [0, 0, 1]
-     ]
- )
-
- t2m_raw_offsets = np.array([[0, 0, 0],
-                             [1, 0, 0],
-                             [-1, 0, 0],
-                             [0, 1, 0],
-                             [0, -1, 0],
-                             [0, -1, 0],
-                             [0, 1, 0],
-                             [0, -1, 0],
-                             [0, -1, 0],
-                             [0, 1, 0],
-                             [0, 0, 1],
-                             [0, 0, 1],
-                             [0, 1, 0],
-                             [1, 0, 0],
-                             [-1, 0, 0],
-                             [0, 0, 1],
-                             [0, -1, 0],
-                             [0, -1, 0],
-                             [0, -1, 0],
-                             [0, -1, 0],
-                             [0, -1, 0],
-                             [0, -1, 0]])
-
- t2m_kinematic_chain = [[0, 2, 5, 8, 11], [0, 1, 4, 7, 10], [0, 3, 6, 9, 12, 15], [9, 14, 17, 19, 21], [9, 13, 16, 18, 20]]
- t2m_left_hand_chain = [[20, 22, 23, 24], [20, 34, 35, 36], [20, 25, 26, 27], [20, 31, 32, 33], [20, 28, 29, 30]]
- t2m_right_hand_chain = [[21, 43, 44, 45], [21, 46, 47, 48], [21, 40, 41, 42], [21, 37, 38, 39], [21, 49, 50, 51]]
-
- kit_tgt_skel_id = '03950'
-
- t2m_tgt_skel_id = '000021'
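The offset arrays hold one unit direction vector per joint, and each kinematic chain lists joint indices from the root outward, so a rest-pose skeleton follows by accumulating offsets along each chain. A hedged sketch with unit bone lengths (real data rescales each bone per skeleton):

import numpy as np
from utils.paramUtil import kit_raw_offsets, kit_kinematic_chain  # repo-local import

positions = np.zeros((kit_raw_offsets.shape[0], 3), dtype=float)
for chain in kit_kinematic_chain:
    for parent, child in zip(chain[:-1], chain[1:]):
        # each child joint sits one (unit-length) bone away from its parent
        positions[child] = positions[parent] + kit_raw_offsets[child]
print(positions[15])  # tip of the first chain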
 
 
spaces/AIGC-Audio/AudioGPT/text_to_audio/Make_An_Audio/ldm/modules/encoders/open_clap/factory.py DELETED
@@ -1,257 +0,0 @@
- import json
- import logging
- import os
- import pathlib
- import re
- from copy import deepcopy
- from pathlib import Path
-
- import torch
-
- from .model import CLAP, convert_weights_to_fp16
- from .openai import load_openai_model
- from .pretrained import get_pretrained_url, download_pretrained
- from .transform import image_transform
-
- _MODEL_CONFIG_PATHS = [Path(__file__).parent / "model_configs/"]
- _MODEL_CONFIGS = {}  # registry of model architecture configs (model_name: config)
-
-
- def _natural_key(string_):
-     return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
-
-
- def _rescan_model_configs():
-     global _MODEL_CONFIGS
-
-     config_ext = (".json",)
-     config_files = []
-     for config_path in _MODEL_CONFIG_PATHS:
-         if config_path.is_file() and config_path.suffix in config_ext:
-             config_files.append(config_path)
-         elif config_path.is_dir():
-             for ext in config_ext:
-                 config_files.extend(config_path.glob(f"*{ext}"))
-
-     for cf in config_files:
-         with open(cf, "r") as f:
-             model_cfg = json.load(f)
-             if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")):
-                 _MODEL_CONFIGS[cf.stem] = model_cfg
-
-     _MODEL_CONFIGS = {
-         k: v
-         for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
-     }
-
-
- _rescan_model_configs()  # initial populate of model config registry
-
-
- def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
-     checkpoint = torch.load(checkpoint_path, map_location=map_location)
-     if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
-         state_dict = checkpoint["state_dict"]
-     else:
-         state_dict = checkpoint
-     if skip_params:
-         if next(iter(state_dict.items()))[0].startswith("module"):
-             state_dict = {k[7:]: v for k, v in state_dict.items()}
-     # for k in state_dict:
-     #     if k.startswith('transformer'):
-     #         v = state_dict.pop(k)
-     #         state_dict['text_branch.' + k[12:]] = v
-     return state_dict
-
-
- def create_model(
-     amodel_name: str,
-     tmodel_name: str,
-     pretrained: str = "",
-     precision: str = "fp32",
-     device: torch.device = torch.device("cpu"),
-     jit: bool = False,
-     force_quick_gelu: bool = False,
-     openai_model_cache_dir: str = os.path.expanduser("~/.cache/clip"),
-     skip_params=True,
-     pretrained_audio: str = "",
-     pretrained_text: str = "",
-     enable_fusion: bool = False,
-     fusion_type: str = 'None'
-     # pretrained_image: bool = False,
- ):
-     amodel_name = amodel_name.replace(
-         "/", "-"
-     )  # for callers using old naming with / in ViT names
-     pretrained_orig = pretrained
-     pretrained = pretrained.lower()
-     if pretrained == "openai":
-         if amodel_name in _MODEL_CONFIGS:
-             logging.info(f"Loading {amodel_name} model config.")
-             model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
-         else:
-             logging.error(
-                 f"Model config for {amodel_name} not found; available models {list_models()}."
-             )
-             raise RuntimeError(f"Model config for {amodel_name} not found.")
-
-         logging.info("Loading pretrained ViT-B-16 text encoder from OpenAI.")
-         # hard-code the model name
-         model_cfg["text_cfg"]["model_type"] = tmodel_name
-         model = load_openai_model(
-             "ViT-B-16",
-             model_cfg,
-             device=device,
-             jit=jit,
-             cache_dir=openai_model_cache_dir,
-             enable_fusion=enable_fusion,
-             fusion_type=fusion_type
-         )
-         # See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
-         if precision == "amp" or precision == "fp32":
-             model = model.float()
-     else:
-         if amodel_name in _MODEL_CONFIGS:
-             logging.info(f"Loading {amodel_name} model config.")
-             model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
-         else:
-             logging.error(
-                 f"Model config for {amodel_name} not found; available models {list_models()}."
-             )
-             raise RuntimeError(f"Model config for {amodel_name} not found.")
-
-         if force_quick_gelu:
-             # override for use of QuickGELU on non-OpenAI transformer models
-             model_cfg["quick_gelu"] = True
-
-         # if pretrained_image:
-         #     if 'timm_amodel_name' in model_cfg.get('vision_cfg', {}):
-         #         # pretrained weight loading for timm models set via vision_cfg
-         #         model_cfg['vision_cfg']['timm_model_pretrained'] = True
-         #     else:
-         #         assert False, 'pretrained image towers currently only supported for timm models'
-         model_cfg["text_cfg"]["model_type"] = tmodel_name
-         model_cfg["enable_fusion"] = enable_fusion
-         model_cfg["fusion_type"] = fusion_type
-         model = CLAP(**model_cfg)
-
-         if pretrained:
-             checkpoint_path = ""
-             url = get_pretrained_url(amodel_name, pretrained)
-             if url:
-                 checkpoint_path = download_pretrained(url, root=openai_model_cache_dir)
-             elif os.path.exists(pretrained_orig):
-                 checkpoint_path = pretrained_orig
-             if checkpoint_path:
-                 logging.info(f"Loading pretrained {amodel_name}-{tmodel_name} weights ({pretrained}).")
-                 ckpt = load_state_dict(checkpoint_path, skip_params=True)
-                 model.load_state_dict(ckpt)
-                 param_names = [n for n, p in model.named_parameters()]
-                 for n in param_names:
-                     print(n, "\t", "Loaded" if n in ckpt else "Unloaded")
-             else:
-                 logging.warning(
-                     f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
-                 )
-                 raise RuntimeError(
-                     f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
-                 )
-
-         if pretrained_audio:
-             if amodel_name.startswith('PANN'):
-                 if 'Cnn14_mAP' in pretrained_audio:  # official checkpoint
-                     audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
-                     audio_ckpt = audio_ckpt['model']
-                     keys = list(audio_ckpt.keys())
-                     for key in keys:
-                         if 'spectrogram_extractor' not in key and 'logmel_extractor' not in key:
-                             v = audio_ckpt.pop(key)
-                             audio_ckpt['audio_branch.' + key] = v
-                 elif os.path.basename(pretrained_audio).startswith('PANN'):  # checkpoint trained via HTSAT codebase
-                     audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
-                     audio_ckpt = audio_ckpt['state_dict']
-                     keys = list(audio_ckpt.keys())
-                     for key in keys:
-                         if key.startswith('sed_model'):
-                             v = audio_ckpt.pop(key)
-                             audio_ckpt['audio_branch.' + key[10:]] = v
-                 elif os.path.basename(pretrained_audio).startswith('finetuned'):  # checkpoint trained via linear probe codebase
-                     audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
-                 else:
-                     raise ValueError('Unknown audio checkpoint')
-             elif amodel_name.startswith('HTSAT'):
-                 if 'HTSAT_AudioSet_Saved' in pretrained_audio:  # official checkpoint
-                     audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
-                     audio_ckpt = audio_ckpt['state_dict']
-                     keys = list(audio_ckpt.keys())
-                     for key in keys:
-                         if key.startswith('sed_model') and ('spectrogram_extractor' not in key
-                                                             and 'logmel_extractor' not in key):
-                             v = audio_ckpt.pop(key)
-                             audio_ckpt['audio_branch.' + key[10:]] = v
-                 elif os.path.basename(pretrained_audio).startswith('HTSAT'):  # checkpoint trained via HTSAT codebase
-                     audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
-                     audio_ckpt = audio_ckpt['state_dict']
-                     keys = list(audio_ckpt.keys())
-                     for key in keys:
-                         if key.startswith('sed_model'):
-                             v = audio_ckpt.pop(key)
-                             audio_ckpt['audio_branch.' + key[10:]] = v
-                 elif os.path.basename(pretrained_audio).startswith('finetuned'):  # checkpoint trained via linear probe codebase
-                     audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
-                 else:
-                     raise ValueError('Unknown audio checkpoint')
-             else:
-                 raise ValueError('This audio encoder pretrained checkpoint is not supported')
-
-             model.load_state_dict(audio_ckpt, strict=False)
-             logging.info(f"Loading pretrained {amodel_name} weights ({pretrained_audio}).")
-             param_names = [n for n, p in model.named_parameters()]
-             for n in param_names:
-                 print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded")
-
-         model.to(device=device)
-         if precision == "fp16":
-             assert device.type != "cpu"
-             convert_weights_to_fp16(model)
-
-         if jit:
-             model = torch.jit.script(model)
-
-     return model, model_cfg
-
-
- def create_model_and_transforms(
-     model_name: str,
-     pretrained: str = "",
-     precision: str = "fp32",
-     device: torch.device = torch.device("cpu"),
-     jit: bool = False,
-     force_quick_gelu: bool = False,
-     # pretrained_image: bool = False,
- ):
-     model = create_model(
-         model_name,
-         pretrained,
-         precision,
-         device,
-         jit,
-         force_quick_gelu=force_quick_gelu,
-         # pretrained_image=pretrained_image
-     )
-     preprocess_train = image_transform(model.visual.image_size, is_train=True)
-     preprocess_val = image_transform(model.visual.image_size, is_train=False)
-     return model, preprocess_train, preprocess_val
-
-
- def list_models():
-     """Enumerate available model architectures based on config files."""
-     return list(_MODEL_CONFIGS.keys())
-
-
- def add_model_config(path):
-     """Add a model config path or file and update the registry."""
-     if not isinstance(path, Path):
-         path = Path(path)
-     _MODEL_CONFIG_PATHS.append(path)
-     _rescan_model_configs()
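A hedged usage sketch of the factory above. The tower names below are assumptions; whatever `list_models()` reports for the bundled model_configs/*.json files is authoritative:

import torch
from ldm.modules.encoders.open_clap.factory import create_model, list_models

print(list_models())  # architectures discovered from model_configs/*.json

# Build a randomly initialized CLAP; names must match a discovered config.
model, model_cfg = create_model(
    amodel_name='HTSAT-tiny',   # assumed audio tower config
    tmodel_name='roberta',      # assumed text tower type
    pretrained='',              # empty string: no checkpoint download
    precision='fp32',
    device=torch.device('cpu'),
)
print(model_cfg['embed_dim'])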
 
 
spaces/AIZeroToHero/05-RealtimeStreamlitASR/app.py DELETED
@@ -1,119 +0,0 @@
- from collections import deque
- import streamlit as st
- import torch
- from streamlit_player import st_player
- from transformers import AutoModelForCTC, Wav2Vec2Processor
- from streaming import ffmpeg_stream
-
- device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
- player_options = {
-     "events": ["onProgress"],
-     "progress_interval": 200,
-     "volume": 1.0,
-     "playing": True,
-     "loop": False,
-     "controls": False,
-     "muted": False,
-     "config": {"youtube": {"playerVars": {"start": 1}}},
- }
-
- # disable rapid fading in and out on `st.code` updates
- st.markdown("<style>.element-container{opacity:1 !important}</style>", unsafe_allow_html=True)
-
- @st.cache(hash_funcs={torch.nn.parameter.Parameter: lambda _: None})
- def load_model(model_path="facebook/wav2vec2-large-robust-ft-swbd-300h"):
-     processor = Wav2Vec2Processor.from_pretrained(model_path)
-     model = AutoModelForCTC.from_pretrained(model_path).to(device)
-     return processor, model
-
- processor, model = load_model()
-
- def stream_text(url, chunk_duration_ms, pad_duration_ms):
-     sampling_rate = processor.feature_extractor.sampling_rate
-
-     # calculate the length of logits to cut from the sides of the output to account for input padding
-     output_pad_len = model._get_feat_extract_output_lengths(int(sampling_rate * pad_duration_ms / 1000))
-
-     # define the audio chunk generator
-     stream = ffmpeg_stream(url, sampling_rate, chunk_duration_ms=chunk_duration_ms, pad_duration_ms=pad_duration_ms)
-
-     leftover_text = ""
-     for i, chunk in enumerate(stream):
-         input_values = processor(chunk, sampling_rate=sampling_rate, return_tensors="pt").input_values
-
-         with torch.no_grad():
-             logits = model(input_values.to(device)).logits[0]
-         if i > 0:
-             logits = logits[output_pad_len : len(logits) - output_pad_len]
-         else:  # don't count padding at the start of the clip
-             logits = logits[: len(logits) - output_pad_len]
-
-         predicted_ids = torch.argmax(logits, dim=-1).cpu().tolist()
-         if processor.decode(predicted_ids).strip():
-             leftover_ids = processor.tokenizer.encode(leftover_text)
-             # concat the last word (or its part) from the last frame with the current text
-             text = processor.decode(leftover_ids + predicted_ids)
-             # don't return the last word in case it's just partially recognized
-             text, leftover_text = text.rsplit(" ", 1)
-             yield text
-         else:
-             yield leftover_text
-             leftover_text = ""
-     yield leftover_text
-
- def main():
-     state = st.session_state
-     st.header("Video ASR Streamlit from Youtube Link")
-
-     with st.form(key="inputs_form"):
-
-         # Our world's best teachers on subjects of AI, Cognitive, Neuroscience for our Behavioral and Medical Health
-         ytJoschaBach = "https://youtu.be/cC1HszE5Hcw?list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&t=8984"
-         ytSamHarris = "https://www.youtube.com/watch?v=4dC_nRYIDZU&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=2"
-         ytJohnAbramson = "https://www.youtube.com/watch?v=arrokG3wCdE&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=3"
-         ytElonMusk = "https://www.youtube.com/watch?v=DxREm3s1scA&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=4"
-         ytJeffreyShainline = "https://www.youtube.com/watch?v=EwueqdgIvq4&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=5"
-         ytJeffHawkins = "https://www.youtube.com/watch?v=Z1KwkpTUbkg&list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&index=6"
-         ytSamHarris = "https://youtu.be/Ui38ZzTymDY?list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L"
-         ytSamHarris = "https://youtu.be/4dC_nRYIDZU?list=PLHgX2IExbFouJoqEr8JMF5MbZSbyC91-L&t=7809"
-         ytTimelapseAI = "https://www.youtube.com/watch?v=63yr9dlI0cU&list=PLHgX2IExbFovQybyfltywXnqZi5YvaSS-"
-         state.youtube_url = st.text_input("YouTube URL", ytTimelapseAI)
-
-         state.chunk_duration_ms = st.slider("Audio chunk duration (ms)", 2000, 10000, 3000, 100)
-         state.pad_duration_ms = st.slider("Padding duration (ms)", 100, 5000, 1000, 100)
-         submit_button = st.form_submit_button(label="Submit")
-
-     if submit_button or "asr_stream" not in state:
-         # a hack to update the video player on value changes
-         state.youtube_url = (
-             state.youtube_url.split("&hash=")[0]
-             + f"&hash={state.chunk_duration_ms}-{state.pad_duration_ms}"
-         )
-         state.asr_stream = stream_text(
-             state.youtube_url, state.chunk_duration_ms, state.pad_duration_ms
-         )
-         state.chunks_taken = 0
-
-         state.lines = deque([], maxlen=100)  # limit to the last n lines of subs
-
-     player = st_player(state.youtube_url, **player_options, key="youtube_player")
-
-     if "asr_stream" in state and player.data and player.data["played"] < 1.0:
-         # check how many seconds were played, and if more than processed - write the next text chunk
-         processed_seconds = state.chunks_taken * (state.chunk_duration_ms / 1000)
-         if processed_seconds < player.data["playedSeconds"]:
-             text = next(state.asr_stream)
-             state.lines.append(text)
-             state.chunks_taken += 1
-     if "lines" in state:
-         # print the lines of subs
-         st.code("\n".join(state.lines))
-
-
- if __name__ == "__main__":
-     main()
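The subtle part of `stream_text` is how words are stitched across chunk boundaries: the last, possibly partial, word of each chunk is held back and prepended to the next chunk. A toy string-level sketch of the same idea (the real code does this at the token-id level):

def stitch(chunks):
    leftover = ""
    for chunk in chunks:
        text = leftover + chunk
        if " " in text:
            # hold back the final (possibly incomplete) word
            text, leftover = text.rsplit(" ", 1)
            yield text
        else:
            leftover = text
    yield leftover

print(list(stitch(["the quick bro", "wn fox jum", "ps over"])))
# -> ['the quick', 'brown fox', 'jumps', 'over']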
 
 
spaces/AUBADA-ALARABI/poetry202/app.py DELETED
@@ -1,53 +0,0 @@
- import gc
- import gradio as gr
- from transformers import pipeline, set_seed
-
- pipe = pipeline('text-generation', framework='pt', model='akhooli/ap2023', tokenizer='akhooli/ap2023')
- # gc.collect()
- samples = [
-     ['أنت', 1.0, 50, 1.0, 1.0, 114],
-     ['هل غادر', 1.0, 50, 1.0, 1.0, 114],
-     ['ألا ليت', 1.0, 50, 1.0, 1.0, 114],
-     ['يا قدس', 1.0, 50, 1.0, 1.0, 114],
-     ['عيد بأية حال', 1.0, 50, 1.0, 1.0, 114],
-     ['لكل شيء إذا ما', 1.0, 50, 1.0, 1.0, 114],
-     ['.', 1.0, 50, 1.0, 1.0, 114],
- ]
-
- notes = """
- - Enter a short prompt or select (click) one of the examples and click SEND
- - Adjust parameters (temperature, top k, top p and penalty) through the sliders (keep close to default values).
- - For the same seed (randomness), the same output is regenerated if other parameters are fixed
- - Clear and enter a new prompt, or select another example and SEND to regenerate
- - The '.' means start a new line from no prompt (your prompt need not be long)
- - Be patient: this runs on CPU (free tier)
- - Feedback (Twitter): @akhooli (https://twitter.com/akhooli/status/1611025232201977859)
- - Note/Disclaimer: may generate unacceptable or inappropriate content. Use at your own risk.
- """
-
- def sayPoetry(prompt, temp=1.0, topk=50, topp=1.0, penalty=1.0, seed=114):
-     if not int(seed) >= 0: seed = 114
-     set_seed(seed)
-     gen = pipe(prompt, max_length=96, do_sample=True, temperature=temp, top_k=topk, top_p=topp, repetition_penalty=penalty,
-                min_length=64, no_repeat_ngram_size=3, return_full_text=True,
-                num_beams=5, num_return_sequences=1)[0]["generated_text"]
-     poetry = ""
-     for line in gen.split('.')[:-1]:
-         poetry += line  # + "\n"
-     return poetry
-
- poetry = gr.Interface(fn=sayPoetry,
-                       inputs=[
-                           gr.Textbox(label="Enter a short prompt or select from the examples:"),
-                           gr.Slider(0.70, 1.2, step=0.01, value=1.0, label='control temperature'),
-                           gr.Slider(25, 100, step=1, value=50, label='control top k'),
-                           gr.Slider(0.80, 1.0, step=0.01, value=1.0, label='control top p'),
-                           gr.Slider(0.90, 1.50, step=0.01, value=1.0, label='control penalty'),
-                           gr.Number(value=139750, precision=0, label='Seed'),
-                       ],
-                       outputs=[gr.Textbox(label="Generated Poetry:")],
-                       allow_flagging='never',
-                       title='Arabic Poetry Generation Demo (updated Jan. 2023)',
-                       description="A simple demo of AI-generated poetry based on 1M poems, fine-tuned using AraGPT2 (be patient, it runs on CPU)",
-                       examples=samples,
-                       cache_examples=False,
-                       article=notes)
- poetry.launch()  # show_error=True, debug=True
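Because generation is seeded via set_seed, the app's outputs are reproducible for a fixed parameter set. A hedged check outside Gradio (assumes the akhooli/ap2023 checkpoint is reachable from the Hub):

from transformers import pipeline, set_seed

pipe = pipeline('text-generation', model='akhooli/ap2023', tokenizer='akhooli/ap2023')

set_seed(114)
a = pipe('أنت', max_length=96, do_sample=True, top_k=50)[0]['generated_text']
set_seed(114)
b = pipe('أنت', max_length=96, do_sample=True, top_k=50)[0]['generated_text']
assert a == b  # same seed and sampling params -> identical poem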
 
 
spaces/Abdllh/poetry202/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Poetry2023
- emoji: 👁
- colorFrom: green
- colorTo: gray
- sdk: gradio
- sdk_version: 3.16.0
- app_file: app.py
- pinned: false
- duplicated_from: akhooli/poetry2023
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
spaces/AchyuthGamer/OpenGPT/client/js/change-language.js DELETED
@@ -1,47 +0,0 @@
- document.addEventListener('DOMContentLoaded', fetchLanguages);
-
- async function fetchLanguages() {
-     try {
-         const [languagesResponse, currentLanguageResponse] = await Promise.all([
-             fetch(`${url_prefix}/get-languages`),
-             fetch(`${url_prefix}/get-locale`)
-         ]);
-
-         const languages = await languagesResponse.json();
-         const currentLanguage = await currentLanguageResponse.text();
-
-         const languageSelect = document.getElementById('language');
-         languages.forEach(lang => {
-             const option = document.createElement('option');
-             option.value = lang;
-             option.textContent = lang;
-             languageSelect.appendChild(option);
-         });
-
-         const savedLanguage = localStorage.getItem("language") || currentLanguage;
-         setLanguageOnPageLoad(savedLanguage);
-     } catch (error) {
-         console.error("Failed to fetch languages or current language");
-     }
- }
-
- function setLanguageOnPageLoad(language) {
-     document.getElementById("language").value = language;
- }
-
- function changeLanguage(lang) {
-     fetch(`${url_prefix}/change-language`, {
-         method: "POST",
-         headers: {
-             "Content-Type": "application/json",
-         },
-         body: JSON.stringify({ language: lang }),
-     }).then((response) => {
-         if (response.ok) {
-             localStorage.setItem("language", lang);
-             location.reload();
-         } else {
-             console.error("Failed to change language");
-         }
-     });
- }
 
 
spaces/AdithyaSNair/Medical_price_prediction/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Medical Price Prediction
- emoji: 📚
- colorFrom: red
- colorTo: indigo
- sdk: gradio
- sdk_version: 3.16.2
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
spaces/AgentVerse/agentVerse/agentverse/environments/simulation_env/rules/selector/basic.py DELETED
@@ -1,27 +0,0 @@
- from __future__ import annotations
-
- from typing import TYPE_CHECKING, List
-
- from agentverse.message import Message
-
- from . import selector_registry as SelectorRegistry
- from .base import BaseSelector
-
- if TYPE_CHECKING:
-     from agentverse.environments import BaseEnvironment
-
-
- @SelectorRegistry.register("basic")
- class BasicSelector(BaseSelector):
-     """
-     Base class for all selectors
-     """
-
-     def select_message(
-         self, environment: BaseEnvironment, messages: List[Message]
-     ) -> List[Message]:
-         """Selects a set of valid messages from all messages"""
-         return messages
-
-     def reset(self) -> None:
-         pass
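The registry decorator makes it straightforward to drop in alternative selection policies. A hedged sketch of a custom selector following the same pattern (import paths are assumptions inferred from the repo layout above):

from typing import List

from agentverse.environments.simulation_env.rules.selector import selector_registry as SelectorRegistry
from agentverse.environments.simulation_env.rules.selector.base import BaseSelector
from agentverse.message import Message

@SelectorRegistry.register("last_only")
class LastOnlySelector(BaseSelector):
    """Keep only the most recent message."""

    def select_message(self, environment, messages: List[Message]) -> List[Message]:
        return messages[-1:]

    def reset(self) -> None:
        pass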
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/alphamaskimage/AlphaMaskImage.js DELETED
@@ -1,2 +0,0 @@
- import AlphaMaskImage from '../../../plugins/alphamaskimage.js';
- export default AlphaMaskImage;
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/filechooser/Factory.d.ts DELETED
@@ -1,5 +0,0 @@
- import { FileChooser } from './FileChooser.js';
-
- export default function (
-     config?: FileChooser.IConfig
- ): FileChooser;
 
 
 
 
 
 
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/批量Markdown翻译.py DELETED
@@ -1,161 +0,0 @@
- from toolbox import update_ui
- from toolbox import CatchException, report_execption, write_results_to_file
- fast_debug = False
-
- class PaperFileGroup():
-     def __init__(self):
-         self.file_paths = []
-         self.file_contents = []
-         self.sp_file_contents = []
-         self.sp_file_index = []
-         self.sp_file_tag = []
-
-         # count_token
-         from request_llm.bridge_all import model_info
-         enc = model_info["gpt-3.5-turbo"]['tokenizer']
-         def get_token_num(txt): return len(enc.encode(txt, disallowed_special=()))
-         self.get_token_num = get_token_num
-
-     def run_file_split(self, max_token_limit=1900):
-         """
-         Split overly long texts into segments.
-         """
-         for index, file_content in enumerate(self.file_contents):
-             if self.get_token_num(file_content) < max_token_limit:
-                 self.sp_file_contents.append(file_content)
-                 self.sp_file_index.append(index)
-                 self.sp_file_tag.append(self.file_paths[index])
-             else:
-                 from .crazy_utils import breakdown_txt_to_satisfy_token_limit_for_pdf
-                 segments = breakdown_txt_to_satisfy_token_limit_for_pdf(file_content, self.get_token_num, max_token_limit)
-                 for j, segment in enumerate(segments):
-                     self.sp_file_contents.append(segment)
-                     self.sp_file_index.append(index)
-                     self.sp_file_tag.append(self.file_paths[index] + f".part-{j}.md")
-
-         print('Segmentation: done')
-
- def 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en'):
-     import time, os, re
-     from .crazy_utils import request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency
-
-     # <-------- Read the Markdown files and strip all comments ---------->
-     pfg = PaperFileGroup()
-
-     for index, fp in enumerate(file_manifest):
-         with open(fp, 'r', encoding='utf-8', errors='replace') as f:
-             file_content = f.read()
-             # record the comment-stripped text
-             pfg.file_paths.append(fp)
-             pfg.file_contents.append(file_content)
-
-     # <-------- Split Markdown files that are too long ---------->
-     pfg.run_file_split(max_token_limit=1500)
-     n_split = len(pfg.sp_file_contents)
-
-     # <-------- Start the multi-threaded job ---------->
-     if language == 'en->zh':
-         inputs_array = ["This is a Markdown file, translate it into Chinese, do not modify any existing Markdown commands:" +
-                         f"\n\n{frag}" for frag in pfg.sp_file_contents]
-         inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
-         sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
-     elif language == 'zh->en':
-         inputs_array = [f"This is a Markdown file, translate it into English, do not modify any existing Markdown commands:" +
-                         f"\n\n{frag}" for frag in pfg.sp_file_contents]
-         inputs_show_user_array = [f"翻译 {f}" for f in pfg.sp_file_tag]
-         sys_prompt_array = ["You are a professional academic paper translator." for _ in range(n_split)]
-
-     gpt_response_collection = yield from request_gpt_model_multi_threads_with_very_awesome_ui_and_high_efficiency(
-         inputs_array=inputs_array,
-         inputs_show_user_array=inputs_show_user_array,
-         llm_kwargs=llm_kwargs,
-         chatbot=chatbot,
-         history_array=[[""] for _ in range(n_split)],
-         sys_prompt_array=sys_prompt_array,
-         # max_workers=5,  # the maximum parallelism allowed by OpenAI
-         scroller_max_len=80
-     )
-
-     # <-------- Collect the results and finish ---------->
-     create_report_file_name = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()) + f"-chatgpt.polish.md"
-     res = write_results_to_file(gpt_response_collection, file_name=create_report_file_name)
-     history = gpt_response_collection
-     chatbot.append((f"{fp}完成了吗?", res))
-     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-
- @CatchException
- def Markdown英译中(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-     # Basic info: feature description and contributors
-     chatbot.append([
-         "函数插件功能?",
-         "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
-     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-     # Try importing the dependencies; if any are missing, suggest how to install them
-     try:
-         import tiktoken
-     except:
-         report_execption(chatbot, history,
-                          a=f"解析项目: {txt}",
-                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
-         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-         return
-     history = []  # clear the history to avoid input overflow
-     import glob, os
-     if os.path.exists(txt):
-         project_folder = txt
-     else:
-         if txt == "": txt = '空空如也的输入栏'
-         report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-         return
-     file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.md', recursive=True)]
-     if len(file_manifest) == 0:
-         report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.md文件: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-         return
-     yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='en->zh')
-
-
- @CatchException
- def Markdown中译英(txt, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, web_port):
-     # Basic info: feature description and contributors
-     chatbot.append([
-         "函数插件功能?",
-         "对整个Markdown项目进行翻译。函数插件贡献者: Binary-Husky"])
-     yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-
-     # Try importing the dependencies; if any are missing, suggest how to install them
-     try:
-         import tiktoken
-     except:
-         report_execption(chatbot, history,
-                          a=f"解析项目: {txt}",
-                          b=f"导入软件依赖失败。使用该模块需要额外依赖,安装方法```pip install --upgrade tiktoken```。")
-         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-         return
-     history = []  # clear the history to avoid input overflow
-     import glob, os
-     if os.path.exists(txt):
-         project_folder = txt
-     else:
-         if txt == "": txt = '空空如也的输入栏'
-         report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到本地项目或无权访问: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-         return
-     if txt.endswith('.md'):
-         file_manifest = [txt]
-     else:
-         file_manifest = [f for f in glob.glob(f'{project_folder}/**/*.md', recursive=True)]
-     if len(file_manifest) == 0:
-         report_execption(chatbot, history, a=f"解析项目: {txt}", b=f"找不到任何.md文件: {txt}")
-         yield from update_ui(chatbot=chatbot, history=history)  # refresh the UI
-         return
-     yield from 多文件翻译(file_manifest, project_folder, llm_kwargs, plugin_kwargs, chatbot, history, system_prompt, language='zh->en')
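For reference, the token-budget idea behind run_file_split can be reproduced with tiktoken alone. A hedged sketch using greedy paragraph packing (the real breakdown_txt_to_satisfy_token_limit_for_pdf helper is smarter about split points):

import tiktoken

enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

def naive_split(text, max_tokens=1500):
    """Pack paragraphs greedily while staying under a token budget."""
    parts, current = [], ""
    for para in text.split("\n\n"):
        candidate = (current + "\n\n" + para).strip()
        if current and len(enc.encode(candidate)) > max_tokens:
            parts.append(current)   # budget exceeded: flush the current segment
            current = para
        else:
            current = candidate
    if current:
        parts.append(current)
    return parts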
 
 
spaces/Andy1621/uniformer_image_detection/configs/_base_/models/mask_rcnn_uniformer_fpn.py DELETED
@@ -1,121 +0,0 @@
- # model settings
- model = dict(
-     type='MaskRCNN',
-     pretrained=None,
-     backbone=dict(
-         type='UniFormer',
-         embed_dim=[64, 128, 320, 512],
-         layers=[3, 4, 8, 3],
-         head_dim=64,
-         mlp_ratio=4.,
-         qkv_bias=True,
-         drop_rate=0.,
-         attn_drop_rate=0.,
-         drop_path_rate=0.2),
-     neck=dict(
-         type='FPN',
-         in_channels=[64, 128, 320, 512],
-         out_channels=256,
-         num_outs=5),
-     rpn_head=dict(
-         type='RPNHead',
-         in_channels=256,
-         feat_channels=256,
-         anchor_generator=dict(
-             type='AnchorGenerator',
-             scales=[8],
-             ratios=[0.5, 1.0, 2.0],
-             strides=[4, 8, 16, 32, 64]),
-         bbox_coder=dict(
-             type='DeltaXYWHBBoxCoder',
-             target_means=[.0, .0, .0, .0],
-             target_stds=[1.0, 1.0, 1.0, 1.0]),
-         loss_cls=dict(
-             type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
-         loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
-     roi_head=dict(
-         type='StandardRoIHead',
-         bbox_roi_extractor=dict(
-             type='SingleRoIExtractor',
-             roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
-             out_channels=256,
-             featmap_strides=[4, 8, 16, 32]),
-         bbox_head=dict(
-             type='Shared2FCBBoxHead',
-             in_channels=256,
-             fc_out_channels=1024,
-             roi_feat_size=7,
-             num_classes=80,
-             bbox_coder=dict(
-                 type='DeltaXYWHBBoxCoder',
-                 target_means=[0., 0., 0., 0.],
-                 target_stds=[0.1, 0.1, 0.2, 0.2]),
-             reg_class_agnostic=False,
-             loss_cls=dict(
-                 type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
-             loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
-         mask_roi_extractor=dict(
-             type='SingleRoIExtractor',
-             roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
-             out_channels=256,
-             featmap_strides=[4, 8, 16, 32]),
-         mask_head=dict(
-             type='FCNMaskHead',
-             num_convs=4,
-             in_channels=256,
-             conv_out_channels=256,
-             num_classes=80,
-             loss_mask=dict(
-                 type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
-     # model training and testing settings
-     train_cfg=dict(
-         rpn=dict(
-             assigner=dict(
-                 type='MaxIoUAssigner',
-                 pos_iou_thr=0.7,
-                 neg_iou_thr=0.3,
-                 min_pos_iou=0.3,
-                 match_low_quality=True,
-                 ignore_iof_thr=-1),
-             sampler=dict(
-                 type='RandomSampler',
-                 num=256,
-                 pos_fraction=0.5,
-                 neg_pos_ub=-1,
-                 add_gt_as_proposals=False),
-             allowed_border=-1,
-             pos_weight=-1,
-             debug=False),
-         rpn_proposal=dict(
-             nms_pre=2000,
-             max_per_img=1000,
-             nms=dict(type='nms', iou_threshold=0.7),
-             min_bbox_size=0),
-         rcnn=dict(
-             assigner=dict(
-                 type='MaxIoUAssigner',
-                 pos_iou_thr=0.5,
-                 neg_iou_thr=0.5,
-                 min_pos_iou=0.5,
-                 match_low_quality=True,
-                 ignore_iof_thr=-1),
-             sampler=dict(
-                 type='RandomSampler',
-                 num=512,
-                 pos_fraction=0.25,
-                 neg_pos_ub=-1,
-                 add_gt_as_proposals=True),
-             mask_size=28,
-             pos_weight=-1,
-             debug=False)),
-     test_cfg=dict(
-         rpn=dict(
-             nms_pre=1000,
-             max_per_img=1000,
-             nms=dict(type='nms', iou_threshold=0.7),
-             min_bbox_size=0),
-         rcnn=dict(
-             score_thr=0.05,
-             nms=dict(type='nms', iou_threshold=0.5),
-             max_per_img=100,
-             mask_thr_binary=0.5)))
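A hedged sketch of how a config file like this is consumed with the mmcv/mmdet 2.x APIs (the path is an assumption, and building requires the custom UniFormer backbone to be registered; in practice this _base_ model file is inherited by a full training config):

from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/_base_/models/mask_rcnn_uniformer_fpn.py')
model = build_detector(cfg.model)  # train_cfg/test_cfg already live inside cfg.model here
print(type(model).__name__)        # MaskRCNN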
 
 
spaces/Andy1621/uniformer_image_detection/configs/fast_rcnn/fast_rcnn_r101_fpn_1x_coco.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './fast_rcnn_r50_fpn_1x_coco.py'
- model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))
 
 
 
spaces/Andy1621/uniformer_image_detection/configs/legacy_1.x/retinanet_r50_caffe_fpn_1x_coco_v1.py DELETED
@@ -1,37 +0,0 @@
- _base_ = './retinanet_r50_fpn_1x_coco_v1.py'
- model = dict(
-     pretrained='open-mmlab://detectron/resnet50_caffe',
-     backbone=dict(
-         norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe'))
- # use caffe img_norm
- img_norm_cfg = dict(
-     mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='LoadAnnotations', with_bbox=True),
-     dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='Pad', size_divisor=32),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='MultiScaleFlipAug',
-         img_scale=(1333, 800),
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=True),
-             dict(type='RandomFlip'),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='Pad', size_divisor=32),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(type='Collect', keys=['img']),
-         ])
- ]
- data = dict(
-     train=dict(pipeline=train_pipeline),
-     val=dict(pipeline=test_pipeline),
-     test=dict(pipeline=test_pipeline))
 
 
spaces/Andy1621/uniformer_image_detection/configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py DELETED
@@ -1,37 +0,0 @@
- _base_ = './retinanet_r50_fpn_1x_coco.py'
- model = dict(
-     pretrained='open-mmlab://detectron2/resnet50_caffe',
-     backbone=dict(
-         norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe'))
- # use caffe img_norm
- img_norm_cfg = dict(
-     mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='LoadAnnotations', with_bbox=True),
-     dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
-     dict(type='RandomFlip', flip_ratio=0.5),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='Pad', size_divisor=32),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='MultiScaleFlipAug',
-         img_scale=(1333, 800),
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=True),
-             dict(type='RandomFlip'),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='Pad', size_divisor=32),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(type='Collect', keys=['img']),
-         ])
- ]
- data = dict(
-     train=dict(pipeline=train_pipeline),
-     val=dict(pipeline=test_pipeline),
-     test=dict(pipeline=test_pipeline))
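Both RetinaNet variants above switch to 'caffe'-style preprocessing, which differs from the torchvision default: BGR channel order (to_rgb=False), per-channel mean subtraction only, and no division by 255 (std fixed at 1.0). The two configs use slightly different means because the weights come from Detectron vs. Detectron2. The arithmetic, illustrated:

import numpy as np

img = np.random.randint(0, 256, (800, 1333, 3)).astype(np.float32)  # mmcv loads BGR
mean = np.array([103.530, 116.280, 123.675], dtype=np.float32)      # BGR channel means
std = np.array([1.0, 1.0, 1.0], dtype=np.float32)

img_norm = (img - mean) / std  # exactly what dict(type='Normalize', **img_norm_cfg) computes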
 
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/bbox_heads/sabl_head.py DELETED
@@ -1,572 +0,0 @@
- import numpy as np
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from mmcv.cnn import ConvModule, kaiming_init, normal_init, xavier_init
- from mmcv.runner import force_fp32
-
- from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
- from mmdet.models.builder import HEADS, build_loss
- from mmdet.models.losses import accuracy
-
-
- @HEADS.register_module()
- class SABLHead(nn.Module):
-     """Side-Aware Boundary Localization (SABL) for RoI-Head.
-
-     Side-Aware features are extracted by conv layers
-     with an attention mechanism.
-     Boundary Localization with Bucketing and Bucketing Guided Rescoring
-     are implemented in BucketingBBoxCoder.
-
-     Please refer to https://arxiv.org/abs/1912.04260 for more details.
-
-     Args:
-         cls_in_channels (int): Input channels of cls RoI feature. \
-             Defaults to 256.
-         reg_in_channels (int): Input channels of reg RoI feature. \
-             Defaults to 256.
-         roi_feat_size (int): Size of RoI features. Defaults to 7.
-         reg_feat_up_ratio (int): Upsample ratio of reg features. \
-             Defaults to 2.
-         reg_pre_kernel (int): Kernel of 2D conv layers before \
-             attention pooling. Defaults to 3.
-         reg_post_kernel (int): Kernel of 1D conv layers after \
-             attention pooling. Defaults to 3.
-         reg_pre_num (int): Number of pre convs. Defaults to 2.
-         reg_post_num (int): Number of post convs. Defaults to 1.
-         num_classes (int): Number of classes in dataset. Defaults to 80.
-         cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024.
-         reg_offset_out_channels (int): Hidden and output channel \
-             of reg offset branch. Defaults to 256.
-         reg_cls_out_channels (int): Hidden and output channel \
-             of reg cls branch. Defaults to 256.
-         num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1.
-         num_reg_fcs (int): Number of fcs for reg branch. Defaults to 0.
-         reg_class_agnostic (bool): Class agnostic regression or not. \
-             Defaults to True.
-         norm_cfg (dict): Config of norm layers. Defaults to None.
-         bbox_coder (dict): Config of bbox coder. Defaults to 'BucketingBBoxCoder'.
-         loss_cls (dict): Config of classification loss.
-         loss_bbox_cls (dict): Config of classification loss for bbox branch.
-         loss_bbox_reg (dict): Config of regression loss for bbox branch.
-     """
-
-     def __init__(self,
-                  num_classes,
-                  cls_in_channels=256,
-                  reg_in_channels=256,
-                  roi_feat_size=7,
-                  reg_feat_up_ratio=2,
-                  reg_pre_kernel=3,
-                  reg_post_kernel=3,
-                  reg_pre_num=2,
-                  reg_post_num=1,
-                  cls_out_channels=1024,
-                  reg_offset_out_channels=256,
-                  reg_cls_out_channels=256,
-                  num_cls_fcs=1,
-                  num_reg_fcs=0,
-                  reg_class_agnostic=True,
-                  norm_cfg=None,
-                  bbox_coder=dict(
-                      type='BucketingBBoxCoder',
-                      num_buckets=14,
-                      scale_factor=1.7),
-                  loss_cls=dict(
-                      type='CrossEntropyLoss',
-                      use_sigmoid=False,
-                      loss_weight=1.0),
-                  loss_bbox_cls=dict(
-                      type='CrossEntropyLoss',
-                      use_sigmoid=True,
-                      loss_weight=1.0),
-                  loss_bbox_reg=dict(
-                      type='SmoothL1Loss', beta=0.1, loss_weight=1.0)):
-         super(SABLHead, self).__init__()
-         self.cls_in_channels = cls_in_channels
-         self.reg_in_channels = reg_in_channels
-         self.roi_feat_size = roi_feat_size
-         self.reg_feat_up_ratio = int(reg_feat_up_ratio)
-         self.num_buckets = bbox_coder['num_buckets']
-         assert self.reg_feat_up_ratio // 2 >= 1
-         self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio
-         assert self.up_reg_feat_size == bbox_coder['num_buckets']
-         self.reg_pre_kernel = reg_pre_kernel
-         self.reg_post_kernel = reg_post_kernel
-         self.reg_pre_num = reg_pre_num
-         self.reg_post_num = reg_post_num
-         self.num_classes = num_classes
-         self.cls_out_channels = cls_out_channels
-         self.reg_offset_out_channels = reg_offset_out_channels
-         self.reg_cls_out_channels = reg_cls_out_channels
-         self.num_cls_fcs = num_cls_fcs
-         self.num_reg_fcs = num_reg_fcs
-         self.reg_class_agnostic = reg_class_agnostic
-         assert self.reg_class_agnostic
-         self.norm_cfg = norm_cfg
-
-         self.bbox_coder = build_bbox_coder(bbox_coder)
-         self.loss_cls = build_loss(loss_cls)
-         self.loss_bbox_cls = build_loss(loss_bbox_cls)
-         self.loss_bbox_reg = build_loss(loss_bbox_reg)
-
-         self.cls_fcs = self._add_fc_branch(self.num_cls_fcs,
-                                            self.cls_in_channels,
-                                            self.roi_feat_size,
-                                            self.cls_out_channels)
-
-         self.side_num = int(np.ceil(self.num_buckets / 2))
-
-         if self.reg_feat_up_ratio > 1:
-             self.upsample_x = nn.ConvTranspose1d(
-                 reg_in_channels,
-                 reg_in_channels,
-                 self.reg_feat_up_ratio,
-                 stride=self.reg_feat_up_ratio)
-             self.upsample_y = nn.ConvTranspose1d(
-                 reg_in_channels,
-                 reg_in_channels,
-                 self.reg_feat_up_ratio,
-                 stride=self.reg_feat_up_ratio)
-
-         self.reg_pre_convs = nn.ModuleList()
-         for i in range(self.reg_pre_num):
-             reg_pre_conv = ConvModule(
-                 reg_in_channels,
-                 reg_in_channels,
-                 kernel_size=reg_pre_kernel,
-                 padding=reg_pre_kernel // 2,
-                 norm_cfg=norm_cfg,
-                 act_cfg=dict(type='ReLU'))
-             self.reg_pre_convs.append(reg_pre_conv)
-
-         self.reg_post_conv_xs = nn.ModuleList()
-         for i in range(self.reg_post_num):
-             reg_post_conv_x = ConvModule(
-                 reg_in_channels,
-                 reg_in_channels,
-                 kernel_size=(1, reg_post_kernel),
-                 padding=(0, reg_post_kernel // 2),
-                 norm_cfg=norm_cfg,
-                 act_cfg=dict(type='ReLU'))
-             self.reg_post_conv_xs.append(reg_post_conv_x)
-         self.reg_post_conv_ys = nn.ModuleList()
-         for i in range(self.reg_post_num):
-             reg_post_conv_y = ConvModule(
-                 reg_in_channels,
-                 reg_in_channels,
-                 kernel_size=(reg_post_kernel, 1),
-                 padding=(reg_post_kernel // 2, 0),
-                 norm_cfg=norm_cfg,
-                 act_cfg=dict(type='ReLU'))
-             self.reg_post_conv_ys.append(reg_post_conv_y)
-
-         self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1)
-         self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1)
-
-         self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1)
-         self.relu = nn.ReLU(inplace=True)
-
-         self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs,
-                                                self.reg_in_channels, 1,
-                                                self.reg_cls_out_channels)
-         self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs,
-                                                   self.reg_in_channels, 1,
-                                                   self.reg_offset_out_channels)
-         self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1)
-         self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1)
-
-     def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size,
-                        fc_out_channels):
-         in_channels = in_channels * roi_feat_size * roi_feat_size
-         branch_fcs = nn.ModuleList()
-         for i in range(num_branch_fcs):
-             fc_in_channels = (in_channels if i == 0 else fc_out_channels)
-             branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels))
-         return branch_fcs
-
-     def init_weights(self):
-         for module_list in [
-                 self.reg_cls_fcs, self.reg_offset_fcs, self.cls_fcs
-         ]:
-             for m in module_list.modules():
-                 if isinstance(m, nn.Linear):
-                     xavier_init(m, distribution='uniform')
-         if self.reg_feat_up_ratio > 1:
-             kaiming_init(self.upsample_x, distribution='normal')
-             kaiming_init(self.upsample_y, distribution='normal')
-
-         normal_init(self.reg_conv_att_x, 0, 0.01)
-         normal_init(self.reg_conv_att_y, 0, 0.01)
-         normal_init(self.fc_reg_offset, 0, 0.001)
-         normal_init(self.fc_reg_cls, 0, 0.01)
-         normal_init(self.fc_cls, 0, 0.01)
-
-     def cls_forward(self, cls_x):
-         cls_x = cls_x.view(cls_x.size(0), -1)
-         for fc in self.cls_fcs:
-             cls_x = self.relu(fc(cls_x))
-         cls_score = self.fc_cls(cls_x)
-         return cls_score
-
-     def attention_pool(self, reg_x):
-         """Extract direction-specific features fx and fy with an attention
-         mechanism."""
-         reg_fx = reg_x
-         reg_fy = reg_x
-         reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid()
-         reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid()
-         reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2)
-         reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3)
-         reg_fx = (reg_fx * reg_fx_att).sum(dim=2)
-         reg_fy = (reg_fy * reg_fy_att).sum(dim=3)
-         return reg_fx, reg_fy
-
-     def side_aware_feature_extractor(self, reg_x):
-         """Refine and extract side-aware features without splitting them."""
-         for reg_pre_conv in self.reg_pre_convs:
-             reg_x = reg_pre_conv(reg_x)
-         reg_fx, reg_fy = self.attention_pool(reg_x)
-
-         if self.reg_post_num > 0:
-             reg_fx = reg_fx.unsqueeze(2)
-             reg_fy = reg_fy.unsqueeze(3)
-             for i in range(self.reg_post_num):
-                 reg_fx = self.reg_post_conv_xs[i](reg_fx)
-                 reg_fy = self.reg_post_conv_ys[i](reg_fy)
-             reg_fx = reg_fx.squeeze(2)
-             reg_fy = reg_fy.squeeze(3)
-         if self.reg_feat_up_ratio > 1:
-             reg_fx = self.relu(self.upsample_x(reg_fx))
-             reg_fy = self.relu(self.upsample_y(reg_fy))
-         reg_fx = torch.transpose(reg_fx, 1, 2)
-         reg_fy = torch.transpose(reg_fy, 1, 2)
-         return reg_fx.contiguous(), reg_fy.contiguous()
-
-     def reg_pred(self, x, offset_fcs, cls_fcs):
-         """Predict bucketing estimation (cls_pred) and fine regression (offset
-         pred) with side-aware features."""
-         x_offset = x.view(-1, self.reg_in_channels)
-         x_cls = x.view(-1, self.reg_in_channels)
-
-         for fc in offset_fcs:
-             x_offset = self.relu(fc(x_offset))
-         for fc in cls_fcs:
-             x_cls = self.relu(fc(x_cls))
-         offset_pred = self.fc_reg_offset(x_offset)
-         cls_pred = self.fc_reg_cls(x_cls)
-
-         offset_pred = offset_pred.view(x.size(0), -1)
-         cls_pred = cls_pred.view(x.size(0), -1)
-
-         return offset_pred, cls_pred
-
-     def side_aware_split(self, feat):
-         """Split side-aware features aligned with orders of bucketing
-         targets."""
-         l_end = int(np.ceil(self.up_reg_feat_size / 2))
-         r_start = int(np.floor(self.up_reg_feat_size / 2))
-         feat_fl = feat[:, :l_end]
-         feat_fr = feat[:, r_start:].flip(dims=(1, ))
-         feat_fl = feat_fl.contiguous()
-         feat_fr = feat_fr.contiguous()
-         feat = torch.cat([feat_fl, feat_fr], dim=-1)
-         return feat
-
-     def bbox_pred_split(self, bbox_pred, num_proposals_per_img):
-         """Split batch bbox prediction back to each image."""
-         bucket_cls_preds, bucket_offset_preds = bbox_pred
-         bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0)
-         bucket_offset_preds = bucket_offset_preds.split(
-             num_proposals_per_img, 0)
-         bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds))
-         return bbox_pred
-
-     def reg_forward(self, reg_x):
-         outs = self.side_aware_feature_extractor(reg_x)
-         edge_offset_preds = []
-         edge_cls_preds = []
-         reg_fx = outs[0]
-         reg_fy = outs[1]
-         offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs,
-                                                   self.reg_cls_fcs)
-         offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs,
-                                                   self.reg_cls_fcs)
-         offset_pred_x = self.side_aware_split(offset_pred_x)
-         offset_pred_y = self.side_aware_split(offset_pred_y)
-         cls_pred_x = self.side_aware_split(cls_pred_x)
-         cls_pred_y = self.side_aware_split(cls_pred_y)
-         edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1)
-         edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1)
-
-         return (edge_cls_preds, edge_offset_preds)
-
-     def forward(self, x):
-
-         bbox_pred = self.reg_forward(x)
-         cls_score = self.cls_forward(x)
-
-         return cls_score, bbox_pred
-
-     def get_targets(self, sampling_results, gt_bboxes, gt_labels,
-                     rcnn_train_cfg):
-         pos_proposals = [res.pos_bboxes for res in sampling_results]
-         neg_proposals = [res.neg_bboxes for res in sampling_results]
-         pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
-         pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
-         cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals,
-                                              pos_gt_bboxes, pos_gt_labels,
-                                              rcnn_train_cfg)
-         (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
-          bucket_offset_targets, bucket_offset_weights) = cls_reg_targets
-         return (labels, label_weights, (bucket_cls_targets,
-                                         bucket_offset_targets),
-                 (bucket_cls_weights, bucket_offset_weights))
-
-     def bucket_target(self,
-                       pos_proposals_list,
-                       neg_proposals_list,
-                       pos_gt_bboxes_list,
-                       pos_gt_labels_list,
-                       rcnn_train_cfg,
-                       concat=True):
-         (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
-          bucket_offset_targets, bucket_offset_weights) = multi_apply(
-              self._bucket_target_single,
-              pos_proposals_list,
-              neg_proposals_list,
-              pos_gt_bboxes_list,
-              pos_gt_labels_list,
-              cfg=rcnn_train_cfg)
-
-         if concat:
-             labels = torch.cat(labels, 0)
-             label_weights = torch.cat(label_weights, 0)
-             bucket_cls_targets = torch.cat(bucket_cls_targets, 0)
-             bucket_cls_weights = torch.cat(bucket_cls_weights, 0)
-             bucket_offset_targets = torch.cat(bucket_offset_targets, 0)
-             bucket_offset_weights = torch.cat(bucket_offset_weights, 0)
-         return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
-                 bucket_offset_targets, bucket_offset_weights)
-
-     def _bucket_target_single(self, pos_proposals, neg_proposals,
-                               pos_gt_bboxes, pos_gt_labels, cfg):
-         """Compute bucketing estimation targets and fine regression targets
-         for a single image.
-
-         Args:
-             pos_proposals (Tensor): positive proposals of a single image,
-                 Shape (n_pos, 4)
-             neg_proposals (Tensor): negative proposals of a single image,
-                 Shape (n_neg, 4).
-             pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals
-                 of a single image, Shape (n_pos, 4).
-             pos_gt_labels (Tensor): gt labels assigned to positive proposals
-                 of a single image, Shape (n_pos, ).
-             cfg (dict): Config of calculating targets
-
-         Returns:
-             tuple:
-
-                 - labels (Tensor): Labels in a single image. \
-                     Shape (n,).
-                 - label_weights (Tensor): Label weights in a single image. \
-                     Shape (n,)
-                 - bucket_cls_targets (Tensor): Bucket cls targets in \
-                     a single image. Shape (n, num_buckets*2).
-                 - bucket_cls_weights (Tensor): Bucket cls weights in \
-                     a single image. Shape (n, num_buckets*2).
-                 - bucket_offset_targets (Tensor): Bucket offset targets \
-                     in a single image. Shape (n, num_buckets*2).
-                 - bucket_offset_weights (Tensor): Bucket offset weights \
-                     in a single image. Shape (n, num_buckets*2).
-         """
-         num_pos = pos_proposals.size(0)
-         num_neg = neg_proposals.size(0)
-         num_samples = num_pos + num_neg
-         labels = pos_gt_bboxes.new_full((num_samples, ),
-                                         self.num_classes,
-                                         dtype=torch.long)
-         label_weights = pos_proposals.new_zeros(num_samples)
-         bucket_cls_targets = pos_proposals.new_zeros(num_samples,
-                                                      4 * self.side_num)
-         bucket_cls_weights = pos_proposals.new_zeros(num_samples,
-                                                      4 * self.side_num)
-         bucket_offset_targets = pos_proposals.new_zeros(
-             num_samples, 4 * self.side_num)
-         bucket_offset_weights = pos_proposals.new_zeros(
-             num_samples, 4 * self.side_num)
-         if num_pos > 0:
-             labels[:num_pos] = pos_gt_labels
-             label_weights[:num_pos] = 1.0
-             (pos_bucket_offset_targets, pos_bucket_offset_weights,
-              pos_bucket_cls_targets,
-              pos_bucket_cls_weights) = self.bbox_coder.encode(
-                  pos_proposals, pos_gt_bboxes)
-             bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets
-             bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights
-             bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets
-             bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights
-         if num_neg > 0:
-             label_weights[-num_neg:] = 1.0
413
- return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
414
- bucket_offset_targets, bucket_offset_weights)
415
-
416
- def loss(self,
417
- cls_score,
418
- bbox_pred,
419
- rois,
420
- labels,
421
- label_weights,
422
- bbox_targets,
423
- bbox_weights,
424
- reduction_override=None):
425
- losses = dict()
426
- if cls_score is not None:
427
- avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
428
- losses['loss_cls'] = self.loss_cls(
429
- cls_score,
430
- labels,
431
- label_weights,
432
- avg_factor=avg_factor,
433
- reduction_override=reduction_override)
434
- losses['acc'] = accuracy(cls_score, labels)
435
-
436
- if bbox_pred is not None:
437
- bucket_cls_preds, bucket_offset_preds = bbox_pred
438
- bucket_cls_targets, bucket_offset_targets = bbox_targets
439
- bucket_cls_weights, bucket_offset_weights = bbox_weights
440
- # edge cls
441
- bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num)
442
- bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num)
443
- bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num)
444
- losses['loss_bbox_cls'] = self.loss_bbox_cls(
445
- bucket_cls_preds,
446
- bucket_cls_targets,
447
- bucket_cls_weights,
448
- avg_factor=bucket_cls_targets.size(0),
449
- reduction_override=reduction_override)
450
-
451
- losses['loss_bbox_reg'] = self.loss_bbox_reg(
452
- bucket_offset_preds,
453
- bucket_offset_targets,
454
- bucket_offset_weights,
455
- avg_factor=bucket_offset_targets.size(0),
456
- reduction_override=reduction_override)
457
-
458
- return losses
459
-
460
- @force_fp32(apply_to=('cls_score', 'bbox_pred'))
461
- def get_bboxes(self,
462
- rois,
463
- cls_score,
464
- bbox_pred,
465
- img_shape,
466
- scale_factor,
467
- rescale=False,
468
- cfg=None):
469
- if isinstance(cls_score, list):
470
- cls_score = sum(cls_score) / float(len(cls_score))
471
- scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
472
-
473
- if bbox_pred is not None:
474
- bboxes, confids = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
475
- img_shape)
476
- else:
477
- bboxes = rois[:, 1:].clone()
478
- confids = None
479
- if img_shape is not None:
480
- bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)
481
- bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)
482
-
483
- if rescale and bboxes.size(0) > 0:
484
- if isinstance(scale_factor, float):
485
- bboxes /= scale_factor
486
- else:
487
- bboxes /= torch.from_numpy(scale_factor).to(bboxes.device)
488
-
489
- if cfg is None:
490
- return bboxes, scores
491
- else:
492
- det_bboxes, det_labels = multiclass_nms(
493
- bboxes,
494
- scores,
495
- cfg.score_thr,
496
- cfg.nms,
497
- cfg.max_per_img,
498
- score_factors=confids)
499
-
500
- return det_bboxes, det_labels
501
-
502
- @force_fp32(apply_to=('bbox_preds', ))
503
- def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
504
- """Refine bboxes during training.
505
-
506
- Args:
507
- rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,
508
- and bs is the sampled RoIs per image.
509
- labels (Tensor): Shape (n*bs, ).
510
- bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \
511
- (n*bs, num_buckets*2)].
512
- pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
513
- is a gt bbox.
514
- img_metas (list[dict]): Meta info of each image.
515
-
516
- Returns:
517
- list[Tensor]: Refined bboxes of each image in a mini-batch.
518
- """
519
- img_ids = rois[:, 0].long().unique(sorted=True)
520
- assert img_ids.numel() == len(img_metas)
521
-
522
- bboxes_list = []
523
- for i in range(len(img_metas)):
524
- inds = torch.nonzero(
525
- rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
526
- num_rois = inds.numel()
527
-
528
- bboxes_ = rois[inds, 1:]
529
- label_ = labels[inds]
530
- edge_cls_preds, edge_offset_preds = bbox_preds
531
- edge_cls_preds_ = edge_cls_preds[inds]
532
- edge_offset_preds_ = edge_offset_preds[inds]
533
- bbox_pred_ = [edge_cls_preds_, edge_offset_preds_]
534
- img_meta_ = img_metas[i]
535
- pos_is_gts_ = pos_is_gts[i]
536
-
537
- bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
538
- img_meta_)
539
- # filter gt bboxes
540
- pos_keep = 1 - pos_is_gts_
541
- keep_inds = pos_is_gts_.new_ones(num_rois)
542
- keep_inds[:len(pos_is_gts_)] = pos_keep
543
-
544
- bboxes_list.append(bboxes[keep_inds.type(torch.bool)])
545
-
546
- return bboxes_list
547
-
548
- @force_fp32(apply_to=('bbox_pred', ))
549
- def regress_by_class(self, rois, label, bbox_pred, img_meta):
550
- """Regress the bbox for the predicted class. Used in Cascade R-CNN.
551
-
552
- Args:
553
- rois (Tensor): shape (n, 4) or (n, 5)
554
- label (Tensor): shape (n, )
555
- bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \
556
- (n, num_buckets *2)]
557
- img_meta (dict): Image meta info.
558
-
559
- Returns:
560
- Tensor: Regressed bboxes, the same shape as input rois.
561
- """
562
- assert rois.size(1) == 4 or rois.size(1) == 5
563
-
564
- if rois.size(1) == 4:
565
- new_rois, _ = self.bbox_coder.decode(rois, bbox_pred,
566
- img_meta['img_shape'])
567
- else:
568
- bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
569
- img_meta['img_shape'])
570
- new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
571
-
572
- return new_rois
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py DELETED
@@ -1,2 +0,0 @@
-_base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py'
-model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
 
 
 
spaces/Andyrasika/Andyrasika-dreamshaper-sdxl-1.0/README.md DELETED
@@ -1,12 +0,0 @@
----
-title: Andyrasika Dreamshaper Sdxl 1.0
-emoji: 👀
-colorFrom: pink
-colorTo: purple
-sdk: gradio
-sdk_version: 3.40.1
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
spaces/Anthony7906/MengHuiMXD_GPT/modules/utils.py DELETED
@@ -1,548 +0,0 @@
-# -*- coding:utf-8 -*-
-from __future__ import annotations
-from typing import TYPE_CHECKING, Any, Callable, Dict, List, Tuple, Type
-import logging
-import json
-import os
-import datetime
-import hashlib
-import csv
-import requests
-import re
-import html
-import sys
-import subprocess
-
-import gradio as gr
-from pypinyin import lazy_pinyin
-import tiktoken
-import mdtex2html
-from markdown import markdown
-from pygments import highlight
-from pygments.lexers import get_lexer_by_name
-from pygments.formatters import HtmlFormatter
-import pandas as pd
-
-from modules.presets import *
-from . import shared
-from modules.config import retrieve_proxy
-
-if TYPE_CHECKING:
-    from typing import TypedDict
-
-    class DataframeData(TypedDict):
-        headers: List[str]
-        data: List[List[str | int | bool]]
-
-def predict(current_model, *args):
-    iter = current_model.predict(*args)
-    for i in iter:
-        yield i
-
-def billing_info(current_model):
-    return current_model.billing_info()
-
-def set_key(current_model, *args):
-    return current_model.set_key(*args)
-
-def load_chat_history(current_model, *args):
-    return current_model.load_chat_history(*args)
-
-def interrupt(current_model, *args):
-    return current_model.interrupt(*args)
-
-def reset(current_model, *args):
-    return current_model.reset(*args)
-
-def retry(current_model, *args):
-    iter = current_model.retry(*args)
-    for i in iter:
-        yield i
-
-def delete_first_conversation(current_model, *args):
-    return current_model.delete_first_conversation(*args)
-
-def delete_last_conversation(current_model, *args):
-    return current_model.delete_last_conversation(*args)
-
-def set_system_prompt(current_model, *args):
-    return current_model.set_system_prompt(*args)
-
-def save_chat_history(current_model, *args):
-    return current_model.save_chat_history(*args)
-
-def export_markdown(current_model, *args):
-    return current_model.export_markdown(*args)
-
-def set_token_upper_limit(current_model, *args):
-    return current_model.set_token_upper_limit(*args)
-
-def set_temperature(current_model, *args):
-    current_model.set_temperature(*args)
-
-def set_top_p(current_model, *args):
-    current_model.set_top_p(*args)
-
-def set_n_choices(current_model, *args):
-    current_model.set_n_choices(*args)
-
-def set_stop_sequence(current_model, *args):
-    current_model.set_stop_sequence(*args)
-
-def set_max_tokens(current_model, *args):
-    current_model.set_max_tokens(*args)
-
-def set_presence_penalty(current_model, *args):
-    current_model.set_presence_penalty(*args)
-
-def set_frequency_penalty(current_model, *args):
-    current_model.set_frequency_penalty(*args)
-
-def set_logit_bias(current_model, *args):
-    current_model.set_logit_bias(*args)
-
-def set_user_identifier(current_model, *args):
-    current_model.set_user_identifier(*args)
-
-def set_single_turn(current_model, *args):
-    current_model.set_single_turn(*args)
-
-def handle_file_upload(current_model, *args):
-    return current_model.handle_file_upload(*args)
-
-def like(current_model, *args):
-    return current_model.like(*args)
-
-def dislike(current_model, *args):
-    return current_model.dislike(*args)
-
-
-def count_token(message):
-    encoding = tiktoken.get_encoding("cl100k_base")
-    input_str = f"role: {message['role']}, content: {message['content']}"
-    length = len(encoding.encode(input_str))
-    return length
-
-
-def markdown_to_html_with_syntax_highlight(md_str):
-    def replacer(match):
-        lang = match.group(1) or "text"
-        code = match.group(2)
-
-        try:
-            lexer = get_lexer_by_name(lang, stripall=True)
-        except ValueError:
-            lexer = get_lexer_by_name("text", stripall=True)
-
-        formatter = HtmlFormatter()
-        highlighted_code = highlight(code, lexer, formatter)
-
-        return f'<pre><code class="{lang}">{highlighted_code}</code></pre>'
-
-    code_block_pattern = r"```(\w+)?\n([\s\S]+?)\n```"
-    md_str = re.sub(code_block_pattern, replacer, md_str, flags=re.MULTILINE)
-
-    html_str = markdown(md_str)
-    return html_str
-
-
-def normalize_markdown(md_text: str) -> str:
-    lines = md_text.split("\n")
-    normalized_lines = []
-    inside_list = False
-
-    for i, line in enumerate(lines):
-        if re.match(r"^(\d+\.|-|\*|\+)\s", line.strip()):
-            if not inside_list and i > 0 and lines[i - 1].strip() != "":
-                normalized_lines.append("")
-            inside_list = True
-            normalized_lines.append(line)
-        elif inside_list and line.strip() == "":
-            if i < len(lines) - 1 and not re.match(
-                r"^(\d+\.|-|\*|\+)\s", lines[i + 1].strip()
-            ):
-                normalized_lines.append(line)
-            continue
-        else:
-            inside_list = False
-            normalized_lines.append(line)
-
-    return "\n".join(normalized_lines)
-
-
-def convert_mdtext(md_text):
-    code_block_pattern = re.compile(r"```(.*?)(?:```|$)", re.DOTALL)
-    inline_code_pattern = re.compile(r"`(.*?)`", re.DOTALL)
-    code_blocks = code_block_pattern.findall(md_text)
-    non_code_parts = code_block_pattern.split(md_text)[::2]
-
-    result = []
-    for non_code, code in zip(non_code_parts, code_blocks + [""]):
-        if non_code.strip():
-            non_code = normalize_markdown(non_code)
-            if inline_code_pattern.search(non_code):
-                result.append(markdown(non_code, extensions=["tables"]))
-            else:
-                result.append(mdtex2html.convert(non_code, extensions=["tables"]))
-        if code.strip():
-            # _, code = detect_language(code)  # Code highlighting disabled for now; it breaks on large code blocks
-            # code = code.replace("\n\n", "\n")  # Blank-line stripping disabled for now; it breaks on large code blocks
-            code = f"\n```{code}\n\n```"
-            code = markdown_to_html_with_syntax_highlight(code)
-            result.append(code)
-    result = "".join(result)
-    result += ALREADY_CONVERTED_MARK
-    return result
-
-
-def convert_asis(userinput):
-    return (
-        f'<p style="white-space:pre-wrap;">{html.escape(userinput)}</p>'
-        + ALREADY_CONVERTED_MARK
-    )
-
-
-def detect_converted_mark(userinput):
-    try:
-        if userinput.endswith(ALREADY_CONVERTED_MARK):
-            return True
-        else:
-            return False
-    except Exception:
-        return True
-
-
-def detect_language(code):
-    if code.startswith("\n"):
-        first_line = ""
-    else:
-        first_line = code.strip().split("\n", 1)[0]
-    language = first_line.lower() if first_line else ""
-    code_without_language = code[len(first_line) :].lstrip() if first_line else code
-    return language, code_without_language
-
-
-def construct_text(role, text):
-    return {"role": role, "content": text}
-
-
-def construct_user(text):
-    return construct_text("user", text)
-
-
-def construct_system(text):
-    return construct_text("system", text)
-
-
-def construct_assistant(text):
-    return construct_text("assistant", text)
-
-
-def save_file(filename, system, history, chatbot, user_name):
-    logging.debug(f"{user_name} saving chat history...")
-    os.makedirs(os.path.join(HISTORY_DIR, user_name), exist_ok=True)
-    if filename.endswith(".json"):
-        json_s = {"system": system, "history": history, "chatbot": chatbot}
-        print(json_s)
-        with open(os.path.join(HISTORY_DIR, user_name, filename), "w") as f:
-            json.dump(json_s, f)
-    elif filename.endswith(".md"):
-        md_s = f"system: \n- {system} \n"
-        for data in history:
-            md_s += f"\n{data['role']}: \n- {data['content']} \n"
-        with open(os.path.join(HISTORY_DIR, user_name, filename), "w", encoding="utf8") as f:
-            f.write(md_s)
-    logging.debug(f"{user_name} finished saving chat history")
-    return os.path.join(HISTORY_DIR, user_name, filename)
-
-
-def sorted_by_pinyin(lst):
-    return sorted(lst, key=lambda char: lazy_pinyin(char)[0][0])
-
-
-def get_file_names(dir, plain=False, filetypes=[".json"]):
-    logging.debug(f"Getting file name list, dir={dir}, filetypes={filetypes}, plain={plain}")
-    files = []
-    try:
-        for type in filetypes:
-            files += [f for f in os.listdir(dir) if f.endswith(type)]
-    except FileNotFoundError:
-        files = []
-    files = sorted_by_pinyin(files)
-    if files == []:
-        files = [""]
-    logging.debug(f"files are:{files}")
-    if plain:
-        return files
-    else:
-        return gr.Dropdown.update(choices=files)
-
-
-def get_history_names(plain=False, user_name=""):
-    logging.debug(f"Getting history file name list for user {user_name}")
-    return get_file_names(os.path.join(HISTORY_DIR, user_name), plain)
-
-
-def load_template(filename, mode=0):
-    logging.debug(f"Loading template file {filename}, mode={mode} (0: return dict and dropdown, 1: dropdown only, 2: dict only)")
-    lines = []
-    if filename.endswith(".json"):
-        with open(os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8") as f:
-            lines = json.load(f)
-        lines = [[i["act"], i["prompt"]] for i in lines]
-    else:
-        with open(
-            os.path.join(TEMPLATES_DIR, filename), "r", encoding="utf8"
-        ) as csvfile:
-            reader = csv.reader(csvfile)
-            lines = list(reader)
-        lines = lines[1:]
-    if mode == 1:
-        return sorted_by_pinyin([row[0] for row in lines])
-    elif mode == 2:
-        return {row[0]: row[1] for row in lines}
-    else:
-        choices = sorted_by_pinyin([row[0] for row in lines])
-        return {row[0]: row[1] for row in lines}, gr.Dropdown.update(
-            choices=choices
-        )
-
-
-def get_template_names(plain=False):
-    logging.debug("Getting template file name list")
-    return get_file_names(TEMPLATES_DIR, plain, filetypes=[".csv", ".json"])
-
-
-def get_template_content(templates, selection, original_system_prompt):
-    logging.debug(f"Applying template, selection={selection}, original system prompt={original_system_prompt}")
-    try:
-        return templates[selection]
-    except Exception:
-        return original_system_prompt
-
-
-def reset_textbox():
-    logging.debug("Resetting textbox")
-    return gr.update(value="")
-
-
-def reset_default():
-    default_host = shared.state.reset_api_host()
-    retrieve_proxy("")
-    return gr.update(value=default_host), gr.update(value=""), "API host and proxy have been reset"
-
-
-def change_api_host(host):
-    shared.state.set_api_host(host)
-    msg = f"API host changed to {host}"
-    logging.info(msg)
-    return msg
-
-
-def change_proxy(proxy):
-    retrieve_proxy(proxy)
-    os.environ["HTTPS_PROXY"] = proxy
-    msg = f"Proxy changed to {proxy}"
-    logging.info(msg)
-    return msg
-
-
-def hide_middle_chars(s):
-    if s is None:
-        return ""
-    if len(s) <= 8:
-        return s
-    else:
-        head = s[:4]
-        tail = s[-4:]
-        hidden = "*" * (len(s) - 8)
-        return head + hidden + tail
-
-
-def submit_key(key):
-    key = key.strip()
-    msg = f"API key changed to {hide_middle_chars(key)}"
-    logging.info(msg)
-    return key, msg
-
-
-def replace_today(prompt):
-    today = datetime.datetime.today().strftime("%Y-%m-%d")
-    return prompt.replace("{current_date}", today)
-
-
-def get_geoip():
-    try:
-        with retrieve_proxy():
-            response = requests.get("https://ipapi.co/json/", timeout=5)
-        data = response.json()
-    except Exception:
-        data = {"error": True, "reason": "Failed to connect to ipapi"}
-    if "error" in data.keys():
-        logging.warning(f"Unable to get IP address info.\n{data}")
-        if data["reason"] == "RateLimited":
-            return (
-                i18n("您的IP区域:未知。")
-            )
-        else:
-            return i18n("获取IP地理位置失败。原因:") + f"{data['reason']}" + i18n("。你仍然可以使用聊天功能。")
-    else:
-        country = data["country_name"]
-        if country == "China":
-            text = "**Your IP region: China. Please check your proxy settings immediately; using the API from an unsupported region may get your account banned.**"
-        else:
-            text = i18n("您的IP区域:") + f"{country}。"
-        logging.info(text)
-        return text
-
-
-def find_n(lst, max_num):
-    # Return how many of the most recent entries of lst fit within max_num.
-    n = len(lst)
-    total = sum(lst)
-
-    if total < max_num:
-        return n
-
-    for i in range(len(lst)):
-        if total - lst[i] < max_num:
-            return n - i - 1
-        total = total - lst[i]
-    return 1
-
-
-def start_outputing():
-    logging.debug("Showing cancel button, hiding send button")
-    return gr.Button.update(visible=False), gr.Button.update(visible=True)
-
-
-def end_outputing():
-    return (
-        gr.Button.update(visible=True),
-        gr.Button.update(visible=False),
-    )
-
-
-def cancel_outputing():
-    logging.info("Interrupting output...")
-    shared.state.interrupt()
-
-
-def transfer_input(inputs):
-    # Return everything at once to reduce latency.
-    return (
-        inputs,
-        gr.update(value=""),
-        gr.Button.update(visible=False),
-        gr.Button.update(visible=True),
-    )
-
-
-def run(command, desc=None, errdesc=None, custom_env=None, live=False):
-    if desc is not None:
-        print(desc)
-    if live:
-        result = subprocess.run(command, shell=True, env=os.environ if custom_env is None else custom_env)
-        if result.returncode != 0:
-            raise RuntimeError(f"""{errdesc or 'Error running command'}.
-Command: {command}
-Error code: {result.returncode}""")
-
-        return ""
-    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ if custom_env is None else custom_env)
-    if result.returncode != 0:
-        message = f"""{errdesc or 'Error running command'}.
-Command: {command}
-Error code: {result.returncode}
-stdout: {result.stdout.decode(encoding="utf8", errors="ignore") if len(result.stdout)>0 else '<empty>'}
-stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.stderr)>0 else '<empty>'}
-"""
-        raise RuntimeError(message)
-    return result.stdout.decode(encoding="utf8", errors="ignore")
-
-
-def versions_html():
-    git = os.environ.get('GIT', "git")
-    python_version = ".".join([str(x) for x in sys.version_info[0:3]])
-    try:
-        commit_hash = run(f"{git} rev-parse HEAD").strip()
-    except Exception:
-        commit_hash = "<none>"
-    if commit_hash != "<none>":
-        short_commit = commit_hash[0:7]
-        commit_info = f"<a style=\"text-decoration:none\" href=\"https://github.com/GaiZhenbiao/ChuanhuChatGPT/commit/{short_commit}\">{short_commit}</a>"
-    else:
-        commit_info = "unknown \U0001F615"
-    return f"""
-Python: <span title="{sys.version}">{python_version}</span>
- • 
-Gradio: {gr.__version__}
- • 
-Commit: {commit_info}
-"""
-
-
-def add_source_numbers(lst, source_name="Source", use_source=True):
-    if use_source:
-        return [f'[{idx+1}]\t "{item[0]}"\n{source_name}: {item[1]}' for idx, item in enumerate(lst)]
-    else:
-        return [f'[{idx+1}]\t "{item}"' for idx, item in enumerate(lst)]
-
-
-def add_details(lst):
-    nodes = []
-    for index, txt in enumerate(lst):
-        brief = txt[:25].replace("\n", "")
-        nodes.append(
-            f"<details><summary>{brief}...</summary><p>{txt}</p></details>"
-        )
-    return nodes
-
-
-def sheet_to_string(sheet, sheet_name=None):
-    result = []
-    for index, row in sheet.iterrows():
-        row_string = ""
-        for column in sheet.columns:
-            row_string += f"{column}: {row[column]}, "
-        row_string = row_string.rstrip(", ")
-        row_string += "."
-        result.append(row_string)
-    return result
-
-
-def excel_to_string(file_path):
-    # Read all worksheets from the Excel file.
-    excel_file = pd.read_excel(file_path, engine='openpyxl', sheet_name=None)
-
-    # Initialize the result list.
-    result = []
-
-    # Iterate over each worksheet.
-    for sheet_name, sheet_data in excel_file.items():
-        # Process the current worksheet and append it to the result.
-        result += sheet_to_string(sheet_data, sheet_name=sheet_name)
-
-    return result
-
-
-def get_last_day_of_month(any_day):
-    # The day 28 exists in every month. 4 days later, it's always next month
-    next_month = any_day.replace(day=28) + datetime.timedelta(days=4)
-    # subtracting the number of the current day brings us back one month
-    return next_month - datetime.timedelta(days=next_month.day)
-
-
-def get_model_source(model_name, alternative_source):
-    if model_name == "gpt2-medium":
-        return "https://huggingface.co/gpt2-medium"
-
-
-def refresh_ui_elements_on_load(current_model, selected_model_name):
-    return toggle_like_btn_visibility(selected_model_name)
-
-
-def toggle_like_btn_visibility(selected_model_name):
-    if selected_model_name == "xmchat":
-        return gr.update(visible=True)
-    else:
-        return gr.update(visible=False)
 
 
spaces/ArtGAN/Video-Diffusion-WebUI/video_diffusion/utils/model_list.py DELETED
@@ -1,6 +0,0 @@
-stable_model_list = [
-    "runwayml/stable-diffusion-v1-5",
-    "stabilityai/stable-diffusion-2-1",
-    # "prompthero/openjourney-v4",
-    "cerspense/zeroscope_v2_576w"
-]
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/__init__.py DELETED
@@ -1,24 +0,0 @@
-"""distutils
-
-The main package for the Python Module Distribution Utilities. Normally
-used from a setup script as
-
-   from distutils.core import setup
-
-   setup (...)
-"""
-
-import sys
-import importlib
-
-__version__ = sys.version[: sys.version.index(' ')]
-
-
-try:
-    # Allow Debian and pkgsrc (only) to customize system
-    # behavior. Ref pypa/distutils#2 and pypa/distutils#16.
-    # This hook is deprecated and no other environments
-    # should use it.
-    importlib.import_module('_distutils_system_mod')
-except ImportError:
-    pass
 
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/Makefile DELETED
@@ -1,19 +0,0 @@
-# Minimal makefile for Sphinx documentation
-# Copyright (c) Facebook, Inc. and its affiliates.
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-SOURCEDIR     = .
-BUILDDIR      = _build
-
-# Put it first so that "make" without argument is like "make help".
-help:
-	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-.PHONY: help Makefile
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
-	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
 
 
spaces/Awiny/Image2Paragraph/models/segment_models/configs/__init__.py DELETED
@@ -1 +0,0 @@
-from . import *
 
 
spaces/Awiny/Image2Paragraph/models/segment_models/semgent_anything_model.py DELETED
@@ -1,29 +0,0 @@
-import cv2
-from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
-from utils.util import resize_long_edge_cv2
-
-class SegmentAnything:
-    def __init__(self, device, arch="vit_b"):
-        self.device = device
-        if arch == 'vit_b':
-            pretrained_weights = "pretrained_models/sam_vit_b_01ec64.pth"
-        elif arch == 'vit_l':
-            pretrained_weights = "pretrained_models/sam_vit_l_0e2f7b.pth"
-        elif arch == 'vit_h':
-            pretrained_weights = "pretrained_models/sam_vit_h_0e2f7b.pth"
-        else:
-            raise ValueError(f"arch {arch} not supported")
-        self.model = self.initialize_model(arch, pretrained_weights)
-
-    def initialize_model(self, arch, pretrained_weights):
-        sam = sam_model_registry[arch](checkpoint=pretrained_weights)
-        sam.to(device=self.device)
-        mask_generator = SamAutomaticMaskGenerator(sam)
-        return mask_generator
-
-    def generate_mask(self, img_src):
-        image = cv2.imread(img_src)
-        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-        image = resize_long_edge_cv2(image, 384)
-        anns = self.model.generate(image)
-        return anns
 
 
spaces/Benson/text-generation/Examples/ .md DELETED
@@ -1,63 +0,0 @@
-<br />
-<h1>How to Download NBA Basketball Videos for Free</h1>
-<p>If you are a basketball fan, you probably love watching NBA games and highlights. The NBA is the most prestigious and popular basketball league in the world, with the best players, teams, and competitions. Whether you want to catch up on the latest scores, relive the most memorable moments, or learn from the pros, watching NBA videos is a great way to enjoy the sport.</p>
-<h2>Download Snapchat</h2><br /><p><b><b>Download Zip</b> &gt; <a href="https://bltlly.com/2v6Ly1">https://bltlly.com/2v6Ly1</a></b></p><br /><br />
-<p>But what if you do not have access to live TV or streaming services? What if you want to watch NBA videos offline or on different devices? What if you want to edit or share your own NBA video creations? In these cases, you may want to download NBA basketball videos for free from the Internet.</p>
-<p>Downloading NBA videos can give you more flexibility and convenience in watching and using them. You can save them to your computer, phone, tablet, or other devices, and watch them anytime and anywhere without an Internet connection. You can also edit them with your favorite software, add your own commentary or music, or create your own highlight reels. You can also share them with your friends, family, or fellow fans on social media or other platforms.</p>
-<p>But how do you download NBA basketball videos for free? Where can you find them? What tools do you need? How do you ensure the best quality and format? In this article, we will answer these questions and more. We will show you the best sites for finding free NBA basketball videos, and the best ways to download them without quality loss. We will also give you some tips and suggestions on how to enjoy and use your downloaded NBA videos. Let's get started!</p>
-<h2>The Best Sites for Finding Free NBA Basketball Videos</h2>
-<p>To avoid these problems, we recommend using only reputable and trustworthy sites that provide legal, high-quality NBA video content. Here are some of the best sites we suggest:</p>
-<p></p>
-<h3>YouTube</h3>
-<p>YouTube is the most popular video-sharing platform in the world, and it has a huge collection of NBA basketball videos. You can find almost any kind of NBA video on YouTube, such as full-game highlights, playoffs, live streams, news, finals, interviews, documentaries, analysis, and so on.</p>
-<p>To search for NBA videos on YouTube, […] without quality loss is Cisdem Video Converter. Cisdem Video Converter is a powerful and versatile video converter, downloader, editor, and DVD ripper for Mac. It can download NBA videos from YouTube, NBA.com, Vimeo, and any other site with ease. It can also edit and convert downloaded NBA videos to any format you want, such as MP4, MOV, AVI, MKV, and so on.</p>
-<p>Here is how to use Cisdem Video Converter to download NBA basketball videos without quality loss:</p>
-<ol>
-<li>Download and install Cisdem Video Converter on your Mac from <a href="">here</a>.</li>
-<li>Launch Cisdem Video Converter and switch to the "Download" tab.</li>
-<li>Go to the site where you want to download NBA videos, such as YouTube, NBA.com, or Vimeo, and copy the video URL.</li>
-<li>Paste the URL into the box in Cisdem Video Converter and click the download icon.</li>
-<li>Wait for the download to finish. You can see the progress and status in the interface.</li>
-<li>Once the download is done, you can find the downloaded NBA video in the "Downloaded" folder.</li>
-<li>If you want to edit or convert the downloaded NBA video, you can switch to the "Convert" tab and drag and drop the video into the interface.</li>
-<li>You can use the built-in editor to trim, crop, rotate, or add watermarks, subtitles, effects, and more to the video.</li>
-<li>You can also choose an output format from the presets or customize your own settings.</li>
-<li>Once the conversion is done, you can find the converted NBA video in the "Converted" folder.</li>
-</ol>
-<h3>Using 4K Video Downloader for Windows</h3>
-<p>If you are a Windows user, one of the best tools for downloading NBA basketball videos without quality loss is 4K Video Downloader. 4K Video Downloader is a simple and fast video downloader that can download NBA videos from YouTube and other sites in high quality. You can also adjust the quality and format of the downloaded NBA videos to your preferences.</p>
-<p>Here is how to use 4K Video Downloader to download NBA basketball videos without quality loss:</p>
-<ol>
-<li>Download and install 4K Video Downloader on your Windows PC from <a href="">here</a>.</li>
-<li>Launch 4K Video Downloader and click the "Paste Link" button in the top-left corner.</li>
-<li>Go to the site where you want to download NBA videos, such as YouTube, NBA.com, or Vimeo, and copy the video URL.</li>
-<li>The URL will be pasted into 4K Video Downloader automatically and analyzed.</li>
-<li>You can choose the quality and format of the downloaded NBA video from the pop-up window. You can also download subtitles or annotations if they are available.</li>
-<li>Click the "Download" button to start the download. You can see the progress and status in the interface.</li>
-<li>Once the download is done, you can find the downloaded NBA video in the "Videos" folder.</li>
-</ol>
-<h2>Conclusion</h2>
-<p>In this article, we have shown you how to download NBA basketball videos for free from the Internet. We have also given you some tips and suggestions on how to enjoy and use your downloaded NBA videos. We hope you found this article helpful and informative.</p>
-<p>Do you have any questions or comments about downloading NBA basketball videos for free? Do you have other sites or tools you recommend for downloading NBA videos? Do you have any favorite NBA videos you want to share with us? Please feel free to leave a comment below. We would love to hear from you!</p>
-<h2>FAQ</h2>
-<h3>Is it legal to download NBA videos from the Internet?</h3>
-<p>It depends on the source and purpose of downloading the NBA videos. In general, downloading NBA videos from official sites or channels, such as NBA.com or YouTube, is legal as long as you use them for personal, non-commercial purposes. However, downloading NBA videos from unauthorized or pirated sites, such as torrent or streaming sites, may be illegal and may violate copyright laws or the terms of service of the original sources.</p>
-<h3>How can I watch downloaded NBA videos offline?</h3>
-<p>You can watch downloaded NBA videos offline by transferring them to your preferred device, such as your computer, phone, tablet, or TV. You can use a USB cable, a wireless connection, or a cloud service to transfer the downloaded NBA videos. You can also use a media player or a video converter to play the downloaded NBA videos on your device.</p>
-<h3>How can I make my own NBA highlight videos?</h3>
-<p>You can make your own NBA highlight videos by editing and combining downloaded NBA videos with your favorite software, such as iMovie, Windows Movie Maker, Adobe Premiere Pro, and so on. You can also add your own commentary, music, effects, transitions, and more to make your NBA highlight videos more personalized and creative.</p>
-<h3>Where can I find more NBA video resources and tips?</h3>
-<p>You can find more NBA video resources and tips on various online platforms, such as blogs, forums, podcasts, social media, and so on. Some examples are:</p>
-<ul>
-<li><a href="">NBA Video Blog</a>: A blog featuring NBA video news, reviews, tutorials, and more.</li>
-<li><a href="">NBA Video Podcast</a>: A podcast covering NBA video topics, such as analysis, commentary, interviews, etc.</li>
-<li><a href="">NBA Video Social Media</a>: A social media platform that connects NBA video fans with each other and with official NBA accounts.</li>
-</ul>
-<h3>How can I support my favorite NBA teams and players?</h3>
-<p>You can support your favorite NBA teams and players by following their official sites and channels, such as their websites, social media accounts, YouTube channels, and so on. You can also buy their official merchandise, such as jerseys, hats, posters, and more. You can also watch their games live or via online or offline streams. You can also join their fan clubs or communities, online or offline.</p> 64aa2da5cf<br />
-<br />
-<br />
 
 
spaces/Benson/text-generation/Examples/101 Yzbir Okey Plus Apk.md DELETED
@@ -1,80 +0,0 @@
-<br />
-<h1>What is 101 yüzbir okey plus apk?</h1>
-<p>101 yüzbir okey plus apk is a popular tile-based game that originated in Turkey and is played by millions of people around the world. It is a variant of rummy that uses a set of 106 tiles instead of cards. The tiles are numbered 1 to 13 in four different colors: red, yellow, green, and black. There are also two special tiles with a clover symbol, called the false jokers.</p>
-<p>The game is played online over 3G, 4G, Edge, or Wi-Fi with your friends or against more than 1,000,000 users. You can also play offline against advanced artificial intelligence. The game is free, but you can also buy extra chips and in-game items.</p>
-<h2>101 yüzbir okey plus apk</h2><br /><p><b><b>Download</b> >>> <a href="https://bltlly.com/2v6M31">https://bltlly.com/2v6M31</a></b></p><br /><br />
-<h2>How to play 101 yüzbir okey plus apk?</h2>
-<h3>The rules of the game</h3>
-<p>The game is usually played by four players, but it can also be played by two or three players. Each player receives 21 tiles at the start of the game, except the dealer, who receives 22 tiles. The dealer is chosen at random at the beginning and changes after each round.</p>
-<p>The remaining tiles are placed face down on the table and shuffled. Then, 21 stacks of five tiles each are formed. One tile is left over and is kept by the dealer. Next, the dealer rolls a die to determine which stack will be used to select the face-up tile that will determine the joker for the game.</p>
-<p>The face-up tile is placed on top of the selected stack, and its color and value indicate the joker. The joker is the tile that has the same color and a value one higher than the face-up tile. For example, if the face-up tile is a red 5, then the joker is a red 6. If the face-up tile is a black 13, then the joker is a black 1.</p>
-<h3>The joker and the false joker</h3>
-<p>The false jokers are not substitutes for any tile. They have their own value and color, as indicated by their number and clover symbol. For example, if the face-up tile is a red 5, then the false jokers are green 5s.</p>
-<h3>The winning hand</h3>
-<p>The objective of the game is to be the first to form a winning hand of 14 tiles consisting entirely of sets and runs. You can also win with seven pairs of identical tiles.</p>
-<p>On each turn, you must draw a tile from the top of an unselected stack or from the previous player's discard pile. You must then discard an unwanted tile face up next to your stacks.</p>
-<p>If you have a winning hand, you can end the game by exposing all your tiles after discarding your last tile on top of an unselected stack. You must announce "Okey" when you do so.</p>
-<h2>How to download and install 101 yüzbir okey plus apk?</h2>
-<h3>Requirements and compatibility</h3>
-<p>To download and install 101 yüzbir okey plus apk, you need an Android device running Android 4.1 or higher. You also need at least 95 MB of free storage space on your device. The game is compatible with most Android devices, including tablets and phones.</p>
-<h3>Steps to download and install</h3>
-<p>There are two ways to download and install 101 yüzbir okey plus apk on your device. You can use the Google Play Store or a third-party website that provides the apk file.</p>
-<p>If you use the Google Play Store, you just need to follow these steps:</p>
-<ol>
-<li>Open the Google Play Store app on your device and search for "101 yüzbir okey plus".</li>
-<li>Select the game from the list of results and tap "Install".</li>
-<li>Wait for the download and installation to complete.</li>
-<li>Launch the game and enjoy playing.</li>
-</ol>
-<p>If you use a third-party website, you need to follow these steps:</p>
-<ol>
-<li>Download the apk file to your device.</li>
-<li>Go to your device settings and allow the installation of apps from unknown sources.</li>
-<li>Locate the apk file on your device and tap it to install it.</li>
-<li>Launch the game and enjoy playing.</li>
-</ol>
-<h2>Why play 101 yüzbir okey plus apk?</h2>
-<h3>The features and benefits of the game</h3>
-<p>101 yüzbir okey plus apk is a fun and addictive game that offers many features and benefits to its players. Some of them are:</p>
-<ul>
-<li>You can play online with your friends or against millions of other players from different countries and regions.</li>
-<li>You can chat with other players during the game and send them gifts, emojis, and stickers.</li>
-<li>You can customize your profile, avatar, table, and tiles with various options and themes.</li>
-<li>You can join or create clubs and compete with other clubs in tournaments and leaderboards.</li>
-<li>You can earn free chips every day by completing missions, watching videos, spinning the wheel, or inviting your friends.</li>
-<li>You can buy extra chips and in-game items with real money or using various payment methods.</li>
-</ul>
-<h3>The challenges and tips of the game</h3>
-<p>101 yüzbir okey plus apk is not just a game of luck, but also a game of skill and strategy. You have to pay attention to the tiles on the table, the discard pile, and your opponents' moves. You also need to plan ahead and use your jokers wisely. Here are some challenges and tips that can help you improve your game:</p>
-<ul>
-<li>The challenge: The game can be very fast-paced and competitive, especially when you play online against experienced players. You need to be quick and alert to avoid missing opportunities or making mistakes.</li>
-<li>The tip: Practice offline against the artificial intelligence, or play online at lower stakes until you become familiar with the game. You can also watch tutorials or videos of other players to learn from their strategies.</li>
-<li>The tip: Do not let your emotions affect your decisions or actions. Stay calm and focus on your goal. Remember that every round is a new opportunity to win. You can also take breaks or change tables if you feel stressed or bored.</li>
-<li>The challenge: The game can be addictive and tempting, especially when you play online with real money or in-game items. You need to be responsible and cautious to avoid losing more than you can afford or getting into trouble.</li>
-<li>The tip: Set a budget and a time limit for yourself before you start playing. Do not chase your losses or bet more than you can handle. Do not play when you are tired, drunk, or distracted. If you have a gambling problem, seek help from a professional or a support group.</li>
-</ul>
-<h2>Conclusion</h2>
-<h3>Summary of the main points</h3>
-<p>In conclusion, 101 yüzbir okey plus apk is a great game that combines fun, skill, and strategy. It is a variant of rummy that uses tiles instead of cards. It can be played online or offline, with your friends or against artificial intelligence. You can download and install the game for free on your Android device, either from the Google Play Store or from a third-party website. You can also enjoy the features and benefits of the game, such as chatting, customization, joining clubs, earning chips, and buying items. However, you should also be aware of the challenges and tips of the game, such as being quick, patient, responsible, and cautious. Playing 101 yüzbir okey plus apk can be a great way to have fun and improve your skills.</p>
-<h3>Call to action and invitation to play</h3>
-<h2>FAQ</h2>
-<p>Here are some frequently asked questions about 101 yüzbir okey plus apk:</p>
-<ol>
-<li>What is the difference between 101 yüzbir okey plus apk and other okey games?</li>
-<p>101 yüzbir okey plus apk is a variant of okey with some unique features and rules. For example, it uses 106 tiles instead of 104, it has two false jokers instead of one, it requires a winning hand of 14 tiles instead of 15, and it allows winning with seven pairs.</p>
-<li>How can I get more chips in 101 yüzbir okey plus apk?</li>
-<p>You can get more chips in 101 yüzbir okey plus apk by completing missions, watching videos, spinning the wheel, inviting your friends, or buying them with real money or other payment methods.</p>
-<li>How can I contact the support team of 101 yüzbir okey plus apk?</li>
-<p>You can contact the support team of 101 yüzbir okey plus apk by sending an email to [support email] or filling out the form at [support website]. You can also visit their Facebook page or Twitter account for more information and updates.</p>
-<li>How can I play 101 yüzbir okey plus apk on my PC or laptop?</li>
-<p>You can play 101 yüzbir okey plus apk on your PC or laptop by using an Android emulator, such as BlueStacks or NoxPlayer. Just download and install the emulator on your PC or laptop, then download and install the game from the Google Play Store or a third-party website.</p>
-<li>Is 101 yüzbir okey plus apk safe?</li>
-<p>Yes, 101 yüzbir okey plus apk is safe. It does not contain any viruses, malware, spyware, or other harmful elements. It also does not collect or share any personal or sensitive information from its users. It only requires some permissions to access your device's features, such as the network connection, storage space, camera, microphone, etc.</p>
-</ol></p> 64aa2da5cf<br />
-<br />
-<br />
 
 
spaces/Benson/text-generation/Examples/Call Of Duty Black Ops 2 Descarga Mvil.md DELETED
@@ -1,102 +0,0 @@
-<br />
-<h1>Call of Duty Black Ops 2 Mobile Download: How to Play the Classic FPS on Your Phone</h1>
-<p>Call of Duty Black Ops 2 is one of the most beloved and influential games in the history of first-person shooters. Released in 2012, it was the ninth installment in the Call of Duty franchise and the sequel to the original Black Ops. It featured a futuristic setting, a branching story, a diverse multiplayer mode, and a thrilling zombies mode. It was praised by critics and fans for its gameplay, graphics, sound, and innovation.</p>
-<p>If you are a fan of Call of Duty Black Ops 2 or want to experience it for the first time, you do not need a console or a PC to play it. You can play it on your mobile device thanks to Call of Duty Mobile, a free app that brings the best of Call of Duty to your phone. In this article, we will show you how to download Call of Duty Mobile and access the Black Ops 2 maps and modes on your phone.</p>
-<h2>call of duty black ops 2 mobile download</h2><br /><p><b><b>Download</b> &#9999; &#9999; &#9999; <a href="https://bltlly.com/2v6Lgs">https://bltlly.com/2v6Lgs</a></b></p><br /><br />
-<h2>What is Call of Duty Black Ops 2?</h2>
-<p>Call of Duty Black Ops 2 is a first-person shooter that follows two interconnected storylines: one set in the late 1980s during the Cold War, and one set in 2025 during a new Cold War. The game switches between these two timelines as you play as different characters involved in a conflict between the United States and China over a rare-earth mineral called Celerium. The game also features multiple endings based on your choices and actions throughout the game.</p>
-<p>Call of Duty Black Ops 2 has three main modes: multiplayer, zombies, and campaign. The multiplayer mode lets you compete with other players online across various game modes and maps. The zombies mode lets you team up with other players, or play solo, against waves of undead enemies in different scenarios. The campaign mode lets you follow the game's story and make choices that affect the outcome.</p>
-<h2>Why is Call of Duty Black Ops 2 popular?</h2>
-<p>Call of Duty Black Ops 2 is popular for many reasons. First, it has a loyal fan base that enjoys the game's story, characters, and atmosphere. The game has memorable moments and characters, such as Frank Woods, Raul Menendez, and David Mason. The game also has rich lore and a backstory that connect to the previous Black Ops game and other Call of Duty games.</p>
-<p>Second, it has a fun and addictive multiplayer mode that offers plenty of content and customization. The game has dozens of maps, modes, weapons, attachments, perks, scorestreaks, and more. The game also has a ranking system that rewards you for your performance and progress. It also has a competitive scene that attracts many players who want to test their skills and strategies.</p>
-<p>Third, it has an exciting and challenging zombies mode that provides endless entertainment and co-op action. The game has several zombies maps, each with its own story, secrets, Easter eggs, and objectives. The game also has different zombies modes, such as Survival, Grief, Turned, and Origins. It also has a variety of zombie enemies, such as crawlers, dogs, bosses, and more.</p>
-<h2>How to download Call of Duty Mobile</h2>
-<p>Call of Duty Mobile is a free app that lets you play Call of Duty on your mobile device. It was released in 2019 by Activision and Tencent Games. It features many elements of the Call of Duty franchise, including characters, weapons, maps, modes, and more. It also features exclusive content and events that are updated regularly.</p>
-<p>To download Call of Duty Mobile on your Android or iOS device, you need to follow these steps:</p>
-<ol>
-<li>Go to the Google Play Store or the App Store on your device.</li>
-<li>Search for Call of Duty Mobile or use these links: <a href="">Android</a> | <a href="">iOS</a>.</li>
-<li>Tap the Install or Get button and wait for the app to download.</li>
-<li>Enjoy playing Call of Duty Mobile on your phone.</li>
-</ol>
-<p>Note: Call of Duty Mobile requires an Internet connection and at least 2 GB of RAM to run smoothly. It also requires at least 1.5 GB of free storage space on your device. It is recommended to use a Wi-Fi connection or a stable mobile data plan to avoid lag or disconnection issues.</p>
-<h2>How to access the Black Ops 2 maps and modes in Call of Duty Mobile</h2>
-<p>If you want to play Call of Duty Black Ops 2 on your phone, you can do so by accessing the Black Ops 2 maps and modes in Call of Duty Mobile. These are available in the app's multiplayer mode and zombies mode. Here are the ways to access them:</p>
-<h3>Multiplayer mode</h3>
-<p>The multiplayer mode of Call of Duty Mobile lets you play with or against other players online across various game modes and maps. You can choose between different loadouts, operators, scorestreaks, and more. You can also customize your settings, such as sensitivity, controls, graphics, and sound.</p>
-<h4>Maps</h4>
-<p>The multiplayer mode of Call of Duty Mobile has many maps you can play on. Some of these maps are from Call of Duty Black Ops 2, such as:</p>
-<ul>
-<li>Nuketown: A small map located at a nuclear test site with two houses facing each other.</li>
-<li>Raid: A medium-sized map located at a Hollywood mansion with a pool, a garage, and a basketball court.</li>
-<li>Standoff: A medium-sized map in a border town with a gas station, a market, and a church.</li>
-<li>Hijacked: A small map on a luxury yacht with a helipad, a jacuzzi, and a bar.</li>
-<li>Meltdown: A medium-sized map at a nuclear power plant with a cooling tower, a reactor, and a control room.</li>
-</ul>
-<p>You can select these maps by tapping the map icon in the top-right corner of the multiplayer mode screen. You can also filter the maps by category, such as featured, classic, or seasonal.</p>
-<h4>Modes</h4>
-<ul>
-<li>Team Deathmatch: A mode where two teams of five players compete to get the most kills within a time limit.</li>
-<li>Domination: A mode where two teams of five players compete to capture and hold three flags on the map.</li>
-<li>Kill Confirmed: A mode where two teams of five players compete to get the most kills and collect the dog tags of fallen enemies.</li>
-<li>Hardpoint: A mode where two teams of five players compete to capture and hold a rotating objective on the map.</li>
-<li>Search and Destroy: A mode where two teams of five players take turns attacking and defending two bomb sites on the map.</li>
-</ul>
-<p>You can select these modes by tapping the mode icon in the top-right corner of the multiplayer mode screen. You can also filter the modes by category, such as core, featured, or ranked.</p>
-<h3>Zombies mode</h3>
-<p>The zombies mode of Call of Duty Mobile lets you play with or against other players or bots in various scenarios involving zombies. You can choose between different loadouts, operators, perks, and more. You can also customize your settings, such as difficulty, rounds, and health.</p>
52
- <h4>Mapas</h4>
53
- <p>El modo zombis de Call of Duty Mobile tiene varios mapas en los que puedes jugar. Algunos de estos mapas son de Call of Duty Black Ops 2, como:</p>
54
- <ul>
55
- <li>TranZit: Un mapa grande que consta de varias ubicaciones conectadas por una ruta de autobús. Puede viajar entre los lugares en autobús o caminando por la niebla. </li>
56
- <li>Die Rise: un mapa vertical que se encuentra en un rascacielos desmoronado en China. Puede usar ascensores, trampolines y ejes para moverse por el mapa. </li>
57
- <li>Enterrado: Un mapa subterráneo que se encuentra en un antiguo pueblo del oeste enterrado bajo tierra. Puedes usar túneles, carros de minas y un gigante para acceder a diferentes áreas del mapa. </li>
58
- </ul>
59
-
60
- <h4>Modos</h4>
61
- <p>El modo zombis de Call of Duty Mobile tiene diferentes modos en los que puedes jugar. Algunos de estos modos son de Call of Duty Black Ops 2, como:</p>
62
- <ul>
63
- <li>Supervivencia: Un modo en el que tienes que sobrevivir el mayor tiempo posible contra interminables oleadas de zombies. Puedes comprar armas, beneficios y otros artículos del mapa para ayudarte a sobrevivir. </li>
64
- <li>Duelo: un modo en el que dos equipos de cuatro jugadores compiten para sobrevivir más tiempo que el otro equipo. También puedes sabotear al otro equipo usando carne, granadas o trampas. </li>
65
- <li>Turned: Un modo donde un jugador es un humano y los otros son zombies. El humano tiene que sobrevivir el mayor tiempo posible mientras los zombies tienen que matarlo. El zombi que mata al humano se convierte en el nuevo humano. </li>
66
- <li>Origins: un modo que se basa en el mapa de Origins de Black Ops 2. Cuenta con cuatro personajes de la historia original de zombies que tienen que luchar contra zombies y robots gigantes en un entorno de la Primera Guerra Mundial. </li>
67
- </ul>
68
- <p>Puede seleccionar estos modos pulsando en el icono de modo en la esquina superior derecha de la pantalla del modo zombis. También puede filtrar los modos por categoría, como clásico o hardcore. </p>
69
- <h3>Modo Battle Royale</h3>
70
- <p>El modo battle royale de Call of Duty Mobile te permite jugar con o contra otros jugadores o bots en un mapa grande que se reduce con el tiempo. Puede elegir entre diferentes cargas, operadores, vehículos y más. También puedes personalizar tus ajustes, como perspectiva, tamaño de escuadrón y botín. </p>
71
- <h4>Mapa</h4>
72
- <p>El modo battle royale de Call of Duty Mobile tiene un mapa en el que puedes jugar. El mapa se llama Aislado y se compone de varios lugares de diferentes juegos de Call of Duty. Algunos de estos lugares son de Call of Duty Black Ops 2, como:</p>
73
- <ul>
74
- <li>D ock: Un pequeño mapa situado en una isla prisión con un faro, un bloque de celdas y un puente. </li>
75
- <li>Granja: Un mapa de tamaño mediano ubicado en una zona rural con un granero, una granja y un molino de viento. </li>
76
-
77
- <li>Standoff: Un mapa de tamaño mediano en una ciudad fronteriza con una gasolinera, un mercado y una iglesia. </li>
78
- <li>Nuketown Island: Un mapa grande que combina Nuketown y Nuketown 2025 con un búnker subterráneo y una instalación de pruebas. </li>
79
- </ul>
80
- <p>Puedes explorar estos lugares en paracaídas desde un avión, conduciendo varios vehículos o usando tirolinas. También puedes saquear armas, armaduras, municiones y otros objetos del mapa para ayudarte a sobrevivir. </p>
81
- <h4>Modo</h4>
82
- <p>El modo battle royale de Call of Duty Mobile tiene un modo en el que puedes jugar. El modo se llama Battle Royale y es similar al Blackout de Call of Duty Black Ops 4. Cuenta con hasta 100 jugadores que tienen que luchar entre sí hasta que solo quede un jugador o equipo. El modo también cuenta con eventos especiales, como lanzamientos de aire, zombies y jefes. </p>
83
- <p>Puedes jugar el modo solo, dúo o escuadrón. También puedes elegir tu clase de operador, como médico, explorador, ninja o defensor. También puedes usar beneficios, habilidades y puntajes para obtener una ventaja sobre tus enemigos. </p>
84
- <h1>Conclusión</h1>
85
- <p>Call of Duty Black Ops 2 es un clásico juego de FPS que puedes jugar en tu dispositivo móvil gracias a Call of Duty Mobile. Puedes disfrutar de los modos multijugador, zombis y campaña del juego en tu teléfono con los mismos o similares mapas y modos del juego original. También puedes experimentar la ambientación futurista del juego, la historia ramificada y múltiples finales en tu teléfono. También puedes jugar el modo battle royale del juego con ubicaciones de Black Ops 2 en tu teléfono. </p>
86
- <p>Si eres un fan de Call of Duty Black Ops 2 o quieres probarlo por primera vez, deberías descargar Call of Duty Mobile y reproducirlo en tu teléfono. Es gratis para jugar y fácil de instalar. También es divertido y adictivo para jugar. Es la mejor manera de disfrutar de la experiencia FPS clásica en su dispositivo móvil. </p>
87
- <h2>Preguntas frecuentes</h2>
88
- <p>Aquí hay algunas preguntas frecuentes sobre Call of Duty Black Ops 2 Mobile Descargar:</p>
89
- <ol>
90
-
91
- <li>A: No, Call of Duty Mobile no es lo mismo que Call of Duty Black Ops 2. Call of Duty Mobile es una aplicación separada que cuenta con elementos de diferentes juegos de Call of Duty, incluyendo Black Ops 2. Sin embargo, puedes jugar algunos de los mapas y modos de Black Ops 2 en Call of Duty Mobile.</li>
92
- <li>Q: ¿Puedo jugar Call of Duty Black Ops 2 en mi teléfono sin descargar Call of Duty Mobile? </li>
93
- <li>A: No, no puedes jugar Call of Duty Black Ops 2 en tu teléfono sin descargar Call of Duty Mobile. No hay una versión móvil oficial de Call of Duty Black Ops 2. La única forma de reproducirlo en tu teléfono es descargando Call of Duty Mobile y accediendo a los mapas y modos de Black Ops 2 en la aplicación. </li>
94
- <li>Q: ¿Cuánto espacio ocupa Call of Duty Mobile en mi teléfono? </li>
95
- <li>A: Call of Duty Mobile ocupa aproximadamente 1,5 GB de espacio en su teléfono. Sin embargo, esto puede variar dependiendo del modelo de dispositivo y el sistema operativo. También puede necesitar espacio adicional para actualizaciones y contenido adicional. </li>
96
- <li>Q: ¿Puedo jugar Call of Duty Mobile sin conexión? </li>
97
- <li>A: No, no puedes jugar Call of Duty Mobile sin conexión. Necesitas una conexión a Internet para jugar. Puede usar Wi-Fi o datos móviles para conectarse a los servidores del juego. </li>
98
- <li>Q: ¿Puedo jugar Call of Duty Mobile con mis amigos? </li>
99
- <li>A: Sí, puedes jugar a Call of Duty Mobile con tus amigos. Puedes invitarlos a unirse a tu lobby o unirse a su lobby en el juego. También puedes chatear con ellos usando mensajes de voz o de texto en el juego. </li>
100
- </ol></p> 64aa2da5cf<br />
101
- <br />
102
- <br />
spaces/Benson/text-generation/Examples/Cmo Descargar Hill Climb Racing 2 En PC.md DELETED
@@ -1,57 +0,0 @@
- <h1>How to download Hill Climb Racing 2 on PC</h1>
- <p>Hill Climb Racing 2 is one of the most popular and addictive racing games on Android. It features a variety of vehicles, tracks, modes, and challenges that will keep you entertained for hours. But did you know you can also play it on your PC? Playing Hill Climb Racing 2 on PC has many advantages, such as a bigger screen, better graphics, smoother gameplay, and more comfortable controls. You can also save your phone's battery life and storage space by playing on your PC. In this article, we'll show you how to download Hill Climb Racing 2 on PC using different methods. Whether you want to use the Microsoft Store, an Android emulator, or a gaming platform, we've got you covered. Follow these simple steps and enjoy Hill Climb Racing 2 on your PC.</p>
- <h2>Method 1: Using the Microsoft Store</h2>
- <p>The Microsoft Store offers a convenient way to download Hill Climb Racing 2 on your PC. It is a digital distribution platform that gives you access to a range of Windows apps and games. Here is how to use it:</p>
- <h2>How to download Hill Climb Racing 2 on PC</h2><p><b>DOWNLOAD: <a href="https://bltlly.com/2v6Kxw">https://bltlly.com/2v6Kxw</a></b></p>
- <ol>
- <li>Open the Microsoft Store app on your PC. You can find it in the Start menu or by pressing Windows Key + S and typing "Microsoft Store".</li>
- <li>Search for Hill Climb Racing 2 in the search bar and click on it.</li>
- <li>Click the Get or Buy button to download and install the game. If the game is free, you can download it without any payment; if it is paid, you will need to enter your payment details or use a gift card.</li>
- <li>Launch the game from the Start menu or the Store app. You can also pin it to your taskbar or desktop for easy access.</li>
- </ol>
- <p>Congratulations, you have successfully downloaded Hill Climb Racing 2 on your PC using the Microsoft Store. Enjoy the game and have fun.</p>
- <h2>Method 2: Using the BlueStacks emulator</h2>
-
- <ol>
- <li>Download and install the BlueStacks emulator from its official website: <a href="">https://www.bluestacks.com/</a>. Follow the on-screen instructions and complete the installation process.</li>
- <li>Launch BlueStacks and sign in with your Google account. If you don't have one, you can create one for free.</li>
- <li>Search for Hill Climb Racing 2 in the Google Play Store app and install it. You can also use the search bar on the home screen or browse the categories.</li>
- <li>Launch the game from the home screen or the app drawer. You can also customize the settings, keyboard controls, and graphics to your liking.</li>
- </ol>
- <p>Congratulations, you have successfully downloaded Hill Climb Racing 2 on your PC using the BlueStacks emulator. Enjoy the game and have fun.</p>
- <h2>Method 3: Using the GameLoop emulator</h2>
- <p>GameLoop is another popular and reliable Android emulator for PC. It is designed specifically for gaming and offers a smooth, immersive experience, with a simple interface, low system requirements, and a large collection of games. Here is how to use it:</p>
- <ol>
- <li>Download and install the GameLoop emulator from its official website: <a href="">https://gameloop.fun/</a>. Follow the on-screen instructions and complete the installation process.</li>
- <li>Launch GameLoop and click the Game Center tab. You will see a list of games you can download and play.</li>
- <li>Search for Hill Climb Racing 2 and click the Install button. The game will download and install automatically.</li>
- <li>Launch the game from the My Games tab or the desktop shortcut. You can also adjust the settings, keyboard controls, and graphics to your liking.</li>
- </ol>
- <p>Congratulations, you have successfully downloaded Hill Climb Racing 2 on your PC using the GameLoop emulator. Enjoy the game and have fun.</p>
- <h2>Conclusion</h2>
-
- <ul>
- <li>Use boosts and power-ups wisely to gain an edge over your opponents.</li>
- <li>Upgrade your vehicle's parts and unlock new skins and accessories to improve your performance and style.</li>
- <li>Master the physics and controls of each vehicle and track to avoid crashing or flipping over.</li>
- <li>Compete in various modes and events to earn coins, gems, trophies, and rewards.</li>
- <li>Create or join a team to play with your friends online and take part in team races and challenges.</li>
- </ul>
- <p>We hope you found this article helpful and informative. If you have any questions or feedback, feel free to share them in the comments section below. Thanks for reading, and happy racing!</p>
- <h3>Frequently asked questions</h3>
- <ol>
- <li><b>What are the system requirements to play Hill Climb Racing 2 on PC?</b></li>
- <p>The minimum system requirements are Windows 7 or higher, an Intel or AMD processor, 4 GB of RAM, and DirectX version 9.0c or higher.</p>
- <li><b>How can I customize my character and vehicle in Hill Climb Racing 2?</b></li>
- <p>You can customize your character and vehicle by unlocking and upgrading new parts, skins, and accessories. You can also change your name, flag, and team in the settings menu.</p>
- <li><b>How can I play Hill Climb Racing 2 with my friends online?</b></li>
- <p>You can play Hill Climb Racing 2 with your friends online by creating or joining a team, sending or accepting invitations from other players, and taking part in team events and races.</p>
- <li><b>How can I improve my performance and skills in Hill Climb Racing 2?</b></li>
- <p>You can improve your performance and skills in Hill Climb Racing 2 by practicing on different tracks, mastering the physics and controls, using boosts and power-ups wisely, and learning from your mistakes.</p>
- <li><b>How can I contact the Hill Climb Racing 2 developers for support or feedback?</b></li>
-
- </ol>
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/eventstream.py DELETED
@@ -1,633 +0,0 @@
1
- # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # http://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
- """Binary Event Stream Decoding """
14
-
15
- from binascii import crc32
16
- from struct import unpack
17
-
18
- from botocore.exceptions import EventStreamError
19
-
20
- # byte length of the prelude (total_length + header_length + prelude_crc)
21
- _PRELUDE_LENGTH = 12
22
- _MAX_HEADERS_LENGTH = 128 * 1024 # 128 Kb
23
- _MAX_PAYLOAD_LENGTH = 16 * 1024**2 # 16 Mb
24
-
25
-
26
- class ParserError(Exception):
27
- """Base binary flow encoding parsing exception."""
28
-
29
- pass
30
-
31
-
32
- class DuplicateHeader(ParserError):
33
- """Duplicate header found in the event."""
34
-
35
- def __init__(self, header):
36
- message = 'Duplicate header present: "%s"' % header
37
- super().__init__(message)
38
-
39
-
40
- class InvalidHeadersLength(ParserError):
41
- """Headers length is longer than the maximum."""
42
-
43
- def __init__(self, length):
44
- message = 'Header length of {} exceeded the maximum of {}'.format(
45
- length,
46
- _MAX_HEADERS_LENGTH,
47
- )
48
- super().__init__(message)
49
-
50
-
51
- class InvalidPayloadLength(ParserError):
52
- """Payload length is longer than the maximum."""
53
-
54
- def __init__(self, length):
55
- message = 'Payload length of {} exceeded the maximum of {}'.format(
56
- length,
57
- _MAX_PAYLOAD_LENGTH,
58
- )
59
- super().__init__(message)
60
-
61
-
62
- class ChecksumMismatch(ParserError):
63
- """Calculated checksum did not match the expected checksum."""
64
-
65
- def __init__(self, expected, calculated):
66
- message = (
67
- 'Checksum mismatch: expected 0x{:08x}, calculated 0x{:08x}'.format(
68
- expected,
69
- calculated,
70
- )
71
- )
72
- super().__init__(message)
73
-
74
-
75
- class NoInitialResponseError(ParserError):
76
- """An event of type initial-response was not received.
77
-
78
- This exception is raised when the event stream produced no events or
79
- the first event in the stream was not of the initial-response type.
80
- """
81
-
82
- def __init__(self):
83
- message = 'First event was not of the initial-response type'
84
- super().__init__(message)
85
-
86
-
87
- class DecodeUtils:
88
- """Unpacking utility functions used in the decoder.
89
-
90
- All methods on this class take raw bytes and return a tuple containing
91
- the value parsed from the bytes and the number of bytes consumed to parse
92
- that value.
93
- """
94
-
95
- UINT8_BYTE_FORMAT = '!B'
96
- UINT16_BYTE_FORMAT = '!H'
97
- UINT32_BYTE_FORMAT = '!I'
98
- INT8_BYTE_FORMAT = '!b'
99
- INT16_BYTE_FORMAT = '!h'
100
- INT32_BYTE_FORMAT = '!i'
101
- INT64_BYTE_FORMAT = '!q'
102
- PRELUDE_BYTE_FORMAT = '!III'
103
-
104
- # uint byte size to unpack format
105
- UINT_BYTE_FORMAT = {
106
- 1: UINT8_BYTE_FORMAT,
107
- 2: UINT16_BYTE_FORMAT,
108
- 4: UINT32_BYTE_FORMAT,
109
- }
110
-
111
- @staticmethod
112
- def unpack_true(data):
113
- """This method consumes none of the provided bytes and returns True.
114
-
115
- :type data: bytes
116
- :param data: The bytes to parse from. This is ignored in this method.
117
-
118
- :rtype: (bool, int)
120
- :returns: The tuple (True, 0)
121
- """
122
- return True, 0
123
-
124
- @staticmethod
125
- def unpack_false(data):
126
- """This method consumes none of the provided bytes and returns False.
127
-
128
- :type data: bytes
129
- :param data: The bytes to parse from. This is ignored in this method.
130
-
131
- :rtype: (bool, int)
133
- :returns: The tuple (False, 0)
134
- """
135
- return False, 0
136
-
137
- @staticmethod
138
- def unpack_uint8(data):
139
- """Parse an unsigned 8-bit integer from the bytes.
140
-
141
- :type data: bytes
142
- :param data: The bytes to parse from.
143
-
144
- :rtype: (int, int)
145
- :returns: A tuple containing the (parsed integer value, bytes consumed)
146
- """
147
- value = unpack(DecodeUtils.UINT8_BYTE_FORMAT, data[:1])[0]
148
- return value, 1
149
-
150
- @staticmethod
151
- def unpack_uint32(data):
152
- """Parse an unsigned 32-bit integer from the bytes.
153
-
154
- :type data: bytes
155
- :param data: The bytes to parse from.
156
-
157
- :rtype: (int, int)
158
- :returns: A tuple containing the (parsed integer value, bytes consumed)
159
- """
160
- value = unpack(DecodeUtils.UINT32_BYTE_FORMAT, data[:4])[0]
161
- return value, 4
162
-
163
- @staticmethod
164
- def unpack_int8(data):
165
- """Parse a signed 8-bit integer from the bytes.
166
-
167
- :type data: bytes
168
- :param data: The bytes to parse from.
169
-
170
- :rtype: (int, int)
171
- :returns: A tuple containing the (parsed integer value, bytes consumed)
172
- """
173
- value = unpack(DecodeUtils.INT8_BYTE_FORMAT, data[:1])[0]
174
- return value, 1
175
-
176
- @staticmethod
177
- def unpack_int16(data):
178
- """Parse a signed 16-bit integer from the bytes.
179
-
180
- :type data: bytes
181
- :param data: The bytes to parse from.
182
-
183
- :rtype: (int, int)
185
- :returns: A tuple containing the (parsed integer value, bytes consumed)
186
- """
187
- value = unpack(DecodeUtils.INT16_BYTE_FORMAT, data[:2])[0]
188
- return value, 2
189
-
190
- @staticmethod
191
- def unpack_int32(data):
192
- """Parse a signed 32-bit integer from the bytes.
193
-
194
- :type data: bytes
195
- :param data: The bytes to parse from.
196
-
197
- :rtype: (int, int)
199
- :returns: A tuple containing the (parsed integer value, bytes consumed)
200
- """
201
- value = unpack(DecodeUtils.INT32_BYTE_FORMAT, data[:4])[0]
202
- return value, 4
203
-
204
- @staticmethod
205
- def unpack_int64(data):
206
- """Parse a signed 64-bit integer from the bytes.
207
-
208
- :type data: bytes
209
- :param data: The bytes to parse from.
210
-
211
- :rtype: (int, int)
213
- :returns: A tuple containing the (parsed integer value, bytes consumed)
214
- """
215
- value = unpack(DecodeUtils.INT64_BYTE_FORMAT, data[:8])[0]
216
- return value, 8
217
-
218
- @staticmethod
219
- def unpack_byte_array(data, length_byte_size=2):
220
- """Parse a variable length byte array from the bytes.
221
-
222
- The bytes are expected to be in the following format:
223
- [ length ][0 ... length bytes]
224
- where length is an unsigned integer represented in the smallest number
225
- of bytes to hold the maximum length of the array.
226
-
227
- :type data: bytes
228
- :param data: The bytes to parse from.
229
-
230
- :type length_byte_size: int
231
- :param length_byte_size: The byte size of the preceding integer that
232
- represents the length of the array. Supported values are 1, 2, and 4.
233
-
234
- :rtype: (bytes, int)
235
- :returns: A tuple containing the (parsed byte array, bytes consumed).
236
- """
237
- uint_byte_format = DecodeUtils.UINT_BYTE_FORMAT[length_byte_size]
238
- length = unpack(uint_byte_format, data[:length_byte_size])[0]
239
- bytes_end = length + length_byte_size
240
- array_bytes = data[length_byte_size:bytes_end]
241
- return array_bytes, bytes_end
242
-
243
- @staticmethod
244
- def unpack_utf8_string(data, length_byte_size=2):
245
- """Parse a variable length utf-8 string from the bytes.
246
-
247
- The bytes are expected to be in the following format:
248
- [ length ][0 ... length bytes]
249
- where length is an unsigned integer represented in the smallest number
250
- of bytes to hold the maximum length of the array and the following
251
- bytes are a valid utf-8 string.
252
-
253
- :type data: bytes
254
- :param data: The bytes to parse from.
255
-
256
- :type length_byte_size: int
257
- :param length_byte_size: The byte size of the preceding integer that
258
- represents the length of the array. Supported values are 1, 2, and 4.
259
-
260
- :rtype: (str, int)
261
- :returns: A tuple containing the (utf-8 string, bytes consumed).
262
- """
263
- array_bytes, consumed = DecodeUtils.unpack_byte_array(
264
- data, length_byte_size
265
- )
266
- return array_bytes.decode('utf-8'), consumed
267
-
268
- @staticmethod
269
- def unpack_uuid(data):
270
- """Parse a 16-byte uuid from the bytes.
271
-
272
- :type data: bytes
273
- :param data: The bytes to parse from.
274
-
275
- :rtype: (bytes, int)
276
- :returns: A tuple containing the (uuid bytes, bytes consumed).
277
- """
278
- return data[:16], 16
279
-
280
- @staticmethod
281
- def unpack_prelude(data):
282
- """Parse the prelude for an event stream message from the bytes.
283
-
284
- The prelude for an event stream message has the following format:
285
- [total_length][header_length][prelude_crc]
286
- where each field is an unsigned 32-bit integer.
287
-
288
- :rtype: ((int, int, int), int)
289
- :returns: A tuple of ((total_length, headers_length, prelude_crc),
290
- consumed)
291
- """
292
- return (unpack(DecodeUtils.PRELUDE_BYTE_FORMAT, data), _PRELUDE_LENGTH)
293
-
294
-
295
- def _validate_checksum(data, checksum, crc=0):
296
- # To generate the same numeric value across all Python versions and
297
- # platforms use crc32(data) & 0xffffffff.
298
- computed_checksum = crc32(data, crc) & 0xFFFFFFFF
299
- if checksum != computed_checksum:
300
- raise ChecksumMismatch(checksum, computed_checksum)
301
-
302
-
303
- class MessagePrelude:
304
- """Represents the prelude of an event stream message."""
305
-
306
- def __init__(self, total_length, headers_length, crc):
307
- self.total_length = total_length
308
- self.headers_length = headers_length
309
- self.crc = crc
310
-
311
- @property
312
- def payload_length(self):
313
- """Calculates the total payload length.
314
-
315
- The extra minus 4 bytes is for the message CRC.
316
-
317
- :rtype: int
318
- :returns: The total payload length.
319
- """
320
- return self.total_length - self.headers_length - _PRELUDE_LENGTH - 4
321
-
322
- @property
323
- def payload_end(self):
324
- """Calculates the byte offset for the end of the message payload.
325
-
326
- The extra minus 4 bytes is for the message CRC.
327
-
328
- :rtype: int
329
- :returns: The byte offset from the beginning of the event stream
330
- message to the end of the payload.
331
- """
332
- return self.total_length - 4
333
-
334
- @property
335
- def headers_end(self):
336
- """Calculates the byte offset for the end of the message headers.
337
-
338
- :rtype: int
339
- :returns: The byte offset from the beginning of the event stream
340
- message to the end of the headers.
341
- """
342
- return _PRELUDE_LENGTH + self.headers_length
343
-
344
-
345
- class EventStreamMessage:
346
- """Represents an event stream message."""
347
-
348
- def __init__(self, prelude, headers, payload, crc):
349
- self.prelude = prelude
350
- self.headers = headers
351
- self.payload = payload
352
- self.crc = crc
353
-
354
- def to_response_dict(self, status_code=200):
355
- message_type = self.headers.get(':message-type')
356
- if message_type == 'error' or message_type == 'exception':
357
- status_code = 400
358
- return {
359
- 'status_code': status_code,
360
- 'headers': self.headers,
361
- 'body': self.payload,
362
- }
363
-
364
-
365
- class EventStreamHeaderParser:
366
- """Parses the event headers from an event stream message.
367
-
368
- Expects all of the header data upfront and creates a dictionary of headers
369
- to return. This object can be reused multiple times to parse the headers
370
- from multiple event stream messages.
371
- """
372
-
373
- # Maps header type to appropriate unpacking function
374
- # These unpacking functions return the value and the amount unpacked
375
- _HEADER_TYPE_MAP = {
376
- # boolean_true
377
- 0: DecodeUtils.unpack_true,
378
- # boolean_false
379
- 1: DecodeUtils.unpack_false,
380
- # byte
381
- 2: DecodeUtils.unpack_int8,
382
- # short
383
- 3: DecodeUtils.unpack_int16,
384
- # integer
385
- 4: DecodeUtils.unpack_int32,
386
- # long
387
- 5: DecodeUtils.unpack_int64,
388
- # byte_array
389
- 6: DecodeUtils.unpack_byte_array,
390
- # string
391
- 7: DecodeUtils.unpack_utf8_string,
392
- # timestamp
393
- 8: DecodeUtils.unpack_int64,
394
- # uuid
395
- 9: DecodeUtils.unpack_uuid,
396
- }
397
-
398
- def __init__(self):
399
- self._data = None
400
-
401
- def parse(self, data):
402
- """Parses the event stream headers from an event stream message.
403
-
404
- :type data: bytes
405
- :param data: The bytes that correspond to the headers section of an
406
- event stream message.
407
-
408
- :rtype: dict
409
- :returns: A dictionary of header key, value pairs.
410
- """
411
- self._data = data
412
- return self._parse_headers()
413
-
414
- def _parse_headers(self):
415
- headers = {}
416
- while self._data:
417
- name, value = self._parse_header()
418
- if name in headers:
419
- raise DuplicateHeader(name)
420
- headers[name] = value
421
- return headers
422
-
423
- def _parse_header(self):
424
- name = self._parse_name()
425
- value = self._parse_value()
426
- return name, value
427
-
428
- def _parse_name(self):
429
- name, consumed = DecodeUtils.unpack_utf8_string(self._data, 1)
430
- self._advance_data(consumed)
431
- return name
432
-
433
- def _parse_type(self):
434
- type, consumed = DecodeUtils.unpack_uint8(self._data)
435
- self._advance_data(consumed)
436
- return type
437
-
438
- def _parse_value(self):
439
- header_type = self._parse_type()
440
- value_unpacker = self._HEADER_TYPE_MAP[header_type]
441
- value, consumed = value_unpacker(self._data)
442
- self._advance_data(consumed)
443
- return value
444
-
445
- def _advance_data(self, consumed):
446
- self._data = self._data[consumed:]
447
-
448
-
449
- class EventStreamBuffer:
450
- """Streaming based event stream buffer
451
-
452
- A buffer class that wraps bytes from an event stream providing parsed
453
- messages as they become available via an iterable interface.
454
- """
455
-
456
- def __init__(self):
457
- self._data = b''
458
- self._prelude = None
459
- self._header_parser = EventStreamHeaderParser()
460
-
461
- def add_data(self, data):
462
- """Add data to the buffer.
463
-
464
- :type data: bytes
465
- :param data: The bytes to add to the buffer to be used when parsing
466
- """
467
- self._data += data
468
-
469
- def _validate_prelude(self, prelude):
470
- if prelude.headers_length > _MAX_HEADERS_LENGTH:
471
- raise InvalidHeadersLength(prelude.headers_length)
472
-
473
- if prelude.payload_length > _MAX_PAYLOAD_LENGTH:
474
- raise InvalidPayloadLength(prelude.payload_length)
475
-
476
- def _parse_prelude(self):
477
- prelude_bytes = self._data[:_PRELUDE_LENGTH]
478
- raw_prelude, _ = DecodeUtils.unpack_prelude(prelude_bytes)
479
- prelude = MessagePrelude(*raw_prelude)
480
- self._validate_prelude(prelude)
481
- # The minus 4 removes the prelude crc from the bytes to be checked
482
- _validate_checksum(prelude_bytes[: _PRELUDE_LENGTH - 4], prelude.crc)
483
- return prelude
484
-
485
- def _parse_headers(self):
486
- header_bytes = self._data[_PRELUDE_LENGTH : self._prelude.headers_end]
487
- return self._header_parser.parse(header_bytes)
488
-
489
- def _parse_payload(self):
490
- prelude = self._prelude
491
- payload_bytes = self._data[prelude.headers_end : prelude.payload_end]
492
- return payload_bytes
493
-
494
- def _parse_message_crc(self):
495
- prelude = self._prelude
496
- crc_bytes = self._data[prelude.payload_end : prelude.total_length]
497
- message_crc, _ = DecodeUtils.unpack_uint32(crc_bytes)
498
- return message_crc
499
-
500
- def _parse_message_bytes(self):
501
- # The minus 4 includes the prelude crc to the bytes to be checked
502
- message_bytes = self._data[
503
- _PRELUDE_LENGTH - 4 : self._prelude.payload_end
504
- ]
505
- return message_bytes
506
-
507
- def _validate_message_crc(self):
508
- message_crc = self._parse_message_crc()
509
- message_bytes = self._parse_message_bytes()
510
- _validate_checksum(message_bytes, message_crc, crc=self._prelude.crc)
511
- return message_crc
512
-
513
- def _parse_message(self):
514
- crc = self._validate_message_crc()
515
- headers = self._parse_headers()
516
- payload = self._parse_payload()
517
- message = EventStreamMessage(self._prelude, headers, payload, crc)
518
- self._prepare_for_next_message()
519
- return message
520
-
521
- def _prepare_for_next_message(self):
522
- # Advance the data and reset the current prelude
523
- self._data = self._data[self._prelude.total_length :]
524
- self._prelude = None
525
-
526
- def next(self):
527
- """Provides the next available message parsed from the stream
528
-
529
- :rtype: EventStreamMessage
530
- :returns: The next event stream message
531
- """
532
- if len(self._data) < _PRELUDE_LENGTH:
533
- raise StopIteration()
534
-
535
- if self._prelude is None:
536
- self._prelude = self._parse_prelude()
537
-
538
- if len(self._data) < self._prelude.total_length:
539
- raise StopIteration()
540
-
541
- return self._parse_message()
542
-
543
- def __next__(self):
544
- return self.next()
545
-
546
- def __iter__(self):
547
- return self
548
-
549
-
550
- class EventStream:
551
- """Wrapper class for an event stream body.
552
-
553
- This wraps the underlying streaming body, parsing it for individual events
554
- and yielding them as they come available through the iterator interface.
555
-
556
- The following example uses the S3 select API to get structured data out of
557
- an object stored in S3 using an event stream.
558
-
559
- **Example:**
560
- ::
561
- from botocore.session import Session
562
-
563
- s3 = Session().create_client('s3')
564
- response = s3.select_object_content(
565
- Bucket='bucketname',
566
- Key='keyname',
567
- ExpressionType='SQL',
568
- RequestProgress={'Enabled': True},
569
- Expression="SELECT * FROM S3Object s",
570
- InputSerialization={'CSV': {}},
571
- OutputSerialization={'CSV': {}},
572
- )
573
- # This is the event stream in the response
574
- event_stream = response['Payload']
575
- end_event_received = False
576
- with open('output', 'wb') as f:
577
- # Iterate over events in the event stream as they come
578
- for event in event_stream:
579
- # If we received a records event, write the data to a file
580
- if 'Records' in event:
581
- data = event['Records']['Payload']
582
- f.write(data)
583
- # If we received a progress event, print the details
584
- elif 'Progress' in event:
585
- print(event['Progress']['Details'])
586
- # End event indicates that the request finished successfully
587
- elif 'End' in event:
588
- print('Result is complete')
589
- end_event_received = True
590
- if not end_event_received:
591
- raise Exception("End event not received, request incomplete.")
592
- """
593
-
594
- def __init__(self, raw_stream, output_shape, parser, operation_name):
595
- self._raw_stream = raw_stream
596
- self._output_shape = output_shape
597
- self._operation_name = operation_name
598
- self._parser = parser
599
- self._event_generator = self._create_raw_event_generator()
600
-
601
- def __iter__(self):
602
- for event in self._event_generator:
603
- parsed_event = self._parse_event(event)
604
- if parsed_event:
605
- yield parsed_event
606
-
607
- def _create_raw_event_generator(self):
608
- event_stream_buffer = EventStreamBuffer()
609
- for chunk in self._raw_stream.stream():
610
- event_stream_buffer.add_data(chunk)
611
- yield from event_stream_buffer
612
-
613
- def _parse_event(self, event):
614
- response_dict = event.to_response_dict()
615
- parsed_response = self._parser.parse(response_dict, self._output_shape)
616
- if response_dict['status_code'] == 200:
617
- return parsed_response
618
- else:
619
- raise EventStreamError(parsed_response, self._operation_name)
620
-
621
- def get_initial_response(self):
622
- try:
623
- initial_event = next(self._event_generator)
624
- event_type = initial_event.headers.get(':event-type')
625
- if event_type == 'initial-response':
626
- return initial_event
627
- except StopIteration:
628
- pass
629
- raise NoInitialResponseError()
630
-
631
- def close(self):
632
- """Closes the underlying streaming body."""
633
- self._raw_stream.close()
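
The framing this module parses is easy to sanity-check end to end. For a message with no headers and a 5-byte payload, total_length = 12 (prelude) + 0 (headers) + 5 (payload) + 4 (message CRC) = 21, and payload_length recovers 21 - 0 - 12 - 4 = 5. A minimal round-trip sketch, assuming the botocore package is importable (encode_event is a hypothetical helper written for this illustration, not part of botocore):

    import struct
    from binascii import crc32

    from botocore.eventstream import EventStreamBuffer

    def encode_event(headers=b'', payload=b''):
        # Hypothetical helper: prelude is total_length and headers_length,
        # each an unsigned 32-bit big-endian integer.
        total_length = 12 + len(headers) + len(payload) + 4
        prelude = struct.pack('!II', total_length, len(headers))
        prelude_crc = crc32(prelude) & 0xFFFFFFFF
        body = prelude + struct.pack('!I', prelude_crc) + headers + payload
        # The trailing message CRC covers everything that precedes it.
        return body + struct.pack('!I', crc32(body) & 0xFFFFFFFF)

    buf = EventStreamBuffer()
    buf.add_data(encode_event(payload=b'hello'))
    message = next(buf)
    assert message.payload == b'hello'
    assert message.prelude.payload_length == 5
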
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/command/register.py DELETED
@@ -1,319 +0,0 @@
1
- """distutils.command.register
2
-
3
- Implements the Distutils 'register' command (register with the repository).
4
- """
5
-
6
- # created 2002/10/21, Richard Jones
7
-
8
- import getpass
9
- import io
10
- import urllib.parse
11
- import urllib.request
12
- from warnings import warn
13
-
14
- from distutils.core import PyPIRCCommand
15
- from distutils import log
16
-
17
-
18
- class register(PyPIRCCommand):
19
-
20
- description = "register the distribution with the Python package index"
21
- user_options = PyPIRCCommand.user_options + [
22
- ('list-classifiers', None, 'list the valid Trove classifiers'),
23
- (
24
- 'strict',
25
- None,
26
- 'Will stop the registering if the meta-data are not fully compliant',
27
- ),
28
- ]
29
- boolean_options = PyPIRCCommand.boolean_options + [
30
- 'verify',
31
- 'list-classifiers',
32
- 'strict',
33
- ]
34
-
35
- sub_commands = [('check', lambda self: True)]
36
-
37
- def initialize_options(self):
38
- PyPIRCCommand.initialize_options(self)
39
- self.list_classifiers = 0
40
- self.strict = 0
41
-
42
- def finalize_options(self):
43
- PyPIRCCommand.finalize_options(self)
44
- # setting options for the `check` subcommand
45
- check_options = {
46
- 'strict': ('register', self.strict),
47
- 'restructuredtext': ('register', 1),
48
- }
49
- self.distribution.command_options['check'] = check_options
50
-
51
- def run(self):
52
- self.finalize_options()
53
- self._set_config()
54
-
55
- # Run sub commands
56
- for cmd_name in self.get_sub_commands():
57
- self.run_command(cmd_name)
58
-
59
- if self.dry_run:
60
- self.verify_metadata()
61
- elif self.list_classifiers:
62
- self.classifiers()
63
- else:
64
- self.send_metadata()
65
-
66
- def check_metadata(self):
67
- """Deprecated API."""
68
- warn(
69
- "distutils.command.register.check_metadata is deprecated; "
70
- "use the check command instead",
71
- DeprecationWarning,
72
- )
73
- check = self.distribution.get_command_obj('check')
74
- check.ensure_finalized()
75
- check.strict = self.strict
76
- check.restructuredtext = 1
77
- check.run()
78
-
79
- def _set_config(self):
80
- '''Reads the configuration file and set attributes.'''
81
- config = self._read_pypirc()
82
- if config != {}:
83
- self.username = config['username']
84
- self.password = config['password']
85
- self.repository = config['repository']
86
- self.realm = config['realm']
87
- self.has_config = True
88
- else:
89
- if self.repository not in ('pypi', self.DEFAULT_REPOSITORY):
90
- raise ValueError('%s not found in .pypirc' % self.repository)
91
- if self.repository == 'pypi':
92
- self.repository = self.DEFAULT_REPOSITORY
93
- self.has_config = False
94
-
95
- def classifiers(self):
96
- '''Fetch the list of classifiers from the server.'''
97
- url = self.repository + '?:action=list_classifiers'
98
- response = urllib.request.urlopen(url)
99
- log.info(self._read_pypi_response(response))
100
-
101
- def verify_metadata(self):
102
- '''Send the metadata to the package index server to be checked.'''
103
- # send the info to the server and report the result
104
- (code, result) = self.post_to_server(self.build_post_data('verify'))
105
- log.info('Server response (%s): %s', code, result)
106
-
107
- def send_metadata(self): # noqa: C901
108
- '''Send the metadata to the package index server.
109
-
110
- Well, do the following:
111
- 1. figure who the user is, and then
112
- 2. send the data as a Basic auth'ed POST.
113
-
114
- First we try to read the username/password from $HOME/.pypirc,
115
- which is a ConfigParser-formatted file with a section
116
- [distutils] containing username and password entries (both
117
- in clear text). Eg:
118
-
119
- [distutils]
120
- index-servers =
121
- pypi
122
-
123
- [pypi]
124
- username: fred
125
- password: sekrit
126
-
127
- Otherwise, to figure who the user is, we offer the user three
128
- choices:
129
-
130
- 1. use existing login,
131
- 2. register as a new user, or
132
- 3. set the password to a random string and email the user.
133
-
134
- '''
135
- # see if we can short-cut and get the username/password from the
136
- # config
137
- if self.has_config:
138
- choice = '1'
139
- username = self.username
140
- password = self.password
141
- else:
142
- choice = 'x'
143
- username = password = ''
144
-
145
- # get the user's login info
146
- choices = '1 2 3 4'.split()
147
- while choice not in choices:
148
- self.announce(
149
- '''\
150
- We need to know who you are, so please choose either:
151
- 1. use your existing login,
152
- 2. register as a new user,
153
- 3. have the server generate a new password for you (and email it to you), or
154
- 4. quit
155
- Your selection [default 1]: ''',
156
- log.INFO,
157
- )
158
- choice = input()
159
- if not choice:
160
- choice = '1'
161
- elif choice not in choices:
162
- print('Please choose one of the four options!')
163
-
164
- if choice == '1':
165
- # get the username and password
166
- while not username:
167
- username = input('Username: ')
168
- while not password:
169
- password = getpass.getpass('Password: ')
170
-
171
- # set up the authentication
172
- auth = urllib.request.HTTPPasswordMgr()
173
- host = urllib.parse.urlparse(self.repository)[1]
174
- auth.add_password(self.realm, host, username, password)
175
- # send the info to the server and report the result
176
- code, result = self.post_to_server(self.build_post_data('submit'), auth)
177
- self.announce('Server response ({}): {}'.format(code, result), log.INFO)
178
-
179
- # possibly save the login
180
- if code == 200:
181
- if self.has_config:
182
- # sharing the password in the distribution instance
183
- # so the upload command can reuse it
184
- self.distribution.password = password
185
- else:
186
- self.announce(
187
- (
188
- 'I can store your PyPI login so future '
189
- 'submissions will be faster.'
190
- ),
191
- log.INFO,
192
- )
193
- self.announce(
194
- '(the login will be stored in %s)' % self._get_rc_file(),
195
- log.INFO,
196
- )
197
- choice = 'X'
198
- while choice.lower() not in 'yn':
199
- choice = input('Save your login (y/N)?')
200
- if not choice:
201
- choice = 'n'
202
- if choice.lower() == 'y':
203
- self._store_pypirc(username, password)
204
-
205
- elif choice == '2':
206
- data = {':action': 'user'}
207
- data['name'] = data['password'] = data['email'] = ''
208
- data['confirm'] = None
209
- while not data['name']:
210
- data['name'] = input('Username: ')
211
- while data['password'] != data['confirm']:
212
- while not data['password']:
213
- data['password'] = getpass.getpass('Password: ')
214
- while not data['confirm']:
215
- data['confirm'] = getpass.getpass(' Confirm: ')
216
- if data['password'] != data['confirm']:
217
- data['password'] = ''
218
- data['confirm'] = None
219
- print("Password and confirm don't match!")
220
- while not data['email']:
221
- data['email'] = input(' EMail: ')
222
- code, result = self.post_to_server(data)
223
- if code != 200:
224
- log.info('Server response (%s): %s', code, result)
225
- else:
226
- log.info('You will receive an email shortly.')
227
- log.info('Follow the instructions in it to complete registration.')
228
- elif choice == '3':
229
- data = {':action': 'password_reset'}
230
- data['email'] = ''
231
- while not data['email']:
232
- data['email'] = input('Your email address: ')
233
- code, result = self.post_to_server(data)
234
- log.info('Server response (%s): %s', code, result)
235
-
236
- def build_post_data(self, action):
237
- # figure the data to send - the metadata plus some additional
238
- # information used by the package server
239
- meta = self.distribution.metadata
240
- data = {
241
- ':action': action,
242
- 'metadata_version': '1.0',
243
- 'name': meta.get_name(),
244
- 'version': meta.get_version(),
245
- 'summary': meta.get_description(),
246
- 'home_page': meta.get_url(),
247
- 'author': meta.get_contact(),
248
- 'author_email': meta.get_contact_email(),
249
- 'license': meta.get_licence(),
250
- 'description': meta.get_long_description(),
251
- 'keywords': meta.get_keywords(),
252
- 'platform': meta.get_platforms(),
253
- 'classifiers': meta.get_classifiers(),
254
- 'download_url': meta.get_download_url(),
255
- # PEP 314
256
- 'provides': meta.get_provides(),
257
- 'requires': meta.get_requires(),
258
- 'obsoletes': meta.get_obsoletes(),
259
- }
260
- if data['provides'] or data['requires'] or data['obsoletes']:
261
- data['metadata_version'] = '1.1'
262
- return data
263
-
264
- def post_to_server(self, data, auth=None): # noqa: C901
265
- '''Post a query to the server, and return a string response.'''
266
- if 'name' in data:
267
- self.announce(
268
- 'Registering {} to {}'.format(data['name'], self.repository), log.INFO
269
- )
270
- # Build up the MIME payload for the urllib2 POST data
271
- boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
272
- sep_boundary = '\n--' + boundary
273
- end_boundary = sep_boundary + '--'
274
- body = io.StringIO()
275
- for key, value in data.items():
276
- # handle multiple entries for the same name
277
- if type(value) not in (type([]), type(())):
278
- value = [value]
279
- for value in value:
280
- value = str(value)
281
- body.write(sep_boundary)
282
- body.write('\nContent-Disposition: form-data; name="%s"' % key)
283
- body.write("\n\n")
284
- body.write(value)
285
- if value and value[-1] == '\r':
286
- body.write('\n') # write an extra newline (lurve Macs)
287
- body.write(end_boundary)
288
- body.write("\n")
289
- body = body.getvalue().encode("utf-8")
290
-
291
- # build the Request
292
- headers = {
293
- 'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'
294
- % boundary,
295
- 'Content-length': str(len(body)),
296
- }
297
- req = urllib.request.Request(self.repository, body, headers)
298
-
299
- # handle HTTP and include the Basic Auth handler
300
- opener = urllib.request.build_opener(
301
- urllib.request.HTTPBasicAuthHandler(password_mgr=auth)
302
- )
303
- data = ''
304
- try:
305
- result = opener.open(req)
306
- except urllib.error.HTTPError as e:
307
- if self.show_response:
308
- data = e.fp.read()
309
- result = e.code, e.msg
310
- except urllib.error.URLError as e:
311
- result = 500, str(e)
312
- else:
313
- if self.show_response:
314
- data = self._read_pypi_response(result)
315
- result = 200, 'OK'
316
- if self.show_response:
317
- msg = '\n'.join(('-' * 75, data, '-' * 75))
318
- self.announce(msg, log.INFO)
319
- return result
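
The multipart encoding inside post_to_server is self-contained enough to lift out. A standalone sketch of the same scheme (illustrative only, mirroring the loop above rather than any public distutils API):

    import io

    BOUNDARY = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'

    def encode_multipart(data, boundary=BOUNDARY):
        # One form-data part per (key, value); list/tuple values repeat the key.
        sep_boundary = '\n--' + boundary
        body = io.StringIO()
        for key, value in data.items():
            values = value if isinstance(value, (list, tuple)) else [value]
            for v in values:
                v = str(v)
                body.write(sep_boundary)
                body.write('\nContent-Disposition: form-data; name="%s"' % key)
                body.write('\n\n')
                body.write(v)
                if v and v[-1] == '\r':
                    body.write('\n')  # extra newline after a trailing carriage return
        body.write(sep_boundary + '--')
        body.write('\n')
        return body.getvalue().encode('utf-8')

    payload = encode_multipart({':action': 'verify', 'name': 'example-package'})
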
spaces/BigSalmon/BackTranslation/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: BackTranslation
3
- emoji: 🐨
4
- colorFrom: gray
5
- colorTo: pink
6
- sdk: streamlit
7
- sdk_version: 1.2.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
spaces/CVPR/Dual-Key_Backdoor_Attacks/datagen/detectron2/detectron2/utils/env.py DELETED
@@ -1,105 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
- import importlib
3
- import importlib.util
4
- import logging
5
- import numpy as np
6
- import os
7
- import random
8
- import sys
9
- from datetime import datetime
10
- import torch
11
-
12
- __all__ = ["seed_all_rng"]
13
-
14
-
15
- def seed_all_rng(seed=None):
16
- """
17
- Set the random seed for the RNG in torch, numpy and python.
18
-
19
- Args:
20
- seed (int): if None, will use a strong random seed.
21
- """
22
- if seed is None:
23
- seed = (
24
- os.getpid()
25
- + int(datetime.now().strftime("%S%f"))
26
- + int.from_bytes(os.urandom(2), "big")
27
- )
28
- logger = logging.getLogger(__name__)
29
- logger.info("Using a generated random seed {}".format(seed))
30
- np.random.seed(seed)
31
- torch.set_rng_state(torch.manual_seed(seed).get_state())
32
- random.seed(seed)
33
-
34
-
35
- # from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
36
- def _import_file(module_name, file_path, make_importable=False):
37
- spec = importlib.util.spec_from_file_location(module_name, file_path)
38
- module = importlib.util.module_from_spec(spec)
39
- spec.loader.exec_module(module)
40
- if make_importable:
41
- sys.modules[module_name] = module
42
- return module
43
-
44
-
45
- def _configure_libraries():
46
- """
47
- Configurations for some libraries.
48
- """
49
- # An environment option to disable `import cv2` globally,
50
- # in case it leads to negative performance impact
51
- disable_cv2 = int(os.environ.get("DETECTRON2_DISABLE_CV2", False))
52
- if disable_cv2:
53
- sys.modules["cv2"] = None
54
- else:
55
- # Disable opencl in opencv since its interaction with cuda often has negative effects
56
- # This envvar is supported after OpenCV 3.4.0
57
- os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled"
58
- try:
59
- import cv2
60
-
61
- if int(cv2.__version__.split(".")[0]) >= 3:
62
- cv2.ocl.setUseOpenCL(False)
63
- except ImportError:
64
- pass
65
-
66
-
67
- _ENV_SETUP_DONE = False
68
-
69
-
70
- def setup_environment():
71
- """Perform environment setup work. The default setup is a no-op, but this
72
- function allows the user to specify a Python source file or a module in
73
- the $DETECTRON2_ENV_MODULE environment variable, that performs
74
- custom setup work that may be necessary to their computing environment.
75
- """
76
- global _ENV_SETUP_DONE
77
- if _ENV_SETUP_DONE:
78
- return
79
- _ENV_SETUP_DONE = True
80
-
81
- _configure_libraries()
82
-
83
- custom_module_path = os.environ.get("DETECTRON2_ENV_MODULE")
84
-
85
- if custom_module_path:
86
- setup_custom_environment(custom_module_path)
87
- else:
88
- # The default setup is a no-op
89
- pass
90
-
91
-
92
- def setup_custom_environment(custom_module):
93
- """
94
- Load custom environment setup by importing a Python source file or a
95
- module, and run the setup function.
96
- """
97
- if custom_module.endswith(".py"):
98
- module = _import_file("detectron2.utils.env.custom_module", custom_module)
99
- else:
100
- module = importlib.import_module(custom_module)
101
- assert hasattr(module, "setup_environment") and callable(module.setup_environment), (
102
- "Custom environment module defined in {} does not have the "
103
- "required callable attribute 'setup_environment'."
104
- ).format(custom_module)
105
- module.setup_environment()
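
setup_custom_environment accepts either a path to a .py file or a dotted module name; the only contract is a callable named setup_environment. A minimal sketch, assuming a hypothetical my_env.py pointed to by the DETECTRON2_ENV_MODULE variable:

    # my_env.py -- hypothetical custom environment module; activate with:
    #   export DETECTRON2_ENV_MODULE=/path/to/my_env.py

    def setup_environment():
        # Example: make runs reproducible by seeding all RNGs up front,
        # using the seed_all_rng helper defined in this same module.
        from detectron2.utils.env import seed_all_rng
        seed_all_rng(42)
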
spaces/CVPR/regionclip-demo/detectron2/evaluation/coco_evaluation.py DELETED
@@ -1,610 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import contextlib
- import copy
- import io
- import itertools
- import json
- import logging
- import numpy as np
- import os
- import pickle
- from collections import OrderedDict
- import pycocotools.mask as mask_util
- import torch
- from pycocotools.coco import COCO
- from pycocotools.cocoeval import COCOeval
- from tabulate import tabulate
-
- import detectron2.utils.comm as comm
- from detectron2.config import CfgNode
- from detectron2.data import MetadataCatalog
- from detectron2.data.datasets.coco import convert_to_coco_json
- from detectron2.data.datasets.coco_zeroshot_categories import COCO_UNSEEN_CLS, COCO_SEEN_CLS, COCO_OVD_ALL_CLS
- from detectron2.evaluation.fast_eval_api import COCOeval_opt
- from detectron2.structures import Boxes, BoxMode, pairwise_iou
- from detectron2.utils.file_io import PathManager
- from detectron2.utils.logger import create_small_table
-
- from .evaluator import DatasetEvaluator
-
-
- class COCOEvaluator(DatasetEvaluator):
-     """
-     Evaluate AR for object proposals, AP for instance detection/segmentation, AP
-     for keypoint detection outputs using COCO's metrics.
-     See http://cocodataset.org/#detection-eval and
-     http://cocodataset.org/#keypoints-eval to understand its metrics.
-     The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
-     the metric cannot be computed (e.g. due to no predictions made).
-
-     In addition to COCO, this evaluator is able to support any bounding box detection,
-     instance segmentation, or keypoint detection dataset.
-     """
-
-     def __init__(
-         self,
-         dataset_name,
-         tasks=None,
-         distributed=True,
-         output_dir=None,
-         *,
-         use_fast_impl=True,
-         kpt_oks_sigmas=(),
-     ):
-         """
-         Args:
-             dataset_name (str): name of the dataset to be evaluated.
-                 It must have either the following corresponding metadata:
-
-                 "json_file": the path to the COCO format annotation
-
-                 Or it must be in detectron2's standard dataset format
-                 so it can be converted to COCO format automatically.
-             tasks (tuple[str]): tasks that can be evaluated under the given
-                 configuration. A task is one of "bbox", "segm", "keypoints".
-                 By default, will infer this automatically from predictions.
-             distributed (True): if True, will collect results from all ranks and run evaluation
-                 in the main process.
-                 Otherwise, will only evaluate the results in the current process.
-             output_dir (str): optional, an output directory to dump all
-                 results predicted on the dataset. The dump contains two files:
-
-                 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
-                    contains all the results in the format they are produced by the model.
-                 2. "coco_instances_results.json" a json file in COCO's result format.
-             use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
-                 Although the results should be very close to the official implementation in COCO
-                 API, it is still recommended to compute results with the official API for use in
-                 papers. The faster implementation also uses more RAM.
-             kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
-                 See http://cocodataset.org/#keypoints-eval
-                 When empty, it will use the defaults in COCO.
-                 Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
-         """
-         self._logger = logging.getLogger(__name__)
-         self._distributed = distributed
-         self._output_dir = output_dir
-         self._use_fast_impl = use_fast_impl
-
-         if tasks is not None and isinstance(tasks, CfgNode):
-             kpt_oks_sigmas = (
-                 tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
-             )
-             self._logger.warn(
-                 "COCO Evaluator instantiated using config, this is deprecated behavior."
-                 " Please pass in explicit arguments instead."
-             )
-             self._tasks = None  # Infering it from predictions should be better
-         else:
-             self._tasks = tasks
-
-         self._cpu_device = torch.device("cpu")
-
-         self._metadata = MetadataCatalog.get(dataset_name)
-         if not hasattr(self._metadata, "json_file"):
-             self._logger.info(
-                 f"'{dataset_name}' is not registered by `register_coco_instances`."
-                 " Therefore trying to convert it to COCO format ..."
-             )
-
-             cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
-             self._metadata.json_file = cache_path
-             convert_to_coco_json(dataset_name, cache_path)
-
-         json_file = PathManager.get_local_path(self._metadata.json_file)
-         with contextlib.redirect_stdout(io.StringIO()):
-             self._coco_api = COCO(json_file)
-
-         # Test set json files do not contain annotations (evaluation must be
-         # performed using the COCO evaluation server).
-         self._do_evaluation = "annotations" in self._coco_api.dataset
-         if self._do_evaluation:
-             self._kpt_oks_sigmas = kpt_oks_sigmas
-
-     def reset(self):
-         self._predictions = []
-
-     def process(self, inputs, outputs):
-         """
-         Args:
-             inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
-                 It is a list of dict. Each dict corresponds to an image and
-                 contains keys like "height", "width", "file_name", "image_id".
-             outputs: the outputs of a COCO model. It is a list of dicts with key
-                 "instances" that contains :class:`Instances`.
-         """
-         for input, output in zip(inputs, outputs):
-             prediction = {"image_id": input["image_id"]}
-
-             if "instances" in output:
-                 instances = output["instances"].to(self._cpu_device)
-                 prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
-             if "proposals" in output:
-                 prediction["proposals"] = output["proposals"].to(self._cpu_device)
-             if len(prediction) > 1:
-                 self._predictions.append(prediction)
-
-     def evaluate(self, img_ids=None):
-         """
-         Args:
-             img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
-         """
-         if self._distributed:
-             comm.synchronize()
-             predictions = comm.gather(self._predictions, dst=0)
-             predictions = list(itertools.chain(*predictions))
-
-             if not comm.is_main_process():
-                 return {}
-         else:
-             predictions = self._predictions
-
-         if len(predictions) == 0:
-             self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
-             return {}
-
-         if self._output_dir:
-             PathManager.mkdirs(self._output_dir)
-             file_path = os.path.join(self._output_dir, "instances_predictions.pth")
-             with PathManager.open(file_path, "wb") as f:
-                 torch.save(predictions, f)
-
-         self._results = OrderedDict()
-         if "proposals" in predictions[0]:
-             self._eval_box_proposals(predictions)
-         if "instances" in predictions[0]:
-             self._eval_predictions(predictions, img_ids=img_ids)
-         # Copy so the caller can do whatever with results
-         return copy.deepcopy(self._results)
-
-     def _tasks_from_predictions(self, predictions):
-         """
-         Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
-         """
-         tasks = {"bbox"}
-         for pred in predictions:
-             if "segmentation" in pred:
-                 tasks.add("segm")
-             if "keypoints" in pred:
-                 tasks.add("keypoints")
-         return sorted(tasks)
-
-     def _eval_predictions(self, predictions, img_ids=None):
-         """
-         Evaluate predictions. Fill self._results with the metrics of the tasks.
-         """
-         self._logger.info("Preparing results for COCO format ...")
-         coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
-         tasks = self._tasks or self._tasks_from_predictions(coco_results)
-
-         # unmap the category ids for COCO
-         if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
-             dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
-             all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
-             num_classes = len(all_contiguous_ids)
-             assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
-
-             reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
-             for result in coco_results:
-                 category_id = result["category_id"]
-                 assert category_id < num_classes, (
-                     f"A prediction has class={category_id}, "
-                     f"but the dataset only has {num_classes} classes and "
-                     f"predicted class id should be in [0, {num_classes - 1}]."
-                 )
-                 result["category_id"] = reverse_id_mapping[category_id]
-
-         if self._output_dir:
-             file_path = os.path.join(self._output_dir, "coco_instances_results.json")
-             self._logger.info("Saving results to {}".format(file_path))
-             with PathManager.open(file_path, "w") as f:
-                 f.write(json.dumps(coco_results))
-                 f.flush()
-
-         if not self._do_evaluation:
-             self._logger.info("Annotations are not available for evaluation.")
-             return
-
-         self._logger.info(
-             "Evaluating predictions with {} COCO API...".format(
-                 "unofficial" if self._use_fast_impl else "official"
-             )
-         )
-         for task in sorted(tasks):
-             assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
-             coco_eval = (
-                 _evaluate_predictions_on_coco(
-                     self._coco_api,
-                     coco_results,
-                     task,
-                     kpt_oks_sigmas=self._kpt_oks_sigmas,
-                     use_fast_impl=self._use_fast_impl,
-                     img_ids=img_ids,
-                 )
-                 if len(coco_results) > 0
-                 else None  # cocoapi does not handle empty results very well
-             )
-
-             res = self._derive_coco_results(
-                 coco_eval, task, class_names=self._metadata.get("thing_classes")
-             )
-             self._results[task] = res
-
-     def _eval_box_proposals(self, predictions):
-         """
-         Evaluate the box proposals in predictions.
-         Fill self._results with the metrics for "box_proposals" task.
-         """
-         if self._output_dir:
-             # Saving generated box proposals to file.
-             # Predicted box_proposals are in XYXY_ABS mode.
-             bbox_mode = BoxMode.XYXY_ABS.value
-             ids, boxes, objectness_logits = [], [], []
-             for prediction in predictions:
-                 ids.append(prediction["image_id"])
-                 boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
-                 objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
-
-             proposal_data = {
-                 "boxes": boxes,
-                 "objectness_logits": objectness_logits,
-                 "ids": ids,
-                 "bbox_mode": bbox_mode,
-             }
-             with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
-                 pickle.dump(proposal_data, f)
-
-         if not self._do_evaluation:
-             self._logger.info("Annotations are not available for evaluation.")
-             return
-
-         self._logger.info("Evaluating bbox proposals ...")
-         res = {}
-         areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
-         for limit in [100, 1000]:
-             for area, suffix in areas.items():
-                 stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit)
-                 key = "AR{}@{:d}".format(suffix, limit)
-                 res[key] = float(stats["ar"].item() * 100)
-         self._logger.info("Proposal metrics: \n" + create_small_table(res))
-         self._results["box_proposals"] = res
-
-     def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
-         """
-         Derive the desired score numbers from summarized COCOeval.
-
-         Args:
-             coco_eval (None or COCOEval): None represents no predictions from model.
-             iou_type (str):
-             class_names (None or list[str]): if provided, will use it to predict
-                 per-category AP.
-
-         Returns:
-             a dict of {metric name: score}
-         """
-
-         metrics = {
-             "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
-             "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
-             "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
-         }[iou_type]
-
-         if coco_eval is None:
-             self._logger.warn("No predictions from the model!")
-             return {metric: float("nan") for metric in metrics}
-
-         # the standard metrics
-         results = {
-             metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
-             for idx, metric in enumerate(metrics)
-         }
-         self._logger.info(
-             "Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
-         )
-         if not np.isfinite(sum(results.values())):
-             self._logger.info("Some metrics cannot be computed and is shown as NaN.")
-
-         if class_names is None or len(class_names) <= 1:
-             return results
-         # Compute per-category AP
-         # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
-         precisions = coco_eval.eval["precision"]
-         # precision has dims (iou, recall, cls, area range, max dets)
-         assert len(class_names) == precisions.shape[2]
-
-         results_per_category = []
-         for idx, name in enumerate(class_names):
-             # area range index 0: all area ranges
-             # max dets index -1: typically 100 per image
-             precision = precisions[:, :, idx, 0, -1]
-             precision = precision[precision > -1]
-             ap = np.mean(precision) if precision.size else float("nan")
-             results_per_category.append(("{}".format(name), float(ap * 100)))
-
-         # Computing AP50 for (seen/unseen) split in generalized zeroshot setting (eg. all 65 categories)
-         # from https://github.com/alirezazareian/ovr-cnn/blob/master/maskrcnn_benchmark/data/datasets/evaluation/coco/coco_eval.py
-         if len(class_names) == 65:
-             p = coco_eval.params
-             maxDets = p.maxDets[2]
-             areaRng = 'all'
-             iouThr = 0.5
-             aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
-             mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
-             t = np.where(iouThr == p.iouThrs)[0]
-             s = coco_eval.eval['precision']
-             s = s[t,:,:,aind,mind]
-
-             unseen_cids = [p.catIds[i] for i, c in enumerate(class_names) if c in COCO_UNSEEN_CLS]
-             seen_cids = [p.catIds[i] for i, c in enumerate(class_names) if c in COCO_SEEN_CLS]
-             all_cids = [p.catIds[i] for i, c in enumerate(class_names) if c in COCO_OVD_ALL_CLS]
-             res = {}
-             for split, cid_list in [('target',unseen_cids), ('base',seen_cids), ('all',all_cids)]:
-                 cinds = []
-                 for cid in cid_list:
-                     cinds.extend([i for i, c in enumerate(p.catIds) if c == cid])
-                 s_split = s[:, :, cinds]
-                 if len(s_split[s_split>-1])==0:
-                     mean_s = -1
-                 else:
-                     mean_s = np.mean(s_split[s_split>-1])
-                 res[f'AP50_split_{split}'] = mean_s
-             for res_item in res:
-                 self._logger.info("{} AP: {}\n".format(res_item, res[res_item]))
-
-         # tabulate it
-         N_COLS = min(6, len(results_per_category) * 2)
-         results_flatten = list(itertools.chain(*results_per_category))
-         results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
-         table = tabulate(
-             results_2d,
-             tablefmt="pipe",
-             floatfmt=".3f",
-             headers=["category", "AP"] * (N_COLS // 2),
-             numalign="left",
-         )
-         self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
-
-         results.update({"AP-" + name: ap for name, ap in results_per_category})
-         return results
-
-
- def instances_to_coco_json(instances, img_id):
-     """
-     Dump an "Instances" object to a COCO-format json that's used for evaluation.
-
-     Args:
-         instances (Instances):
-         img_id (int): the image id
-
-     Returns:
-         list[dict]: list of json annotations in COCO format.
-     """
-     num_instance = len(instances)
-     if num_instance == 0:
-         return []
-
-     boxes = instances.pred_boxes.tensor.numpy()
-     boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
-     boxes = boxes.tolist()
-     scores = instances.scores.tolist()
-     classes = instances.pred_classes.tolist()
-
-     has_mask = instances.has("pred_masks")
-     if has_mask:
-         # use RLE to encode the masks, because they are too large and takes memory
-         # since this evaluator stores outputs of the entire dataset
-         rles = [
-             mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
-             for mask in instances.pred_masks
-         ]
-         for rle in rles:
-             # "counts" is an array encoded by mask_util as a byte-stream. Python3's
-             # json writer which always produces strings cannot serialize a bytestream
-             # unless you decode it. Thankfully, utf-8 works out (which is also what
-             # the pycocotools/_mask.pyx does).
-             rle["counts"] = rle["counts"].decode("utf-8")
-
-     has_keypoints = instances.has("pred_keypoints")
-     if has_keypoints:
-         keypoints = instances.pred_keypoints
-
-     results = []
-     for k in range(num_instance):
-         result = {
-             "image_id": img_id,
-             "category_id": classes[k],
-             "bbox": boxes[k],
-             "score": scores[k],
-         }
-         if has_mask:
-             result["segmentation"] = rles[k]
-         if has_keypoints:
-             # In COCO annotations,
-             # keypoints coordinates are pixel indices.
-             # However our predictions are floating point coordinates.
-             # Therefore we subtract 0.5 to be consistent with the annotation format.
-             # This is the inverse of data loading logic in `datasets/coco.py`.
-             keypoints[k][:, :2] -= 0.5
-             result["keypoints"] = keypoints[k].flatten().tolist()
-         results.append(result)
-     return results
-
-
- # inspired from Detectron:
- # https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
- def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
-     """
-     Evaluate detection proposal recall metrics. This function is a much
-     faster alternative to the official COCO API recall evaluation code. However,
-     it produces slightly different results.
-     """
-     # Record max overlap value for each gt box
-     # Return vector of overlap values
-     areas = {
-         "all": 0,
-         "small": 1,
-         "medium": 2,
-         "large": 3,
-         "96-128": 4,
-         "128-256": 5,
-         "256-512": 6,
-         "512-inf": 7,
-     }
-     area_ranges = [
-         [0 ** 2, 1e5 ** 2],  # all
-         [0 ** 2, 32 ** 2],  # small
-         [32 ** 2, 96 ** 2],  # medium
-         [96 ** 2, 1e5 ** 2],  # large
-         [96 ** 2, 128 ** 2],  # 96-128
-         [128 ** 2, 256 ** 2],  # 128-256
-         [256 ** 2, 512 ** 2],  # 256-512
-         [512 ** 2, 1e5 ** 2],
-     ]  # 512-inf
-     assert area in areas, "Unknown area range: {}".format(area)
-     area_range = area_ranges[areas[area]]
-     gt_overlaps = []
-     num_pos = 0
-
-     for prediction_dict in dataset_predictions:
-         predictions = prediction_dict["proposals"]
-
-         # sort predictions in descending order
-         # TODO maybe remove this and make it explicit in the documentation
-         inds = predictions.objectness_logits.sort(descending=True)[1]
-         predictions = predictions[inds]
-
-         ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"])
-         anno = coco_api.loadAnns(ann_ids)
-         gt_boxes = [
-             BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
-             for obj in anno
-             if obj["iscrowd"] == 0
-         ]
-         gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4)  # guard against no boxes
-         gt_boxes = Boxes(gt_boxes)
-         gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
-
-         if len(gt_boxes) == 0 or len(predictions) == 0:
-             continue
-
-         valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
-         gt_boxes = gt_boxes[valid_gt_inds]
-
-         num_pos += len(gt_boxes)
-
-         if len(gt_boxes) == 0:
-             continue
-
-         if limit is not None and len(predictions) > limit:
-             predictions = predictions[:limit]
-
-         overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
-
-         _gt_overlaps = torch.zeros(len(gt_boxes))
-         for j in range(min(len(predictions), len(gt_boxes))):
-             # find which proposal box maximally covers each gt box
-             # and get the iou amount of coverage for each gt box
-             max_overlaps, argmax_overlaps = overlaps.max(dim=0)
-
-             # find which gt box is 'best' covered (i.e. 'best' = most iou)
-             gt_ovr, gt_ind = max_overlaps.max(dim=0)
-             assert gt_ovr >= 0
-             # find the proposal box that covers the best covered gt box
-             box_ind = argmax_overlaps[gt_ind]
-             # record the iou coverage of this gt box
-             _gt_overlaps[j] = overlaps[box_ind, gt_ind]
-             assert _gt_overlaps[j] == gt_ovr
-             # mark the proposal box and the gt box as used
-             overlaps[box_ind, :] = -1
-             overlaps[:, gt_ind] = -1
-
-         # append recorded iou coverage level
-         gt_overlaps.append(_gt_overlaps)
-     gt_overlaps = (
-         torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
-     )
-     gt_overlaps, _ = torch.sort(gt_overlaps)
-
-     if thresholds is None:
-         step = 0.05
-         thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
-     recalls = torch.zeros_like(thresholds)
-     # compute recall for each iou threshold
-     for i, t in enumerate(thresholds):
-         recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
-     # ar = 2 * np.trapz(recalls, thresholds)
-     ar = recalls.mean()
-     return {
-         "ar": ar,
-         "recalls": recalls,
-         "thresholds": thresholds,
-         "gt_overlaps": gt_overlaps,
-         "num_pos": num_pos,
-     }
-
-
- def _evaluate_predictions_on_coco(
-     coco_gt, coco_results, iou_type, kpt_oks_sigmas=None, use_fast_impl=True, img_ids=None
- ):
-     """
-     Evaluate the coco results using COCOEval API.
-     """
-     assert len(coco_results) > 0
-
-     if iou_type == "segm":
-         coco_results = copy.deepcopy(coco_results)
-         # When evaluating mask AP, if the results contain bbox, cocoapi will
-         # use the box area as the area of the instance, instead of the mask area.
-         # This leads to a different definition of small/medium/large.
-         # We remove the bbox field to let mask AP use mask area.
-         for c in coco_results:
-             c.pop("bbox", None)
-
-     coco_dt = coco_gt.loadRes(coco_results)
-     coco_eval = (COCOeval_opt if use_fast_impl else COCOeval)(coco_gt, coco_dt, iou_type)
-     if img_ids is not None:
-         coco_eval.params.imgIds = img_ids
-
-     if iou_type == "keypoints":
-         # Use the COCO default keypoint OKS sigmas unless overrides are specified
-         if kpt_oks_sigmas:
-             assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!"
-             coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
-         # COCOAPI requires every detection and every gt to have keypoints, so
-         # we just take the first entry from both
-         num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3
-         num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3
-         num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas)
-         assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, (
-             f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. "
-             f"Ground truth contains {num_keypoints_gt} keypoints. "
-             f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. "
-             "They have to agree with each other. For meaning of OKS, please refer to "
-             "http://cocodataset.org/#keypoints-eval."
-         )
-
-     coco_eval.evaluate()
-     coco_eval.accumulate()
-     coco_eval.summarize()
-
-     return coco_eval
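
Note: for context, this evaluator is typically driven through detectron2's standard evaluation loop. A hedged usage sketch (assumes a registered dataset "my_coco_val" plus a `cfg` and `model` built elsewhere; the API names are standard detectron2):

    from detectron2.data import build_detection_test_loader
    from detectron2.evaluation import COCOEvaluator, inference_on_dataset

    evaluator = COCOEvaluator("my_coco_val", output_dir="./eval_out")
    val_loader = build_detection_test_loader(cfg, "my_coco_val")
    # Returns an OrderedDict such as {"bbox": {"AP": ..., "AP50": ...}}.
    metrics = inference_on_dataset(model, val_loader, evaluator)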
spaces/CVPR/regionclip-demo/detectron2/export/caffe2_export.py DELETED
@@ -1,207 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
-
- import copy
- import io
- import logging
- import numpy as np
- from typing import List
- import onnx
- import torch
- from caffe2.proto import caffe2_pb2
- from caffe2.python import core
- from caffe2.python.onnx.backend import Caffe2Backend
- from tabulate import tabulate
- from termcolor import colored
- from torch.onnx import OperatorExportTypes
-
- from .shared import (
-     ScopedWS,
-     construct_init_net_from_params,
-     fuse_alias_placeholder,
-     fuse_copy_between_cpu_and_gpu,
-     get_params_from_init_net,
-     group_norm_replace_aten_with_caffe2,
-     infer_device_type,
-     remove_dead_end_ops,
-     remove_reshape_for_fc,
-     save_graph,
- )
-
- logger = logging.getLogger(__name__)
-
-
- def export_onnx_model(model, inputs):
-     """
-     Trace and export a model to onnx format.
-
-     Args:
-         model (nn.Module):
-         inputs (tuple[args]): the model will be called by `model(*inputs)`
-
-     Returns:
-         an onnx model
-     """
-     assert isinstance(model, torch.nn.Module)
-
-     # make sure all modules are in eval mode, onnx may change the training state
-     # of the module if the states are not consistent
-     def _check_eval(module):
-         assert not module.training
-
-     model.apply(_check_eval)
-
-     # Export the model to ONNX
-     with torch.no_grad():
-         with io.BytesIO() as f:
-             torch.onnx.export(
-                 model,
-                 inputs,
-                 f,
-                 operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
-                 # verbose=True,  # NOTE: uncomment this for debugging
-                 # export_params=True,
-             )
-             onnx_model = onnx.load_from_string(f.getvalue())
-
-     # Apply ONNX's Optimization
-     all_passes = onnx.optimizer.get_available_passes()
-     passes = ["fuse_bn_into_conv"]
-     assert all(p in all_passes for p in passes)
-     onnx_model = onnx.optimizer.optimize(onnx_model, passes)
-     return onnx_model
-
-
- def _op_stats(net_def):
-     type_count = {}
-     for t in [op.type for op in net_def.op]:
-         type_count[t] = type_count.get(t, 0) + 1
-     type_count_list = sorted(type_count.items(), key=lambda kv: kv[0])  # alphabet
-     type_count_list = sorted(type_count_list, key=lambda kv: -kv[1])  # count
-     return "\n".join("{:>4}x {}".format(count, name) for name, count in type_count_list)
-
-
- def _assign_device_option(
-     predict_net: caffe2_pb2.NetDef, init_net: caffe2_pb2.NetDef, tensor_inputs: List[torch.Tensor]
- ):
-     """
-     ONNX exported network doesn't have concept of device, assign necessary
-     device option for each op in order to make it runable on GPU runtime.
-     """
-
-     def _get_device_type(torch_tensor):
-         assert torch_tensor.device.type in ["cpu", "cuda"]
-         assert torch_tensor.device.index == 0
-         return torch_tensor.device.type
-
-     def _assign_op_device_option(net_proto, net_ssa, blob_device_types):
-         for op, ssa_i in zip(net_proto.op, net_ssa):
-             if op.type in ["CopyCPUToGPU", "CopyGPUToCPU"]:
-                 op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0))
-             else:
-                 devices = [blob_device_types[b] for b in ssa_i[0] + ssa_i[1]]
-                 assert all(d == devices[0] for d in devices)
-                 if devices[0] == "cuda":
-                     op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0))
-
-     # update ops in predict_net
-     predict_net_input_device_types = {
-         (name, 0): _get_device_type(tensor)
-         for name, tensor in zip(predict_net.external_input, tensor_inputs)
-     }
-     predict_net_device_types = infer_device_type(
-         predict_net, known_status=predict_net_input_device_types, device_name_style="pytorch"
-     )
-     predict_net_ssa, _ = core.get_ssa(predict_net)
-     _assign_op_device_option(predict_net, predict_net_ssa, predict_net_device_types)
-
-     # update ops in init_net
-     init_net_ssa, versions = core.get_ssa(init_net)
-     init_net_output_device_types = {
-         (name, versions[name]): predict_net_device_types[(name, 0)]
-         for name in init_net.external_output
-     }
-     init_net_device_types = infer_device_type(
-         init_net, known_status=init_net_output_device_types, device_name_style="pytorch"
-     )
-     _assign_op_device_option(init_net, init_net_ssa, init_net_device_types)
-
-
- def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]):
-     """
-     Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX.
-
-     Arg:
-         model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py
-         tensor_inputs: a list of tensors that caffe2 model takes as input.
-     """
-     model = copy.deepcopy(model)
-     assert isinstance(model, torch.nn.Module)
-     assert hasattr(model, "encode_additional_info")
-
-     # Export via ONNX
-     logger.info(
-         "Exporting a {} model via ONNX ...".format(type(model).__name__)
-         + " Some warnings from ONNX are expected and are usually not to worry about."
-     )
-     onnx_model = export_onnx_model(model, (tensor_inputs,))
-     # Convert ONNX model to Caffe2 protobuf
-     init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
-     ops_table = [[op.type, op.input, op.output] for op in predict_net.op]
-     table = tabulate(ops_table, headers=["type", "input", "output"], tablefmt="pipe")
-     logger.info(
-         "ONNX export Done. Exported predict_net (before optimizations):\n" + colored(table, "cyan")
-     )
-
-     # Apply protobuf optimization
-     fuse_alias_placeholder(predict_net, init_net)
-     if any(t.device.type != "cpu" for t in tensor_inputs):
-         fuse_copy_between_cpu_and_gpu(predict_net)
-         remove_dead_end_ops(init_net)
-         _assign_device_option(predict_net, init_net, tensor_inputs)
-     params, device_options = get_params_from_init_net(init_net)
-     predict_net, params = remove_reshape_for_fc(predict_net, params)
-     init_net = construct_init_net_from_params(params, device_options)
-     group_norm_replace_aten_with_caffe2(predict_net)
-
-     # Record necessary information for running the pb model in Detectron2 system.
-     model.encode_additional_info(predict_net, init_net)
-
-     logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net)))
-     logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net)))
-
-     return predict_net, init_net
-
-
- def run_and_save_graph(predict_net, init_net, tensor_inputs, graph_save_path):
-     """
-     Run the caffe2 model on given inputs, recording the shape and draw the graph.
-
-     predict_net/init_net: caffe2 model.
-     tensor_inputs: a list of tensors that caffe2 model takes as input.
-     graph_save_path: path for saving graph of exported model.
-     """
-
-     logger.info("Saving graph of ONNX exported model to {} ...".format(graph_save_path))
-     save_graph(predict_net, graph_save_path, op_only=False)
-
-     # Run the exported Caffe2 net
-     logger.info("Running ONNX exported model ...")
-     with ScopedWS("__ws_tmp__", True) as ws:
-         ws.RunNetOnce(init_net)
-         initialized_blobs = set(ws.Blobs())
-         uninitialized = [inp for inp in predict_net.external_input if inp not in initialized_blobs]
-         for name, blob in zip(uninitialized, tensor_inputs):
-             ws.FeedBlob(name, blob)
-
-         try:
-             ws.RunNetOnce(predict_net)
-         except RuntimeError as e:
-             logger.warning("Encountered RuntimeError: \n{}".format(str(e)))
-
-         ws_blobs = {b: ws.FetchBlob(b) for b in ws.Blobs()}
-         blob_sizes = {b: ws_blobs[b].shape for b in ws_blobs if isinstance(ws_blobs[b], np.ndarray)}
-
-         logger.info("Saving graph with blob shapes to {} ...".format(graph_save_path))
-         save_graph(predict_net, graph_save_path, op_only=False, blob_sizes=blob_sizes)
-
-         return ws_blobs
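
Note: a rough sketch of how the two public helpers above chain together (the caffe2 export path is long deprecated upstream; `caffe2_model` and `tensor_inputs` are assumed to come from detectron2's caffe2-compatible modeling wrappers and data loader, and the import refers to the module deleted above):

    from detectron2.export.caffe2_export import (
        export_caffe2_detection_model,
        run_and_save_graph,
    )

    # caffe2_model, tensor_inputs: assumed prepared elsewhere (see caffe2_modeling.py).
    predict_net, init_net = export_caffe2_detection_model(caffe2_model, tensor_inputs)
    run_and_save_graph(predict_net, init_net, tensor_inputs, "model_graph.svg")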
spaces/Cobalt337/lambdalabs-sd-pokemon-diffusers/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Lambdalabs Sd Pokemon Diffusers
- emoji: 🚀
- colorFrom: blue
- colorTo: green
- sdk: gradio
- sdk_version: 3.24.1
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/modeling/roi_heads/boundary_head/inference.py DELETED
@@ -1,207 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
- import numpy as np
- import torch
- from torch import nn
- from maskrcnn_benchmark.layers.misc import interpolate
-
- from maskrcnn_benchmark.structures.bounding_box import BoxList
-
-
- # TODO check if want to return a single BoxList or a composite
- # object
- class MaskPostProcessor(nn.Module):
-     """
-     From the results of the CNN, post process the masks
-     by taking the mask corresponding to the class with max
-     probability (which are of fixed size and directly output
-     by the CNN) and return the masks in the mask field of the BoxList.
-
-     If a masker object is passed, it will additionally
-     project the masks in the image according to the locations in boxes,
-     """
-
-     def __init__(self, masker=None):
-         super(MaskPostProcessor, self).__init__()
-         self.masker = masker
-
-     def forward(self, x, y, boxes):
-         """
-         Arguments:
-             x (Tensor): the mask logits
-             boxes (list[BoxList]): bounding boxes that are used as
-                 reference, one for ech image
-
-         Returns:
-             results (list[BoxList]): one BoxList for each image, containing
-                 the extra field mask
-         """
-         mask_prob_x = x.sigmoid()
-         mask_prob_y = y.sigmoid()
-         # select masks coresponding to the predicted classes
-         num_masks = x.shape[0]  # 286
-         labels = [bbox.get_field("labels") for bbox in boxes]
-         labels = torch.cat(labels)
-         index = torch.arange(num_masks, device=labels.device)
-         mask_prob_x = mask_prob_x[index, 0][:, None]
-         mask_prob_y = mask_prob_y[index, 0][:, None]
-
-         boxes_per_image = [len(box) for box in boxes]  # boxes for one image
-         mask_prob_x = mask_prob_x.split(boxes_per_image, dim=0)
-         mask_prob_y = mask_prob_y.split(boxes_per_image, dim=0)
-
-         if self.masker:
-             print('yes!!!')
-             mask_prob_x = self.masker(mask_prob_x, boxes)
-             mask_prob_y = self.masker(mask_prob_y, boxes)
-
-         results = []
-         for prob_x, prob_y, box in zip(mask_prob_x, mask_prob_y, boxes):
-             bbox = BoxList(box.bbox, box.size, mode="xyxy")
-             for field in box.fields():
-                 bbox.add_field(field, box.get_field(field))
-             bbox.add_field("mask_x", prob_x)
-             bbox.add_field("mask_y", prob_y)
-             results.append(bbox)
-         return results
-
-
- class MaskPostProcessorCOCOFormat(MaskPostProcessor):
-     """
-     From the results of the CNN, post process the results
-     so that the masks are pasted in the image, and
-     additionally convert the results to COCO format.
-     """
-
-     def forward(self, x, boxes):
-         import pycocotools.mask as mask_util
-         import numpy as np
-
-         results = super(MaskPostProcessorCOCOFormat, self).forward(x, boxes)
-         for result in results:
-             masks = result.get_field("mask").cpu()
-             rles = [
-                 mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0]
-                 for mask in masks
-             ]
-             for rle in rles:
-                 rle["counts"] = rle["counts"].decode("utf-8")
-             result.add_field("mask", rles)
-         return results
-
-
- # the next two functions should be merged inside Masker
- # but are kept here for the moment while we need them
- # temporarily gor paste_mask_in_image
- def expand_boxes(boxes, scale):
-     w_half = (boxes[:, 2] - boxes[:, 0]) * .5
-     h_half = (boxes[:, 3] - boxes[:, 1]) * .5
-     x_c = (boxes[:, 2] + boxes[:, 0]) * .5
-     y_c = (boxes[:, 3] + boxes[:, 1]) * .5
-
-     w_half *= scale
-     h_half *= scale
-
-     boxes_exp = torch.zeros_like(boxes)
-     boxes_exp[:, 0] = x_c - w_half
-     boxes_exp[:, 2] = x_c + w_half
-     boxes_exp[:, 1] = y_c - h_half
-     boxes_exp[:, 3] = y_c + h_half
-     return boxes_exp
-
-
- def expand_masks(mask, padding):
-     N = mask.shape[0]
-     M = mask.shape[-1]
-     pad2 = 2 * padding
-     scale = float(M + pad2) / M
-     padded_mask = mask.new_zeros((N, 1, M + pad2, M + pad2))
-
-     padded_mask[:, :, padding:-padding, padding:-padding] = mask
-     return padded_mask, scale
-
-
- def paste_mask_in_image(mask, box, im_h, im_w, thresh=0.5, padding=1):
-     padded_mask, scale = expand_masks(mask[None], padding=padding)
-     mask = padded_mask[0, 0]
-     box = expand_boxes(box[None], scale)[0]
-     box = box.to(dtype=torch.int32)
-     TO_REMOVE = 1
-     w = int(box[2] - box[0] + TO_REMOVE)
-     h = int(box[3] - box[1] + TO_REMOVE)
-     w = max(w, 1)
-     h = max(h, 1)
-
-     # Set shape to [batchxCxHxW]
-     mask = mask.expand((1, 1, -1, -1))
-
-     # Resize mask
-     mask = mask.to(torch.float32)
-     mask = interpolate(mask, size=(h, w), mode='bilinear', align_corners=False)
-     mask = mask[0][0]
-
-     if thresh >= 0:
-         mask = mask > thresh
-     else:
-         # for visualization and debugging, we also
-         # allow it to return an unmodified mask
-         mask = (mask * 255).to(torch.uint8)
-
-     im_mask = torch.zeros((im_h, im_w), dtype=torch.uint8)
-     x_0 = max(box[0], 0)
-     x_1 = min(box[2] + 1, im_w)
-     y_0 = max(box[1], 0)
-     y_1 = min(box[3] + 1, im_h)
-
-     im_mask[y_0:y_1, x_0:x_1] = mask[
-         (y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])
-     ]
-     return im_mask
-
-
- class Masker(object):
-     """
-     Projects a set of masks in an image on the locations specified by the bounding boxes
-     """
-
-     def __init__(self, threshold=0.5, padding=1):
-         self.threshold = threshold
-         self.padding = padding
-
-     def forward_single_image(self, masks, boxes):
-         boxes = boxes.convert("xyxy")
-         im_w, im_h = boxes.size
-         res = [
-             paste_mask_in_image(mask[0], box, im_h, im_w, self.threshold, self.padding)
-             for mask, box in zip(masks, boxes.bbox)
-         ]
-         if len(res) > 0:
-             res = torch.stack(res, dim=0)[:, None]
-         else:
-             res = masks.new_empty((0, 1, masks.shape[-2], masks.shape[-1]))
-         return res
-
-     def __call__(self, masks, boxes):
-         if isinstance(boxes, BoxList):
-             boxes = [boxes]
-
-         # Make some sanity check
-         assert len(boxes) == len(masks), "Masks and boxes should have the same length."
-
-         # TODO: Is this JIT compatible?
-         # If not we should make it compatible.
-         results = []
-         for mask, box in zip(masks, boxes):
-             assert mask.shape[0] == len(box), "Number of objects should be the same."
-             result = self.forward_single_image(mask, box)
-             results.append(result)
-         return results
-
-
- def make_roi_boundary_post_processor(cfg):
-     if cfg.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS:
-         mask_threshold = cfg.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD  ## 0.5
-         masker = Masker(threshold=mask_threshold, padding=1)
-     else:
-         masker = None
-     mask_post_processor = MaskPostProcessor(masker)
-     return mask_post_processor
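
Note: the pasting helper above can be exercised standalone; a minimal sketch with synthetic inputs and the deleted code's default threshold/padding (the import refers to the module deleted above):

    import torch
    from maskrcnn_benchmark.modeling.roi_heads.boundary_head.inference import paste_mask_in_image

    mask = torch.rand(28, 28)                      # one 28x28 mask probability map
    box = torch.tensor([10.0, 20.0, 90.0, 120.0])  # xyxy box in image coordinates
    im_mask = paste_mask_in_image(mask, box, im_h=200, im_w=200, thresh=0.5)
    print(im_mask.shape, im_mask.dtype)            # torch.Size([200, 200]) torch.uint8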
spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/datasets/builders/instruct_builder.py DELETED
@@ -1,78 +0,0 @@
- import os
- import logging
- import warnings
-
- from video_llama.common.registry import registry
- from video_llama.datasets.builders.base_dataset_builder import BaseDatasetBuilder
- from video_llama.datasets.datasets.laion_dataset import LaionDataset
- from video_llama.datasets.datasets.llava_instruct_dataset import Instruct_Dataset
- from video_llama.datasets.datasets.video_instruct_dataset import Video_Instruct_Dataset
-
- @registry.register_builder("instruct")
- class Instruct_Builder(BaseDatasetBuilder):
-     train_dataset_cls = Instruct_Dataset
-
-     DATASET_CONFIG_DICT = {"default": "configs/datasets/instruct/defaults.yaml"}
-
-     def _download_ann(self):
-         pass
-
-     def _download_vis(self):
-         pass
-
-     def build(self):
-         self.build_processors()
-         datasets = dict()
-         split = "train"
-
-         build_info = self.config.build_info
-         dataset_cls = self.train_dataset_cls
-         if self.config.num_video_query_token:
-             num_video_query_token = self.config.num_video_query_token
-         else:
-             num_video_query_token = 32
-
-         if self.config.tokenizer_name:
-             tokenizer_name = self.config.tokenizer_name
-         else:
-             tokenizer_name = '/mnt/workspace/ckpt/vicuna-13b/'
-
-
-         datasets[split] = dataset_cls(
-             vis_processor=self.vis_processors[split],
-             text_processor=self.text_processors[split],
-             vis_root=build_info.videos_dir,
-             ann_root=build_info.anno_dir,
-             num_video_query_token = num_video_query_token,
-             tokenizer_name = tokenizer_name,
-             data_type = self.config.data_type
-         )
-
-         return datasets
-
- @registry.register_builder("webvid_instruct")
- class WebvidInstruct_Builder(Instruct_Builder):
-     train_dataset_cls = Video_Instruct_Dataset
-
-     DATASET_CONFIG_DICT = {
-         "default": "configs/datasets/instruct/webvid_instruct.yaml",
-     }
-
- @registry.register_builder("webvid_instruct_zh")
- class WebvidInstruct_zh_Builder(Instruct_Builder):
-     train_dataset_cls = Video_Instruct_Dataset
-
-     DATASET_CONFIG_DICT = {
-         "default": "configs/datasets/instruct/webvid_instruct.yaml",
-     }
-
-
-
- @registry.register_builder("llava_instruct")
- class LlavaInstruct_Builder(Instruct_Builder):
-     train_dataset_cls = Instruct_Dataset
-
-     DATASET_CONFIG_DICT = {
-         "default": "configs/datasets/instruct/llava_instruct.yaml",
-     }
-
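
Note: the registry pattern above makes adding a dataset a small subclass; a hedged sketch of a hypothetical new builder (the builder key and yaml path are invented for illustration, and the imports refer to the module deleted above):

    from video_llama.common.registry import registry
    from video_llama.datasets.builders.instruct_builder import Instruct_Builder
    from video_llama.datasets.datasets.video_instruct_dataset import Video_Instruct_Dataset

    @registry.register_builder("my_video_instruct")  # hypothetical key
    class MyVideoInstruct_Builder(Instruct_Builder):
        train_dataset_cls = Video_Instruct_Dataset

        DATASET_CONFIG_DICT = {
            "default": "configs/datasets/instruct/my_video_instruct.yaml",  # hypothetical path
        }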
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/cu2qu/cu2qu.c DELETED
The diff for this file is too large to render. See raw diff
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/components/clear_button.py DELETED
@@ -1,70 +0,0 @@
- """ Predefined buttons with bound events that can be included in a gr.Blocks for convenience. """
-
- from __future__ import annotations
-
- import json
- from typing import Literal
-
- from gradio_client.documentation import document, set_documentation_group
-
- from gradio.components import Button, Component
-
- set_documentation_group("component")
-
-
- @document("add")
- class ClearButton(Button):
-     """
-     Button that clears the value of a component or a list of components when clicked. It is instantiated with the list of components to clear.
-     Preprocessing: passes the button value as a {str} into the function
-     Postprocessing: expects a {str} to be returned from a function, which is set as the label of the button
-     """
-
-     is_template = True
-
-     def __init__(
-         self,
-         components: None | list[Component] | Component = None,
-         *,
-         value: str = "Clear",
-         variant: Literal["primary", "secondary", "stop"] = "secondary",
-         size: Literal["sm", "lg"] | None = None,
-         visible: bool = True,
-         interactive: bool = True,
-         elem_id: str | None = None,
-         elem_classes: list[str] | str | None = None,
-         scale: int | None = None,
-         min_width: int | None = None,
-         **kwargs,
-     ):
-         super().__init__(
-             value,
-             variant=variant,
-             size=size,
-             visible=visible,
-             interactive=interactive,
-             elem_id=elem_id,
-             elem_classes=elem_classes,
-             scale=scale,
-             min_width=min_width,
-             **kwargs,
-         )
-         self.add(components)
-
-     def add(self, components: None | Component | list[Component]) -> ClearButton:
-         """
-         Adds a component or list of components to the list of components that will be cleared when the button is clicked.
-         """
-         if not components:
-             # This needs to be here because when the ClearButton is created in an gr.Interface, we don't
-             # want to create dependencies for it before we have created the dependencies for the submit function.
-             # We generally assume that the submit function dependency is the first thing created in an gr.Interface.
-             return self
-
-         if isinstance(components, Component):
-             components = [components]
-         clear_values = json.dumps(
-             [component.postprocess(None) for component in components]
-         )
-         self.click(None, [], components, _js=f"() => {clear_values}")
-         return self
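
Note: gr.ClearButton is public Gradio API; a minimal usage sketch of the class above:

    import gradio as gr

    with gr.Blocks() as demo:
        textbox = gr.Textbox(label="Prompt")
        image = gr.Image(label="Result")
        # One click resets both components to their empty values.
        gr.ClearButton(components=[textbox, image], value="Reset")

    demo.launch()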
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates.py DELETED
@@ -1,574 +0,0 @@
- from __future__ import annotations
-
- from typing import Any, Callable, Literal
-
- import numpy as np
- from PIL.Image import Image
-
- from gradio import components
-
-
- class TextArea(components.Textbox):
-     """
-     Sets: lines=7
-     """
-
-     is_template = True
-
-     def __init__(
-         self,
-         value: str | Callable | None = "",
-         *,
-         lines: int = 7,
-         max_lines: int = 20,
-         placeholder: str | None = None,
-         label: str | None = None,
-         show_label: bool = True,
-         interactive: bool | None = None,
-         visible: bool = True,
-         elem_id: str | None = None,
-         **kwargs,
-     ):
-         super().__init__(
-             value=value,
-             lines=lines,
-             max_lines=max_lines,
-             placeholder=placeholder,
-             label=label,
-             show_label=show_label,
-             interactive=interactive,
-             visible=visible,
-             elem_id=elem_id,
-             **kwargs,
-         )
-
-
- class Webcam(components.Image):
-     """
-     Sets: source="webcam", interactive=True
-     """
-
-     is_template = True
-
-     def __init__(
-         self,
-         value: str | Image | np.ndarray | None = None,
-         *,
-         shape: tuple[int, int] | None = None,
-         image_mode: Literal["RGB", "L"] = "RGB",
-         invert_colors: bool = False,
-         source: Literal["webcam"] = "webcam",
-         tool: Literal["editor", "select", "sketch", "color-sketch"] | None = None,
-         type: Literal["numpy", "pil", "filepath"] = "numpy",
-         label: str | None = None,
-         show_label: bool = True,
-         interactive: bool | None = True,
-         visible: bool = True,
-         streaming: bool = False,
-         elem_id: str | None = None,
-         mirror_webcam: bool = True,
-         brush_radius: float | None = None,
-         **kwargs,
-     ):
-         super().__init__(
-             value=value,
-             shape=shape,
-             image_mode=image_mode,
-             invert_colors=invert_colors,
-             source=source,
-             tool=tool,
-             type=type,
-             label=label,
-             show_label=show_label,
-             interactive=interactive,
-             visible=visible,
-             streaming=streaming,
-             elem_id=elem_id,
-             mirror_webcam=mirror_webcam,
-             brush_radius=brush_radius,
-             **kwargs,
-         )
-
-
- class Sketchpad(components.Image):
-     """
-     Sets: image_mode="L", source="canvas", shape=(28, 28), invert_colors=True, interactive=True
-     """
-
-     is_template = True
-
-     def __init__(
-         self,
-         value: str | Image | np.ndarray | None = None,
-         *,
-         shape: tuple[int, int] = (28, 28),
-         image_mode: Literal["L"] = "L",
-         invert_colors: bool = True,
-         source: Literal["canvas"] = "canvas",
-         tool: Literal["editor", "select", "sketch", "color-sketch"] | None = None,
-         type: Literal["numpy", "pil", "filepath"] = "numpy",
-         label: str | None = None,
-         show_label: bool = True,
-         interactive: bool | None = True,
-         visible: bool = True,
-         streaming: bool = False,
-         elem_id: str | None = None,
-         mirror_webcam: bool = True,
-         brush_radius: float | None = None,
-         **kwargs,
-     ):
-         super().__init__(
-             value=value,
-             shape=shape,
-             image_mode=image_mode,
-             invert_colors=invert_colors,
-             source=source,
-             tool=tool,
-             type=type,
-             label=label,
-             show_label=show_label,
-             interactive=interactive,
-             visible=visible,
-             streaming=streaming,
-             elem_id=elem_id,
-             mirror_webcam=mirror_webcam,
-             brush_radius=brush_radius,
-             **kwargs,
-         )
-
-
- class Paint(components.Image):
-     """
-     Sets: source="canvas", tool="color-sketch", interactive=True
-     """
-
-     is_template = True
-
-     def __init__(
-         self,
-         value: str | Image | np.ndarray | None = None,
-         *,
-         shape: tuple[int, int] | None = None,
-         image_mode: Literal["RGB"] = "RGB",
-         invert_colors: bool = False,
-         source: Literal["canvas"] = "canvas",
-         tool: Literal["color-sketch"] = "color-sketch",
-         type: Literal["numpy", "pil", "filepath"] = "numpy",
-         label: str | None = None,
-         show_label: bool = True,
-         interactive: bool | None = True,
-         visible: bool = True,
-         streaming: bool = False,
-         elem_id: str | None = None,
-         mirror_webcam: bool = True,
-         brush_radius: float | None = None,
-         **kwargs,
-     ):
-         super().__init__(
-             value=value,
-             shape=shape,
-             image_mode=image_mode,
-             invert_colors=invert_colors,
-             source=source,
-             tool=tool,
-             type=type,
-             label=label,
-             show_label=show_label,
-             interactive=interactive,
-             visible=visible,
-             streaming=streaming,
-             elem_id=elem_id,
-             mirror_webcam=mirror_webcam,
-             brush_radius=brush_radius,
-             **kwargs,
-         )
-
-
- class ImageMask(components.Image):
-     """
-     Sets: source="upload", tool="sketch", interactive=True
-     """
-
-     is_template = True
-
-     def __init__(
-         self,
-         value: str | Image | np.ndarray | None = None,
-         *,
-         shape: tuple[int, int] | None = None,
-         image_mode: Literal["RGB", "L"] = "RGB",
-         invert_colors: bool = False,
-         source: Literal["upload"] = "upload",
-         tool: Literal["sketch"] = "sketch",
-         type: Literal["numpy", "pil", "filepath"] = "numpy",
-         label: str | None = None,
-         show_label: bool = True,
-         interactive: bool | None = True,
-         visible: bool = True,
-         streaming: bool = False,
-         elem_id: str | None = None,
-         mirror_webcam: bool = True,
-         brush_radius: float | None = None,
-         **kwargs,
-     ):
-         super().__init__(
-             value=value,
-             shape=shape,
-             image_mode=image_mode,
-             invert_colors=invert_colors,
-             source=source,
-             tool=tool,
-             type=type,
-             label=label,
-             show_label=show_label,
-             interactive=interactive,
-             visible=visible,
-             streaming=streaming,
-             elem_id=elem_id,
-             mirror_webcam=mirror_webcam,
-             brush_radius=brush_radius,
-             **kwargs,
-         )
-
-
- class ImagePaint(components.Image):
-     """
-     Sets: source="upload", tool="color-sketch", interactive=True
-     """
-
-     is_template = True
-
-     def __init__(
-         self,
-         value: str | Image | np.ndarray | None = None,
-         *,
-         shape: tuple[int, int] | None = None,
-         image_mode: Literal["RGB", "L"] = "RGB",
-         invert_colors: bool = False,
-         source: Literal["upload"] = "upload",
-         tool: Literal["color-sketch"] = "color-sketch",
-         type: Literal["numpy", "pil", "filepath"] = "numpy",
-         label: str | None = None,
-         show_label: bool = True,
-         interactive: bool | None = True,
-         visible: bool = True,
-         streaming: bool = False,
-         elem_id: str | None = None,
-         mirror_webcam: bool = True,
-         brush_radius: float | None = None,
-         **kwargs,
-     ):
-         super().__init__(
-             value=value,
-             shape=shape,
-             image_mode=image_mode,
-             invert_colors=invert_colors,
-             source=source,
-             tool=tool,
-             type=type,
-             label=label,
-             show_label=show_label,
-             interactive=interactive,
-             visible=visible,
-             streaming=streaming,
-             elem_id=elem_id,
-             mirror_webcam=mirror_webcam,
-             brush_radius=brush_radius,
-             **kwargs,
-         )
-
-
- class Pil(components.Image):
-     """
-     Sets: type="pil"
-     """
-
-     is_template = True
-
-     def __init__(
-         self,
-         value: str | Image | np.ndarray | None = None,
-         *,
-         shape: tuple[int, int] | None = None,
-         image_mode: Literal["RGB", "L"] = "RGB",
-         invert_colors: bool = False,
-         source: Literal["upload", "webcam", "canvas"] = "upload",
-         tool: Literal["editor", "select", "sketch", "color-sketch"] | None = None,
-         type: Literal["pil"] = "pil",
-         label: str | None = None,
-         show_label: bool = True,
-         interactive: bool | None = None,
-         visible: bool = True,
-         streaming: bool = False,
-         elem_id: str | None = None,
-         mirror_webcam: bool = True,
-         brush_radius: float | None = None,
-         **kwargs,
-     ):
-         super().__init__(
-             value=value,
-             shape=shape,
-             image_mode=image_mode,
-             invert_colors=invert_colors,
-             source=source,
-             tool=tool,
-             type=type,
-             label=label,
-             show_label=show_label,
-             interactive=interactive,
-             visible=visible,
-             streaming=streaming,
-             elem_id=elem_id,
-             mirror_webcam=mirror_webcam,
-             brush_radius=brush_radius,
-             **kwargs,
-         )
-
-
- class PlayableVideo(components.Video):
-     """
-     Sets: format="mp4"
-     """
-
-     is_template = True
-
-     def __init__(
-         self,
-         value: str | Callable | None = None,
-         *,
-         format: Literal["mp4"] | None = "mp4",
-         source: Literal["upload", "webcam"] = "upload",
-         label: str | None = None,
-         show_label: bool = True,
-         interactive: bool | None = None,
-         visible: bool = True,
-         elem_id: str | None = None,
-         mirror_webcam: bool = True,
-         include_audio: bool | None = None,
-         **kwargs,
-     ):
-         super().__init__(
-             value=value,
-             format=format,
-             source=source,
-             label=label,
-             show_label=show_label,
-             interactive=interactive,
-             visible=visible,
-             elem_id=elem_id,
-             mirror_webcam=mirror_webcam,
-             include_audio=include_audio,
-             **kwargs,
-         )
-
-
- class Microphone(components.Audio):
-     """
-     Sets: source="microphone"
-     """
-
-     is_template = True
-
-     def __init__(
-         self,
-         value: str | tuple[int, np.ndarray] | Callable | None = None,
-         *,
-         source: Literal["microphone"] = "microphone",
-         type: Literal["numpy", "filepath"] = "numpy",
-         label: str | None = None,
-         show_label: bool = True,
-         interactive: bool | None = None,
-         visible: bool = True,
-         streaming: bool = False,
-         elem_id: str | None = None,
-         **kwargs,
-     ):
-         super().__init__(
-             value=value,
-             source=source,
-             type=type,
-             label=label,
-             show_label=show_label,
-             interactive=interactive,
-             visible=visible,
-             streaming=streaming,
-             elem_id=elem_id,
-             **kwargs,
-         )
-
-
- class Files(components.File):
-     """
-     Sets: file_count="multiple"
-     """
-
-     is_template = True
-
-     def __init__(
-         self,
-         value: str | list[str] | Callable | None = None,
-         *,
-         file_count: Literal["multiple"] = "multiple",
-         type: Literal["file", "binary"] = "file",
-         label: str | None = None,
-         show_label: bool = True,
-         interactive: bool | None = None,
-         visible: bool = True,
-         elem_id: str | None = None,
-         **kwargs,
-     ):
-         super().__init__(
-             value=value,
-             file_count=file_count,
-             type=type,
-             label=label,
-             show_label=show_label,
-             interactive=interactive,
-             visible=visible,
-             elem_id=elem_id,
-             **kwargs,
-         )
-
-
- class Numpy(components.Dataframe):
-     """
-     Sets: type="numpy"
-     """
-
-     is_template = True
-
-     def __init__(
-         self,
-         value: list[list[Any]] | Callable | None = None,
-         *,
-         headers: list[str] | None = None,
-         row_count: int | tuple[int, str] = (1, "dynamic"),
-         col_count: int | tuple[int, str] | None = None,
-         datatype: str | list[str] = "str",
-         type: Literal["numpy"] = "numpy",
-         max_rows: int | None = 20,
-         max_cols: int | None = None,
-         overflow_row_behaviour: Literal["paginate", "show_ends"] = "paginate",
-         label: str | None = None,
-         show_label: bool = True,
-         interactive: bool | None = None,
-         visible: bool = True,
-         elem_id: str | None = None,
-         wrap: bool = False,
-         **kwargs,
-     ):
-         super().__init__(
-             value=value,
-             headers=headers,
-             row_count=row_count,
-             col_count=col_count,
-             datatype=datatype,
-             type=type,
-             max_rows=max_rows,
-             max_cols=max_cols,
-             overflow_row_behaviour=overflow_row_behaviour,
-             label=label,
-             show_label=show_label,
-             interactive=interactive,
-             visible=visible,
-             elem_id=elem_id,
-             wrap=wrap,
-             **kwargs,
-         )
-
-
- class Matrix(components.Dataframe):
-     """
-     Sets: type="array"
-     """
-
-     is_template = True
-
-     def __init__(
-         self,
-         value: list[list[Any]] | Callable | None = None,
-         *,
-         headers: list[str] | None = None,
-         row_count: int | tuple[int, str] = (1, "dynamic"),
-         col_count: int | tuple[int, str] | None = None,
-         datatype: str | list[str] = "str",
-         type: Literal["array"] = "array",
-         max_rows: int | None = 20,
-         max_cols: int | None = None,
-         overflow_row_behaviour: Literal["paginate", "show_ends"] = "paginate",
-         label: str | None = None,
-         show_label: bool = True,
-         interactive: bool | None = None,
-         visible: bool = True,
-         elem_id: str | None = None,
-         wrap: bool = False,
-         **kwargs,
-     ):
-         super().__init__(
-             value=value,
-             headers=headers,
-             row_count=row_count,
-             col_count=col_count,
-             datatype=datatype,
-             type=type,
-             max_rows=max_rows,
-             max_cols=max_cols,
-             overflow_row_behaviour=overflow_row_behaviour,
-             label=label,
-             show_label=show_label,
-             interactive=interactive,
-             visible=visible,
-             elem_id=elem_id,
-             wrap=wrap,
-             **kwargs,
-         )
-
-
- class List(components.Dataframe):
-     """
-     Sets: type="array", col_count=1
-     """
-
-     is_template = True
-
-     def __init__(
-         self,
-         value: list[list[Any]] | Callable | None = None,
-         *,
-         headers: list[str] | None = None,
-         row_count: int | tuple[int, str] = (1, "dynamic"),
-         col_count: Literal[1] = 1,
-         datatype: str | list[str] = "str",
-         type: Literal["array"] = "array",
-         max_rows: int | None = 20,
-         max_cols: int | None = None,
-         overflow_row_behaviour: Literal["paginate", "show_ends"] = "paginate",
-         label: str | None = None,
-         show_label: bool = True,
-         interactive: bool | None = None,
-         visible: bool = True,
-         elem_id: str | None = None,
-         wrap: bool = False,
-         **kwargs,
-     ):
-         super().__init__(
-             value=value,
-             headers=headers,
-             row_count=row_count,
-             col_count=col_count,
-             datatype=datatype,
-             type=type,
-             max_rows=max_rows,
-             max_cols=max_cols,
-             overflow_row_behaviour=overflow_row_behaviour,
-             label=label,
-             show_label=show_label,
-             interactive=interactive,
-             visible=visible,
-             elem_id=elem_id,
-             wrap=wrap,
-             **kwargs,
-         )
-
-
- Mic = Microphone
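
Note: each template is a thin preset over a base component; a sketch using one in an Interface (gr.Sketchpad is the public alias for the class above):

    import gradio as gr

    def predict(img):
        if img is None:
            return "draw something first"
        # The Sketchpad preset delivers a 28x28 numpy array (type="numpy", shape=(28, 28)).
        return f"got array of shape {img.shape}"

    demo = gr.Interface(fn=predict, inputs=gr.Sketchpad(), outputs="text")
    demo.launch()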