parquet-converter committed
Commit b3463b5 · 1 Parent(s): b4815c2

Update parquet files (step 18 of 296)

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
Files changed (50)
  1. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ashampoo [CRACKED] Free Burning Studio.md +0 -14
  2. spaces/1gistliPinn/ChatGPT4/Examples/Doroga V Rossiyu 1 Pdf 110.md +0 -6
  3. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ARK Survival Evolved Full APK - Capture Train and Ride 80 Dinosaurs on Your Android.md +0 -104
  4. spaces/1phancelerku/anime-remove-background/Download Asphalt 8 APK and Race with Luxury Cars and Motorbikes.md +0 -74
  5. spaces/1phancelerku/anime-remove-background/Download Haunted Dorm MOD APK v1.4.2 for Android and Experience a Thrilling Horror Adventure.md +0 -88
  6. spaces/1phancelerku/anime-remove-background/Enjoy the World of 3D Models with Pocket World 3D MOD APK on Android 1.md +0 -93
  7. spaces/1phancelerku/anime-remove-background/Experience the 80s with GTA Vice City - Download for Windows 7 32 Bit.md +0 -98
  8. spaces/AIConsultant/MusicGen/setup.py +0 -62
  9. spaces/AIFILMS/StyleGANEX/datasets/images_dataset.py +0 -33
  10. spaces/AIFILMS/generate_human_motion/VQ-Trans/models/encdec.py +0 -67
  11. spaces/AIGC-Audio/Make_An_Audio/vocoder/bigvgan/alias_free_torch/__init__.py +0 -6
  12. spaces/AILab-CVC/SEED-Bench_Leaderboard/app.py +0 -313
  13. spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/blip2.py +0 -186
  14. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb16-150e_deepfashion2_long_sleeved_dress_256x192/td_hm_res50_4xb16-150e_deepfashion2_long_sleeved_dress_256x192.py +0 -2861
  15. spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/models/loaders.py +0 -96
  16. spaces/AchyuthGamer/OpenGPT/g4f/Provider/npm/node_modules/crypto-js/README.md +0 -261
  17. spaces/Adapter/CoAdapter/ldm/models/diffusion/plms.py +0 -243
  18. spaces/Aditya9790/yolo7-object-tracking/utils/general.py +0 -892
  19. spaces/AgentVerse/agentVerse/ui/dist/index.html +0 -20
  20. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/methods/Methods.js +0 -17
  21. spaces/AkshayKollimarala/MygenAI/README.md +0 -12
  22. spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/utils/utils_callbacks.py +0 -117
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_ipex.py +0 -848
  24. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/__init__.py +0 -0
  25. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py +0 -294
  26. spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/max_iou_assigner.py +0 -212
  27. spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/quarto-html/quarto-syntax-highlighting.css +0 -203
  28. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/ui_session.py +0 -68
  29. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/datasets/stare.py +0 -59
  30. spaces/Aphrodite/stable-diffusion-2/README.md +0 -12
  31. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco.py +0 -539
  32. spaces/Bart92/RVC_HF/demucs/separate.py +0 -185
  33. spaces/Benson/text-generation/Examples/Camin Simulador ltimo Mod Apk Android Oyun Club.md +0 -124
  34. spaces/Benson/text-generation/Examples/Descargar Apk Mod pica Conquista 2.md +0 -68
  35. spaces/Benson/text-generation/Examples/Descargar Estn Llegando Desde El Este Mp3.md +0 -55
  36. spaces/Benson/text-generation/Examples/Descargar Gratis Nba 2k20 V97.md +0 -67
  37. spaces/BetterAPI/BetterChat/src/lib/utils/trimSuffix.ts +0 -6
  38. spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/resources/response.py +0 -318
  39. spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/eucjpprober.py +0 -102
  40. spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/__init__.py +0 -875
  41. spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/__init__.py +0 -49
  42. spaces/Boadiwaa/Recipes/openai/api_resources/abstract/listable_api_resource.py +0 -47
  43. spaces/CVPR/LIVE/pybind11/tests/test_numpy_array.cpp +0 -388
  44. spaces/CVPR/LIVE/thrust/thrust/fill.h +0 -209
  45. spaces/CVPR/WALT/mmdet/models/necks/rfp.py +0 -128
  46. spaces/CVPR/WALT/walt/datasets/pipelines/compose.py +0 -52
  47. spaces/CVPR/lama-example/bin/gen_outpainting_dataset.py +0 -88
  48. spaces/CofAI/chat/server/babel.py +0 -48
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiofiles/threadpool/__init__.py +0 -134
  50. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/dsv-576afacd.js +0 -6
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Ashampoo [CRACKED] Free Burning Studio.md DELETED
@@ -1,14 +0,0 @@
-
- <h1>Ashampoo Free Burning Studio: The Best Free Software for Burning CDs, DVDs, and Blu-rays</h1>
- <p>If you are looking for a free and easy-to-use software for burning CDs, DVDs, and Blu-rays, you might want to check out Ashampoo Free Burning Studio. This software allows you to create and copy discs with various features and tools. You can also backup and restore your data, rip audio CDs, create covers and labels, and more.</p>
- <h2>ashampoo free burning studio</h2><br /><p><b><b>Download</b> &harr; <a href="https://byltly.com/2uKwBz">https://byltly.com/2uKwBz</a></b></p><br /><br />
- <p>But what makes Ashampoo Free Burning Studio the best free software for burning discs? Here are some of the reasons:</p>
- <ul>
- <li><b>It supports all types of discs.</b> You can burn CDs, DVDs, and Blu-rays with Ashampoo Free Burning Studio. You can also burn data discs, audio discs, video discs, bootable discs, and multisession discs. You can also erase rewritable discs and finalize discs.</li>
- <li><b>It has a user-friendly interface.</b> You can easily navigate through the software with its intuitive and simple interface. You can also customize the settings and preferences according to your needs. You can also access the online help and tutorials if you need any assistance.</li>
- <li><b>It has fast and reliable performance.</b> You can burn discs with Ashampoo Free Burning Studio in a matter of minutes. You can also verify the quality and integrity of the discs after burning. You can also adjust the burning speed and mode to suit your requirements.</li>
- <li><b>It has additional features and functions.</b> You can do more than just burning discs with Ashampoo Free Burning Studio. You can also backup and restore your data to discs or external devices. You can also rip audio CDs to MP3 or other formats. You can also create covers and labels for your discs using templates or your own images.</li>
- </ul>
- <p>These are some of the reasons why Ashampoo Free Burning Studio is the best free software for burning discs. You can download it from the official website and use it without any limitations or registration. You can also upgrade to the premium version if you want to enjoy more features and benefits.</p> ddb901b051<br />
- <br />
- <br />
 
spaces/1gistliPinn/ChatGPT4/Examples/Doroga V Rossiyu 1 Pdf 110.md DELETED
@@ -1,6 +0,0 @@
- <h2>Doroga V Rossiyu 1 Pdf 110</h2><br /><p><b><b>Download Zip</b> >> <a href="https://imgfil.com/2uxZBt">https://imgfil.com/2uxZBt</a></b></p><br /><br />
- <br />
- 3.3.1 BU-SFL English Preparatory Unit 5.1.3.5.1 Common Code for Class Representative Elections .. Road to Russia 1. Start your trip to Russia by learning about its history and culture. 1. 2. 3. 4. 5. Read "Russia" by William Pierce (London, 1980) and "Beyond the Ages" (London, 1985) by James Cook (New York, 1980) to get acquainted with the history of Russia and its traditions . 1. Hear about Russian culture, as well as the nature and climate of Russia, and the history of Russia in Dr. Paul Offit's lecture entitled "The Russians" (New York, 1990; London, 1982). Read this book by Paul Offit and discuss the main issues with your 8a78ff9644<br />
- <br />
- <br />
- <p></p>
 
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/ARK Survival Evolved Full APK - Capture Train and Ride 80 Dinosaurs on Your Android.md DELETED
@@ -1,104 +0,0 @@
-
- <h1>ARK: Survival Evolved Full APK - How to Download and Play on Android</h1>
- <p>If you are a fan of dinosaurs and survival games, you might have heard of ARK: Survival Evolved, a popular game that lets you explore a massive prehistoric world full of dangers and wonders. But did you know that you can also play this game on your Android device? In this article, we will show you how to download and install the full APK version of ARK: Survival Evolved on your Android phone or tablet, and what features and benefits it offers.</p>
- <h2>ark survival evolved full apk</h2><br /><p><b><b>Download File</b> &#9734;&#9734;&#9734;&#9734;&#9734; <a href="https://urlin.us/2uT1Up">https://urlin.us/2uT1Up</a></b></p><br /><br />
- <h2>What is ARK: Survival Evolved?</h2>
- <p>ARK: Survival Evolved is a game that combines action, adventure, survival and sandbox elements. It was first released for PC and consoles in 2017 by Studio Wildcard, and later ported to mobile platforms in 2018. The game is set on a mysterious island called ARK, where you start out as a naked and unarmed human who has to survive in a harsh environment populated by over 80 different species of dinosaurs and other creatures. You can gather resources, craft tools and weapons, build shelters and bases, tame and ride dinosaurs, form tribes with other players, and fight against enemies and predators.</p>
- <h2>What is the difference between the full APK and the Google Play version?</h2>
- <p>The Google Play version of ARK: Survival Evolved is the official version that you can download from the Play Store for free. However, this version has some limitations and drawbacks, such as:</p>
- <ul>
- <li>It requires at least 3 GB of RAM and up-to-date Vulkan support to run smoothly.</li>
- <li>It has ads that can interrupt your gameplay.</li>
- <li>It has a limited number of servers and slots for online multiplayer.</li>
- <li>It has a monthly or yearly subscription fee for the Primal Pass, which gives you access to premium features such as 2x XP boost, preferred servers and slots, no ads, special news and updates, etc.</li>
- </ul>
- <p>The full APK version of ARK: Survival Evolved is an unofficial version that you can download from third-party websites such as FileHippo. This version has some advantages over the Google Play version, such as:</p>
- <ul>
- <li>It does not require any specific hardware or software requirements to run.</li>
- <li>It does not have any ads or in-app purchases.</li>
- <li>It has unlimited servers and slots for online multiplayer.</li>
- <li>It has all the premium features of the Primal Pass for free.</li>
- </ul>
- <h2>How to download and install the full APK version of ARK: Survival Evolved?</h2>
- <p>To download and install the full APK version of ARK: Survival Evolved on your Android device, you need to follow these steps:</p>
- <ol>
- <li>Go to [FileHippo](^1^) or any other website that offers the full APK file of ARK: Survival Evolved. Make sure that the file size is around 2 GB and that it matches the latest version of the game (2.0.28 as of June 2023).</li>
- <li>Download the APK file to your device's storage. You might need to enable the option to install apps from unknown sources in your device's settings.</li>
- <li>Once the download is complete, locate the APK file in your file manager and tap on it to install it. You might need to grant some permissions for the installation process.</li>
- <li>Wait for the installation to finish. You might need to restart your device for the changes to take effect.</li>
- <li>Launch the game from your app drawer or home screen. You might need to allow some additional permissions for the game to run properly.</li>
- </ol>
- <h2>What are the features and benefits of playing ARK: Survival Evolved on Android?</h2>
- <p>Playing ARK: Survival Evolved on Android has many features and benefits that make it a fun and immersive experience. Some of them are:</p>
- <p>ark survival evolved full apk download free<br />
- ark survival evolved full apk mod unlimited money<br />
- ark survival evolved full apk obb data<br />
- ark survival evolved full apk latest version<br />
- ark survival evolved full apk android 1<br />
- ark survival evolved full apk rexdl<br />
- ark survival evolved full apk revdl<br />
- ark survival evolved full apk offline<br />
- ark survival evolved full apk no root<br />
- ark survival evolved full apk andropalace<br />
- ark survival evolved full apk highly compressed<br />
- ark survival evolved full apk for pc<br />
- ark survival evolved full apk for ios<br />
- ark survival evolved full apk for windows 10<br />
- ark survival evolved full apk for mac<br />
- ark survival evolved full apk filehippo[^1^]<br />
- ark survival evolved full apk google play[^2^]<br />
- ark survival evolved full apk apkcombo[^3^]<br />
- ark survival evolved full apk uptodown<br />
- ark survival evolved full apk apkpure<br />
- ark survival evolved full apk happymod<br />
- ark survival evolved full apk android oyun club<br />
- ark survival evolved full apk android republic<br />
- ark survival evolved full apk platinmods<br />
- ark survival evolved full apk blackmod<br />
- ark survival evolved full apk unlimited amber<br />
- ark survival evolved full apk god mode<br />
- ark survival evolved full apk mega mod<br />
- ark survival evolved full apk all unlocked<br />
- ark survival evolved full apk premium features<br />
- ark survival evolved full apk cheats codes<br />
- ark survival evolved full apk tips and tricks<br />
- ark survival evolved full apk best settings<br />
- ark survival evolved full apk system requirements<br />
- ark survival evolved full apk gameplay video<br />
- ark survival evolved full apk review and rating<br />
- ark survival evolved full apk update news<br />
- ark survival evolved full apk new features<br />
- ark survival evolved full apk bug fixes and improvements<br />
- ark survival evolved full apk download size and speed</p>
- <ul>
- <li>You can enjoy a massive open-world game with stunning graphics and realistic physics on your mobile device.</li>
- <li>You can experience a thrilling survival adventure with dinosaurs and other players as you explore, craft, fight and tame.</li>
- <li>You can customize your character, your base, your weapons and your dinosaurs with a variety of skins, items and accessories.</li>
- <li>You can play online with thousands of other players on dedicated servers or offline with a single-player mode.</li>
- <li>You can join or create your own tribe and cooperate or compete with other tribes for resources, territory and dominance.</li>
- <li>You can access all the content and updates of the PC and console versions of the game, including the expansion packs Scorched Earth, Aberration, Extinction and Genesis.</li>
- </ul>
- <h2>What are the drawbacks and risks of playing ARK: Survival Evolved on Android?</h2>
- <p>Playing ARK: Survival Evolved on Android also has some drawbacks and risks that you should be aware of before downloading and installing the game. Some of them are:</p>
- <ul>
- <li>The game is very demanding and might not run well on low-end or older devices. You might experience lag, crashes, glitches or overheating issues.</li>
- <li>The game consumes a lot of battery power and data, so you might need to charge your device frequently or use a Wi-Fi connection.</li>
- <li>The game is not officially supported by Studio Wildcard, so you might encounter bugs, errors or compatibility issues that are not fixed or addressed by the developers.</li>
- <li>The game might contain malware, viruses or spyware that could harm your device or compromise your privacy. You should always scan the APK file before installing it and use a reputable antivirus app.</li>
- <li>The game might violate the terms of service or the intellectual property rights of Studio Wildcard, so you might face legal consequences or penalties for playing it.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>ARK: Survival Evolved is an amazing game that lets you experience a prehistoric world full of dinosaurs and adventure on your Android device. However, if you want to play the full APK version of the game, you need to be careful about where you download it from, how you install it and what risks you might face. We hope this article has helped you understand how to download and play ARK: Survival Evolved full APK on Android, and what features and benefits it offers. If you have any questions or feedback, feel free to leave a comment below.</p>
- <h3>FAQs</h3>
- <h4>Is ARK: Survival Evolved free on Android?</h4>
- <p>The official Google Play version of ARK: Survival Evolved is free to download and play, but it has ads and in-app purchases. The full APK version of ARK: Survival Evolved is also free to download and play, but it has no ads or in-app purchases.</p>
- <h4>How do I update ARK: Survival Evolved on Android?</h4>
- <p>The Google Play version of ARK: Survival Evolved will update automatically when a new version is available. The full APK version of ARK: Survival Evolved will not update automatically, so you will need to download and install the latest APK file manually when a new version is released.</p>
- <h4>Can I play ARK: Survival Evolved offline on Android?</h4>
- <p>Yes, you can play ARK: Survival Evolved offline on Android by choosing the single-player mode. However, you will not be able to access some features such as online multiplayer, cloud saving or Primal Pass benefits.</p>
- <h4>Can I transfer my progress from PC or console to Android?</h4>
- <p>No, you cannot transfer your progress from PC or console to Android. The PC and console versions of ARK: Survival Evolved are different from the mobile version and use different servers and platforms. You will need to start a new game on Android.</p>
- <h4>Can I play ARK: Survival Evolved with a controller on Android?</h4>
- <p>Yes, you can play ARK: Survival Evolved with a controller on Android by connecting a compatible Bluetooth controller to your device. You can also customize the controller settings in the game options menu.</p> 197e85843d<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Download Asphalt 8 APK and Race with Luxury Cars and Motorbikes.md DELETED
@@ -1,74 +0,0 @@
-
- <h1>Asphalt 8 - Car Racing Game APK Download</h1>
- <p>If you are looking for a thrilling and adrenaline-pumping car racing game for your Android device, you should definitely check out Asphalt 8. This is one of the most popular and acclaimed games from Gameloft, a leading developer of mobile games. Asphalt 8 offers you an amazing racing experience with over 300 licensed cars and motorbikes, action-packed races, stunning graphics, realistic physics, and both online and offline modes. In this article, we will tell you everything you need to know about this game and how to download Asphalt 8 APK for Android.</p>
- <h2>asphalt 8 car racing game apk download</h2><br /><p><b><b>Download</b> > <a href="https://jinyurl.com/2uNKdb">https://jinyurl.com/2uNKdb</a></b></p><br /><br />
- <h2>Licensed luxury cars and motorcycles</h2>
- <p>One of the main attractions of Asphalt 8 is its impressive collection of vehicles that you can drive, drift, and put to the test on the asphalt. You can choose from over 300 high-performance cars and bikes from top licensed manufacturers, such as Lamborghini, Bugatti, Porsche, Ferrari, Ducati, BMW, and many more. You can also customize and design your own race cars and motorcycles with various options for colors, decals, rims, tires, etc. You can collect high-end vehicles, special edition cars, and rare models as you progress in the game. You can also explore different worlds and scenarios, from the Nevada Desert to Tokyo streets, as you race on more than 75 tracks.</p>
- <h2>Get Airborne with Asphalt 8</h2>
- <p>Asphalt 8 is not just about driving fast on the ground. It is also about taking off into the air and performing spectacular stunts and jumps that defy gravity. You can hit the ramps and launch your car or bike into the sky, performing barrel rolls, wild 360° jumps, flips, twists, and more. You can also maneuver through the air while pulling off stunts to maximize your speed and score. You can customize your controls to suit your preferences, whether you want to use tilt, touch, or tap controls. You can also rearrange your on-screen icons and adjust your sensitivity settings.</p>
- <h2>Endless stream of content for fast racers</h2>
- <h3>Seasons and events</h3>
- <p>Asphalt 8 offers you a lot of content to keep you entertained and challenged. You can play different seasons, each with its own set of races and difficulties. You can also participate in live events that are updated frequently with new themes and objectives. You can win various rewards, such as credits, tokens, cards, blueprints, etc., by completing these modes. You can also check out other modes for a fresh twist on racing, such as Infected, Gate Drift, Knockdown, Elimination, etc.</p>
- <p>asphalt 8 airborne car racing game apk download<br />
- asphalt 8 mod apk unlimited money and tokens download<br />
- asphalt 8 best cars for each class and how to get them<br />
- asphalt 8 offline mode apk download for android<br />
- asphalt 8 cheats and hacks for free credits and stars<br />
- asphalt 8 gameplay tips and tricks for beginners<br />
- asphalt 8 latest version apk download with new features and updates<br />
- asphalt 8 multiplayer mode guide and strategies<br />
- asphalt 8 car list and stats comparison<br />
- asphalt 8 soundtrack and how to change it<br />
- asphalt 8 requirements and compatibility for different devices<br />
- asphalt 8 review and ratings from users and critics<br />
- asphalt 8 download size and installation process<br />
- asphalt 8 alternatives and similar games to try<br />
- asphalt 8 events and rewards calendar<br />
- asphalt 8 pro kits and how to use them effectively<br />
- asphalt 8 seasons and career mode walkthrough<br />
- asphalt 8 license verification error and how to fix it<br />
- asphalt 8 custom decals and how to create them<br />
- asphalt 8 nitro and drift techniques and controls<br />
- asphalt 8 online generator tool for unlimited resources<br />
- asphalt 8 legends update apk download with new cars and tracks<br />
- asphalt 8 hidden routes and shortcuts on every map<br />
- asphalt 8 bugs and glitches and how to report them<br />
- asphalt 8 support and customer service contact information<br />
- asphalt 8 forum and community for discussions and feedback<br />
- asphalt 8 wallpapers and images for desktop and mobile<br />
- asphalt 8 videos and tutorials from experts and influencers<br />
- asphalt 8 news and announcements from the developers<br />
- asphalt 8 memes and jokes for fun and entertainment</p>
- <h3>Limited-Time Cups</h3>
- <p>Another way to enjoy Asphalt 8 is to join the Limited-Time Cups that are available every day. These are special competitions that offer you exclusive access to some of the latest cars or motorbikes in the game. You can race against other players or against the clock to earn the chance to unlock or upgrade these vehicles. You can also get special rewards, such as fusion coins, boosters, pro kits, etc., by ranking high in these cups. You can check the schedule of the upcoming cups and plan your strategy accordingly.</p>
- <h2>Multiplayer and single racing experience</h2>
- <h3>World Series</h3>
- <p>If you want to test your skills against other players from around the world, you can join the World Series mode in Asphalt 8. This is the online multiplayer mode where you can race against up to 7 other players in real-time. You can choose from different leagues and divisions, depending on your rank and rating. You can also chat with other players, join or create clubs, and participate in club races and events. You can earn reputation points and rewards by winning races and completing missions.</p>
- <h3>Racing Events</h3>
- <p>Another way to enjoy multiplayer racing is to join the Racing Events mode. This is a limited-time mode where you can compete for points and prizes in different categories, such as speed, stunts, style, etc. You can also choose from different themes and rules, such as classic, slipstream, tag racing, etc. You can earn event coins and exchange them for exclusive items in the event shop. You can also check your progress and ranking on the event leaderboard.</p>
- <h2>How to download Asphalt 8 APK for Android</h2>
- <p>If you are interested in playing Asphalt 8 on your Android device, you have several options to download and install the game. Here are some of the ways you can do it:</p>
- <ul>
- <li>The easiest and safest way is to download the game from the Google Play Store. You can simply search for Asphalt 8 on the store or use this link to access the game page. Then, you can tap on the Install button and wait for the game to download and install on your device.</li>
- <li>Another way is to download the game from the official Gameloft website. You can go to this link and choose your device model from the list. Then, you can tap on the Download button and follow the instructions to install the game on your device.</li>
- <li>A third way is to download the game from a third-party website that offers APK files. However, this method is not recommended as it may expose your device to malware or viruses. If you still want to try this method, you should make sure that you have enabled the Unknown Sources option in your device settings. Then, you can search for Asphalt 8 APK on a reliable website and download it to your device. After that, you can locate the file in your file manager and tap on it to install it.</li>
- </ul>
- <p>Whichever method you choose, you should make sure that you have enough storage space on your device and a stable internet connection. You should also check the compatibility of your device with the game requirements before downloading it.</p>
- <h2>Conclusion</h2>
- <p>Asphalt 8 is a fantastic car racing game that will keep you hooked for hours with its stunning graphics, realistic physics, amazing vehicles, exciting modes, and multiplayer features. If you are a fan of racing games, you should not miss this one. You can download Asphalt 8 APK for Android from various sources and enjoy the thrill of racing on your device. So what are you waiting for? Download Asphalt 8 now and get ready to burn some rubber!</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions and their answers about Asphalt 8:</p>
- <ol>
- <li><b>How do I get more credits and tokens in Asphalt 8?</b><br>
- You can get more credits and tokens by winning races, completing missions, participating in events and cups, watching ads, etc. You can also buy them with real money if you want.</li>
- <li><b>How do I upgrade my cars and bikes in Asphalt 8?</b><br>
- You can upgrade your vehicles by using credits, tokens, cards, blueprints, fusion coins, etc., depending on the type of vehicle. You can also use pro kits to improve their performance.</li>
- <li><b>How do I unlock new cars and bikes in Asphalt 8?</b><br>
- You can unlock new vehicles by earning stars, completing seasons, participating in events and cups, collecting blueprints, etc. You can also buy them with credits or tokens if they are available.</li>
- <li><b>How do I change my control settings in Asphalt 8?</b><br>
- You can change your control settings by going to Options > Controls. You can choose from tilt, touch, or tap controls. You can also adjust your sensitivity settings and rearrange your on-screen icons.</li>
- <li><b>How do I play with my friends in Asphalt 8?</b >br>
- You can play with your friends in Asphalt 8 by inviting them to join your club or by creating a private room in the World Series mode. You can also chat with them and send them gifts.</li>
- </ol></p> 401be4b1e0<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Download Haunted Dorm MOD APK v1.4.2 for Android and Experience a Thrilling Horror Adventure.md DELETED
@@ -1,88 +0,0 @@
-
- <h1>Haunted Dorm Mod APK v 1.4.2: A Spooky Strategy Game for Android</h1>
- <p>Do you love spooky games that challenge your creativity and strategy skills? If so, you might want to check out Haunted Dorm Mod APK v 1.4.2, a fun and addictive game that lets you build and manage your own haunted dormitory.</p>
- <h2>haunted dorm mod apk v 1.4.2</h2><br /><p><b><b>Download Zip</b> --->>> <a href="https://jinyurl.com/2uNKBk">https://jinyurl.com/2uNKBk</a></b></p><br /><br />
- <p>In this game, you can create your own unique haunted dorm with different rooms, decorations, and facilities. You can also collect and upgrade various types of ghosts, such as vampires, zombies, mummies, and witches. Each ghost has its own power, skill, and ability that you can use to scare away unwanted guests or raid other players' dorms.</p>
- <p>But be careful! Other players can also attack your dorm and steal your money and gems. You need to defend your dorm with traps, guards, and your own ghosts. You can also join a guild and cooperate with other players to become the most powerful haunted dorm in the world.</p>
- <p>If you are ready to experience this spooky strategy game on your Android device, read on to find out how to download and install Haunted Dorm Mod APK v 1.4.2.</p>
- <h2>How to Download and Install Haunted Dorm Mod APK v 1.4.2</h2>
- <p>Downloading and installing Haunted Dorm Mod APK v 1.4.2 is very easy and fast. All you need to do is follow these simple steps:</p>
- <ol>
- <li>Click on this link to go to the download page of Haunted Dorm Mod APK v 1.4.2.</li>
- <li>Tap on the download button to start downloading the modded version of the game.</li>
- <li>Once the download is complete, go to your device's settings and enable the installation of apps from unknown sources.</li>
- <li>Locate the downloaded file in your device's file manager and tap on it to start installing the game.</li>
- <li>Wait for a few seconds until the installation is finished.</li>
- <li>Launch the game from your app drawer or home screen.</li>
- <li>Enjoy playing Haunted Dorm Mod APK v 1.4.2 with unlimited money and gems!</li>
- </ol>
- <h2>How to Play Haunted Dorm Mod APK v 1.4.2</h2 <h3>Build Your Haunted Dorm</h3>
- <p>One of the main features of Haunted Dorm Mod APK v 1.4.2 is that you can build your own haunted dormitory from scratch. You can choose from different types of rooms, such as bedrooms, bathrooms, kitchens, libraries, and more. You can also decorate your rooms with various items, such as furniture, paintings, candles, and skulls.</p>
- <p>But building your haunted dorm is not just for fun. It also has a strategic purpose. The more rooms and decorations you have, the more guests you can attract to your dorm. And the more guests you have, the more money you can earn from scaring them away.</p>
- <p>haunted dormitory game mod apk free download<br />
- haunted dorm mod apk unlimited money and gems<br />
- haunted dorm 1.4.2 mod apk latest version<br />
- haunted dorm android game mod apk no ads<br />
- haunted dorm mod apk v1.4.2 ad remove free rewards<br />
- haunted dorm horror game mod apk unlocked<br />
- haunted dorm mod apk v1.4.2 for android 1<br />
- haunted dorm modded apk download offline<br />
- haunted dorm apk mod v1.4.2 with cheats<br />
- haunted dorm mod apk v1.4.2 full version<br />
- haunted dorm premium mod apk free purchase<br />
- haunted dorm mod apk v1.4.2 hack<br />
- haunted dorm mod apk v1.4.2 unlimited everything<br />
- haunted dorm pro mod apk no root<br />
- haunted dorm mod apk v1.4.2 mega mod<br />
- haunted dorm cracked mod apk all levels<br />
- haunted dorm mod apk v1.4.2 new update<br />
- haunted dorm vip mod apk free shopping<br />
- haunted dorm mod apk v1.4.2 online<br />
- haunted dorm super mod apk unlimited lives<br />
- haunted dorm best mod apk high graphics<br />
- haunted dorm mod apk v1.4.2 direct download link<br />
- haunted dorm ultimate mod apk god mode<br />
- haunted dorm mod apk v1.4.2 without verification<br />
- haunted dorm extreme mod apk one hit kill<br />
- haunted dorm original mod apk no virus<br />
- haunted dorm mod apk v1.4.2 easy install<br />
- haunted dorm awesome mod apk all features<br />
- haunted dorm mod apk v1.4.2 working fine<br />
- haunted dorm cool mod apk fast download speed</p>
- <p>However, you also need to spend some money to maintain your dorm. You need to pay for electricity, water, and repairs. You also need to upgrade your dorm to unlock new rooms, decorations, and facilities. Upgrading your dorm also increases its value and reputation, which can help you attract more guests and earn more money.</p>
- <h3>Collect and Upgrade Ghosts</h3>
- <p>Another feature of Haunted Dorm Mod APK v 1.4.2 is that you can collect and upgrade various types of ghosts. There are four categories of ghosts: common, rare, epic, and legendary. Each category has different types of ghosts, such as vampires, zombies, mummies, witches, and more.</p>
- <p>You can collect ghosts by opening chests, completing missions, or buying them with money or gems. You can also upgrade your ghosts by using ghost cards and coins. Upgrading your ghosts increases their level, power, skill, and ability.</p>
- <p>Ghosts are not only your companions in the game. They are also your weapons. You can use your ghosts to scare away unwanted guests from your dorm or raid other players' dorms. Each ghost has its own skill and ability that you can activate during battles. For example, vampires can suck blood from enemies, zombies can infect enemies with a virus, mummies can wrap enemies with bandages, and witches can cast spells on enemies.</p>
- <h3>Compete with Other Players</h3>
- <p>The last feature of Haunted Dorm Mod APK v 1.4.2 is that you can compete with other players in different modes. There are two main modes: raid and defense.</p>
- <p>In raid mode, you can attack other players' dorms and try to steal their money and gems. You can choose which dorm to raid from a list of random players or search for a specific player by name or ID. You can also see the details of their dorms, such as their value, reputation, and defense level.</p>
- <p>In defense mode, you need to protect your own dorm from other players' attacks. You can set up traps, guards, and your own ghosts to defend your dorm. You can also see the details of the attackers, such as their name, ID, and attack level.</p>
- <p>Besides raid and defense modes, you can also join a guild and cooperate with other players. You can chat with your guild members, share tips and tricks, request or donate ghost cards and coins, and participate in guild wars. Guild wars are special events where you can team up with your guild members and fight against other guilds for rewards and glory.</p> <h2>Tips and Tricks for Haunted Dorm Mod APK v 1.4.2</h2>
- <p>Now that you know how to download, install, and play Haunted Dorm Mod APK v 1.4.2, you might be wondering how to master the game and become the best haunted dorm manager in the world. Well, don't worry, because we have some useful tips and tricks for you:</p>
- <ul>
- <li>Optimize your dorm layout. Try to arrange your rooms and decorations in a way that maximizes your space, attractiveness, and efficiency. For example, you can place similar rooms next to each other, use decorations that match the theme of your rooms, and avoid placing rooms or decorations that block the entrance or exit of your dorm.</li>
- <li>Use your ghosts effectively. Don't just randomly assign your ghosts to your rooms or battles. Try to use them according to their skills and abilities. For example, you can use vampires to drain the health of enemies, zombies to spread infection among enemies, mummies to immobilize enemies, and witches to cast spells on enemies.</li>
- <li>Earn more money and gems. There are many ways to earn more money and gems in the game, such as scaring away guests, raiding other players' dorms, completing missions, opening chests, watching ads, and logging in daily. You can also use the modded version of the game to get unlimited money and gems.</li>
- <li>Avoid common mistakes. There are some mistakes that you should avoid while playing the game, such as spending too much money or gems on unnecessary things, neglecting your dorm's maintenance or upgrade, leaving your dorm undefended or unguarded, or attacking stronger players' dorms without preparation.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>Haunted Dorm Mod APK v 1.4.2 is a spooky strategy game for Android that lets you build and manage your own haunted dormitory. You can create your own unique haunted dorm with different rooms, decorations, and facilities. You can also collect and upgrade various types of ghosts, such as vampires, zombies, mummies, and witches. You can also compete with other players in different modes, such as raiding their dorms, defending your own dorm, and joining a guild.</p>
- <p>If you are looking for a fun and addictive game that combines creativity and strategy with a touch of horror, you should definitely try Haunted Dorm Mod APK v 1.4.2. You can download and install it easily and fast from this link. You can also follow our tips and tricks to master the game and become the best haunted dorm manager in the world.</p>
- <p>So what are you waiting for? Download Haunted Dorm Mod APK v 1.4.2 now and enjoy this spooky strategy game on your Android device!</p>
- <h3>FAQs</h3>
- <p>Here are some frequently asked questions about Haunted Dorm Mod APK v 1.4.2:</p>
- <ol>
- <li>What is Haunted Dorm Mod APK v 1.4.2?</li>
- <p>Haunted Dorm Mod APK v 1.4.2 is a modded version of Haunted Dorm, a spooky strategy game for Android that lets you build and manage your own haunted dormitory.</p>
- <li>What are the features of Haunted Dorm Mod APK v 1.4.2?</li>
- <p>The features of Haunted Dorm Mod APK v 1.4.2 include unlimited money and gems, unlocked rooms and decorations, unlocked ghosts and upgrades, and more.</p>
- <li>How to download and install Haunted Dorm Mod APK v 1.4.2?</li>
- <p>You can download and install Haunted Dorm Mod APK v 1.4.2 by following these simple steps: click on this link to go to the download page of Haunted Dorm Mod APK v 1.4.2; tap on the download button to start downloading the modded version of the game; once the download is complete, go to your device's settings and enable the installation of apps from unknown sources; locate the downloaded file in your device's file manager and tap on it to start installing the game; wait for a few seconds until the installation is finished; launch the game from your app drawer or home screen; enjoy playing Haunted Dorm Mod APK v 1.4.2 with unlimited money and gems!</p>
- <li>How to play Haunted Dorm Mod APK v 1.4.2?</li>
- <p>You can play Haunted Dorm Mod APK v 1.4.2 by following these simple steps: build your own haunted dorm with different rooms, decorations, and facilities; collect and upgrade various types of ghosts, such as vampires, zombies, mummies, and witches; compete with other players in different modes, such as raiding their dorms, defending your own dorm, and joining a guild; follow our tips and tricks to master the game and become the best haunted dorm manager in the world.</p>
- <li>Is Haunted Dorm Mod APK v 1.4.2 safe and legal?</li>
- <p>Yes, Haunted Dorm Mod APK v 1.4.2 is safe and legal to use. It does not contain any viruses, malware, or spyware that can harm your device or data. It also does not violate any laws or regulations that prohibit the use of modded apps or games. However, you should always download and install Haunted Dorm Mod APK v 1.4.2 from a trusted source, such as this link, to avoid any risks or problems.</p>
- </ol></p> 401be4b1e0<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Enjoy the World of 3D Models with Pocket World 3D MOD APK on Android 1.md DELETED
@@ -1,93 +0,0 @@
- <br />
- <h1>Pocket World 3D Mod APK Android 1: A Relaxing and Creative Game</h1>
- <p>Do you love building and designing miniature models? Do you want to explore different themes and cultures from around the world? Do you want to enjoy a relaxing and creative game on your Android device? If you answered yes to any of these questions, then you should try Pocket World 3D, a game that lets you create your own pocket world in 3D.</p>
- <h2>What is Pocket World 3D?</h2>
- <p>Pocket World 3D is a mobile game developed by Minimonster Game Limited, a company that specializes in casual and puzzle games. Pocket World 3D is a game that combines puzzle-solving, creativity, and relaxation. In this game, you can build your own miniature models by assembling various pieces of materials. You can also collect and unlock different themes and models, such as famous landmarks, cultural icons, natural wonders, and more. You can enjoy the soothing music and sound effects as you create your pocket world in 3D. You can also share your creations with other players and see their works as well.</p>
- <h2>pocket world 3d mod apk android 1</h2><br /><p><b><b>Download Zip</b> &#10026; <a href="https://jinyurl.com/2uNSgg">https://jinyurl.com/2uNSgg</a></b></p><br /><br />
- <h3>Features of Pocket World 3D</h3>
- <h4>Build your own miniature models</h4>
- <p>Pocket World 3D is a game that allows you to unleash your creativity and imagination. You can build your own miniature models by following the instructions or by using your own ideas. You can rotate, zoom, and move the pieces to fit them together. You can also customize the colors, textures, and details of your models. You can create anything from buildings, vehicles, animals, plants, and more.</h4>
- <h4>Collect and unlock various themes</h4>
- <p>Pocket World 3D is a game that lets you explore different themes and cultures from around the world. You can collect and unlock various themes and models, such as famous landmarks, cultural icons, natural wonders, and more. You can travel to different countries and regions, such as China, Japan, Egypt, France, Italy, USA, and more. You can learn about the history, culture, and characteristics of each place as you build your models.</p>
- <h4>Enjoy the relaxing music and sound effects</h4>
- <p>Pocket World 3D is a game that helps you relax and unwind. You can enjoy the soothing music and sound effects as you create your pocket world in 3D. The music is composed by professional musicians who specialize in ambient and relaxing music. The sound effects are realistic and immersive, such as the sound of water, wind, birds, cars, etc. You can adjust the volume and mute the sound as you wish.</p>
- <h4>Share your creations with other players</h4>
- <p>Pocket World 3D is a game that allows you to share your creations with other players. You can upload your models to the online gallery and see what other players have made. You can also rate, comment, and like other players' works. You can also download other players' models and edit them as you like. You can also join the community and chat with other players who share your passion for building miniature models.</p>
- <h2>What is Pocket World 3D Mod APK Android 1?</h2>
- <p>Pocket World <p>Pocket World 3D Mod APK Android 1 is a modified version of the original game that gives you some extra benefits and features. Pocket World 3D Mod APK Android 1 is not available on the official Google Play Store, but you can download it from a trusted source online. Here are some of the benefits and features of Pocket World 3D Mod APK Android 1:</p>
- <h3>Benefits of Pocket World 3D Mod APK Android 1</h3>
- <h4>Unlimited money and diamonds</h4>
- <p>Pocket World 3D Mod APK Android 1 gives you unlimited money and diamonds, which are the main currencies in the game. You can use them to buy and unlock new themes, models, materials, and more. You can also use them to speed up the building process and skip the waiting time. You don't have to worry about running out of money or diamonds, as you can get as much as you want with Pocket World 3D Mod APK Android 1.</p>
- <h4>All themes and models unlocked</h4>
- <p>Pocket World 3D Mod APK Android 1 gives you access to all the themes and models in the game, without having to complete the levels or pay for them. You can explore and build any theme or model you want, such as famous landmarks, cultural icons, natural wonders, and more. You can also mix and match different themes and models to create your own unique pocket world in 3D.</p>
- <p>pocket world 3d mod apk unlimited money<br />
- pocket world 3d mod apk latest version<br />
- pocket world 3d mod apk download for android<br />
- pocket world 3d mod apk all unlocked<br />
- pocket world 3d mod apk free shopping<br />
- pocket world 3d mod apk revdl<br />
- pocket world 3d mod apk happymod<br />
- pocket world 3d mod apk rexdl<br />
- pocket world 3d mod apk offline<br />
- pocket world 3d mod apk no ads<br />
- pocket world 3d mod apk vip unlocked<br />
- pocket world 3d mod apk premium<br />
- pocket world 3d mod apk hack<br />
- pocket world 3d mod apk cheat<br />
- pocket world 3d mod apk full version<br />
- pocket world 3d mod apk android republic<br />
- pocket world 3d mod apk an1.com<br />
- pocket world 3d mod apk apkpure<br />
- pocket world 3d mod apk apkmody<br />
- pocket world 3d mod apk andropalace<br />
- pocket world 3d mod apk android oyun club<br />
- pocket world 3d mod apk android zone<br />
- pocket world 3d mod apk android mob.org<br />
- pocket world 3d mod apk android games room<br />
- pocket world 3d mod apk android games box<br />
- pocket world 3d mod apk android games spot<br />
- pocket world 3d mod apk android games hub<br />
- pocket world 3d mod apk android games fun<br />
- pocket world 3d mod apk android games pk<br />
- pocket world 3d mod apk android games xyz<br />
- pocket world 3d mod apk android games plus<br />
- pocket world 3d mod apk android games store<br />
- pocket world 3d mod apk android games club<br />
- pocket world 3d mod apk android games net<br />
- pocket world 3d mod apk android games pro<br />
- pocket world 3d mod apk android games king<br />
- pocket world 3d mod apk android games guru<br />
- pocket world 3d mod apk android games zone.com<br />
- pocket world 3d mod apk android games blogspot.com<br />
- pocket world 3d mod apk android games download.com</p>
- <h4>No ads and no root required</h4>
- <p>Pocket World 3D Mod APK Android 1 removes all the ads from the game, so you can enjoy a smooth and uninterrupted gaming experience. You don't have to watch any annoying or intrusive ads that pop up on your screen or interrupt your gameplay. You also don't need to root your device to install or use Pocket World 3D Mod APK Android 1, as it works on any Android device without any problems.</p>
- <h2>How to download and install Pocket World 3D Mod APK Android 1?</h2>
- <p>If you want to download and install Pocket World 3D Mod APK Android 1 on your Android device, you need to follow these simple steps:</p>
- <h3>Step-by-step guide for downloading and installing Pocket World 3D Mod APK Android 1</h3>
- <h4>Download the APK file from a trusted source</h4>
- <p>The first step is to download the APK file of Pocket World 3D Mod APK Android 1 from a trusted source online. You can search for it on Google or use the link provided below. Make sure you download the latest version of the mod apk file, which is compatible with your device and has all the features and benefits mentioned above.</p>
- <p><a href="">Download Pocket World 3D Mod APK Android 1 here</a></p>
- <h4>Enable unknown sources on your device settings</h4>
- <p>The second step is to enable unknown sources on your device settings, so you can install apps from sources other than the Google Play Store. To do this, go to your device settings, then security, then unknown sources, and toggle it on. This will allow you to install Pocket World 3D Mod APK Android 1 on your device without any issues.</p>
- <h4>Install the APK file and launch the game</h4>
- <p>The third and final step is to install the APK file of Pocket World 3D Mod APK Android 1 on your device. To do this, locate the downloaded file on your device storage, tap on it, and follow the instructions on the screen. Once the installation is complete, launch the game and enjoy creating your own pocket world in 3D with unlimited money, diamonds, themes, models, and more.</p>
- <h2>Conclusion</h2>
- <p>Pocket World 3D is a relaxing and creative game that lets you build your own miniature models by assembling various pieces of materials. You can also collect and unlock different themes and models, such as famous landmarks, cultural icons, natural wonders, and more. You can enjoy the soothing music and sound effects as you create your pocket world in 3D. You can also share your creations with other players and see their works as well.</p>
- <p>Pocket World 3D Mod APK Android 1 is a modified version of the original game that gives you some extra benefits and features, such as unlimited money and diamonds, all themes and models unlocked, no ads and no root required. You can download and install Pocket World 3D Mod APK Android 1 on your Android device by following the simple steps mentioned above.</p>
- <p>If you are looking for a relaxing and creative game that lets you explore different themes and cultures from around the world, then you should try Pocket World 3D Mod APK Android 1 today.</p>
- <h2>FAQs</h2> <p>Here are some of the frequently asked questions about Pocket World 3D Mod APK Android 1:</p>
- <ul>
- <li><b>Is Pocket World 3D Mod APK Android 1 safe to use?</b></li>
- <p>Yes, Pocket World 3D Mod APK Android 1 is safe to use, as long as you download it from a trusted source online. You don't need to root your device or grant any permissions to install or use it. However, you should always be careful when downloading and installing any mod apk files from unknown sources, as they may contain viruses or malware that can harm your device or steal your data.</p>
- <li><b>Is Pocket World 3D Mod APK Android 1 compatible with my device?</b></li>
- <p>Pocket World 3D Mod APK Android 1 is compatible with most Android devices that run on Android 4.4 or higher. However, some devices may not support the game or the mod apk file due to different specifications or settings. If you encounter any problems or errors while playing the game or installing the mod apk file, you can try to update your device software, clear your cache, or contact the developer for help.</p>
- <li><b>Can I play Pocket World 3D Mod APK Android 1 offline?</b></li>
- <p>Yes, you can play Pocket World 3D Mod APK Android 1 offline, without an internet connection. However, some features and functions may not work properly or be available offline, such as sharing your creations, downloading other players' models, or accessing the online gallery. You may also miss out on some updates and events that require an internet connection.</p>
- <li><b>Can I play Pocket World 3D Mod APK Android 1 with my friends?</b></li>
- <p>Yes, you can play Pocket World 3D Mod APK Android 1 with your friends, as long as you have an internet connection. You can share your creations with your friends and see their works as well. You can also chat with them and join the community of other players who love building miniature models. You can also challenge your friends and compete with them on the leaderboard.</p>
- <li><b>How can I contact the developer of Pocket World 3D Mod APK Android 1?</b></li>
- <p>If you have any questions, feedback, suggestions, or issues regarding Pocket World 3D Mod APK Android 1, you can contact the developer of the game by sending an email to <a href="mailto:[email protected]">[email protected]</a>. You can also visit their official website at <a href="https://www.minimonster.com/">https://www.minimonster.com/</a> or follow them on their social media accounts on Facebook, Twitter, Instagram, and YouTube.</p>
- </ul></p> 401be4b1e0<br />
- <br />
- <br />
 
spaces/1phancelerku/anime-remove-background/Experience the 80s with GTA Vice City - Download for Windows 7 32 Bit.md DELETED
@@ -1,98 +0,0 @@
- <br />
- <h1>GTA Vice City Download for Windows 7 32 Bit</h1>
- <p>GTA Vice City is one of the most popular and iconic games in the Grand Theft Auto series. It is an action-adventure game that lets you explore the open world of Vice City, a fictional city based on Miami in the 1980s. You can play as Tommy Vercetti, a former mobster who is sent to Vice City by his boss to establish a criminal empire. You can complete various missions, drive different vehicles, use various weapons, interact with other characters, and enjoy the retro soundtrack and atmosphere.</p>
- <h2>gta vice city download for windows 7 32 bit</h2><br /><p><b><b>Download Zip</b> &harr; <a href="https://jinyurl.com/2uNJNf">https://jinyurl.com/2uNJNf</a></b></p><br /><br />
- <p>If you want to play GTA Vice City on your Windows 7 32 bit PC, you will need to download and install the game from a reliable source. There are many websites that offer GTA Vice City download for Windows 7 32 bit, but not all of them are safe and legal. Some of them may contain viruses, malware, or unwanted programs that can harm your PC or compromise your privacy. Some of them may also provide incomplete or corrupted files that can cause errors or crashes while playing the game.</p>
- <p>To avoid these problems, you should only download GTA Vice City from trusted and verified sources that have positive reviews and ratings from other users. One of these sources is Ocean of Games, a website that provides free and secure downloads of various games for PC. Ocean of Games has a direct download link for GTA Vice City that is easy and fast to use. You just need to click on the download button and follow the instructions to save the game setup file on your PC.</p>
- <h2>How to Install GTA Vice City on Windows 7 32 Bit</h2>
- <p>After downloading GTA Vice City from Ocean of Games, you will need to install it on your Windows 7 32 bit PC. The installation process is simple and straightforward, but you should follow these steps carefully to avoid any errors or issues:</p>
- <ol>
- <li>Locate the GTA Vice City setup file that you downloaded from Ocean of Games. It should be in your Downloads folder or any other location that you chose to save it.</li>
- <li>Double-click on the setup file to launch the installation wizard. You may need to grant permission or enter your administrator password if prompted by User Account Control (UAC).</li>
- <li>Select your preferred language and click Next.</li>
- <li>Read and accept the license agreement and click Next.</li>
- <li>Choose the destination folder where you want to install GTA Vice City. You can use the default location or browse for another one. Make sure you have enough free space on your hard drive for the game files.</li>
- <li>Click Next and wait for the installation to complete. It may take several minutes depending on your PC speed and performance.</li>
- <li>Click Finish to exit the installation wizard.</li>
- </ol>
- <p>Congratulations! You have successfully installed GTA Vice City on your Windows 7 32 bit PC. You can now launch the game from your Start menu or desktop shortcut and enjoy playing it.</p>
- <h2>Tips and Tricks for Playing GTA Vice City on Windows 7 32 Bit</h2>
- <p>GTA Vice City is a fun and exciting game that offers a lot of features and options for players. However, it can also be challenging and frustrating at times, especially if you are new to the game or encounter some technical difficulties. Here are some tips and tricks that can help you improve your gaming experience and overcome some common problems:</p>
- <ul>
- <li>Adjust the graphics settings according to your PC specifications and preferences. You can access the graphics options from the main menu or by pressing Esc during the game. You can change the resolution, color depth, draw distance, frame limiter, anti-aliasing, and other settings to optimize the game performance and quality.</li>
- <li>Save your game frequently and use multiple slots. GTA Vice City does not have an auto-save feature, so you will need to manually save your game progress at safe houses or other locations. You should also use different save slots to avoid overwriting or losing your data in case of a crash or error.</li>
- <li>Use cheats and mods if you want to enhance your gameplay or unlock some hidden features. GTA Vice City has a variety of cheats and mods that you can use to change the game rules, add new items, vehicles, weapons, characters, missions, maps, and more. However, you should be careful when using cheats and mods as they may cause glitches, bugs, or compatibility issues with the game or your PC. You should also avoid using cheats and mods when playing online or on missions as they may affect the game balance or trigger anti-cheat measures.</li>
- <li>Use the map and the radar to navigate the city and find your objectives. GTA Vice City has a large and detailed map that shows the locations of safe houses, shops, weapons, vehicles, missions, and other points of interest. You can access the map from the pause menu or by pressing M during the game. You can also use the radar on the bottom left corner of the screen to see your current position, direction, and nearby enemies, allies, and targets.</li>
- <li>Learn the controls and the shortcuts for different actions and situations. GTA Vice City has a complex and customizable control system that allows you to perform various actions such as walking, running, jumping, crouching, aiming, shooting, driving, flying, swimming, and more. You can check the default controls from the options menu or by pressing F1 during the game. You can also change the controls to suit your preferences or use a gamepad instead of a keyboard and mouse.</li>
- </ul>
- <h2>Benefits of Playing GTA Vice City on Windows 7 32 Bit</h2>
- <p>GTA Vice City is a classic game that has many benefits for players who want to enjoy a nostalgic and immersive gaming experience. Here are some of the benefits of playing GTA Vice City on Windows 7 32 bit:</p>
30
- <p>gta vice city free download full version for windows 7 32 bit<br />
31
- gta vice city pc game download for windows 7 32 bit<br />
32
- gta vice city ultimate download for windows 7 32 bit<br />
33
- gta vice city setup download for windows 7 32 bit<br />
34
- gta vice city stories download for windows 7 32 bit<br />
35
- gta vice city deluxe download for windows 7 32 bit<br />
36
- gta vice city mod download for windows 7 32 bit<br />
37
- gta vice city cheats download for windows 7 32 bit<br />
38
- gta vice city apk download for windows 7 32 bit<br />
39
- gta vice city online download for windows 7 32 bit<br />
40
- gta vice city compressed download for windows 7 32 bit<br />
41
- gta vice city crack download for windows 7 32 bit<br />
42
- gta vice city patch download for windows 7 32 bit<br />
43
- gta vice city trainer download for windows 7 32 bit<br />
44
- gta vice city save game download for windows 7 32 bit<br />
45
- gta vice city hd graphics download for windows 7 32 bit<br />
46
- gta vice city soundtrack download for windows 7 32 bit<br />
47
- gta vice city android download for windows 7 32 bit<br />
48
- gta vice city remastered download for windows 7 32 bit<br />
49
- gta vice city highly compressed download for windows 7 32 bit<br />
50
- gta vice city original download for windows 7 32 bit<br />
51
- gta vice city latest version download for windows 7 32 bit<br />
52
- gta vice city directx download for windows 7 32 bit<br />
53
- gta vice city multiplayer download for windows 7 32 bit<br />
54
- gta vice city real life mod download for windows 7 32 bit<br />
55
- gta vice city no cd download for windows 7 32 bit<br />
56
- gta vice city radio stations download for windows 7 32 bit<br />
57
- gta vice city cars download for windows 7 32 bit<br />
58
- gta vice city skins download for windows 7 32 bit<br />
59
- gta vice city maps download for windows 7 32 bit<br />
60
- gta vice city missions download for windows 7 32 bit<br />
61
- gta vice city fonts download for windows 7 32 bit<br />
62
- gta vice city icons download for windows 7 32 bit<br />
63
- gta vice city wallpaper download for windows 7 32 bit<br />
64
- gta vice city logo download for windows 7 32 bit<br />
65
- gta vice city theme song download for windows 7 32 bit<br />
66
- gta vice city cutscenes download for windows 7 32 bit<br />
67
- gta vice city voice files download for windows 7</p>
68
- <ul>
69
- <li>You can relive the 1980s culture and vibe with the authentic graphics, music, fashion, and references of GTA Vice City. The game captures the essence of the era with its colorful and stylish design, its diverse and memorable characters, its witty and humorous dialogue, and its iconic soundtrack that features songs from various genres such as pop, rock, hip hop, disco, and more.</li>
70
- <li>You can explore a vast and varied open world that offers endless possibilities and opportunities for fun and adventure. GTA Vice City has a rich and dynamic environment that includes urban areas, suburbs, beaches, islands, airports, golf courses, stadiums, malls, and more. You can interact with the environment and the NPCs in various ways, such as destroying objects, stealing vehicles, fighting enemies, buying properties, doing side activities, and more.</li>
71
- <li>You can experience a thrilling and engaging story that follows the rise of Tommy Vercetti in the criminal underworld of Vice City. GTA Vice City has a compelling and well-written plot that involves betrayal, revenge, loyalty, ambition, and corruption. You can choose how to approach each mission and how to deal with each situation. You can also influence the outcome of the story by making decisions that affect your relationships with other characters and factions.</li>
72
- </ul>
73
- <h2>Conclusion</h2>
74
- <p>GTA Vice City is a game that deserves to be played by anyone who loves action-adventure games or who wants to revisit one of the best games in the Grand Theft Auto series. It is compatible with Windows 7 32 bit PCs and can be downloaded and installed easily from Ocean of Games. It also offers many features and options that make it fun and enjoyable for players of all levels and preferences. If you are looking for a game that will keep you entertained for hours with its gameplay, story, graphics, music, and atmosphere, GTA Vice City is the game for you.</p>
75
- <h2>FAQs</h2>
76
- <p>Here are some frequently asked questions and answers about GTA Vice City download for Windows 7 32 bit:</p>
77
- <ol>
78
- <li><b>Is GTA Vice City free to download and play?</b></li>
79
- <p>No, GTA Vice City is not a free game. You will need to purchase a license or a key to play the game legally. However, there are some websites that offer GTA Vice City download for free, such as Ocean of Games. These websites may not be authorized or licensed by the game developers or publishers, so you should use them at your own risk and discretion.</p>
80
- <li><b>Is GTA Vice City compatible with Windows 10 or other operating systems?</b></li>
81
- <p>Yes, GTA Vice City is compatible with Windows 10 and other operating systems such as Windows 8, Windows XP, and Windows Vista. However, you may need to run the game in compatibility mode or use some patches or fixes to make it work properly on newer or older systems. You can find more information and solutions online or on the game forums.</p>
82
- <li><b>What are the minimum system requirements for GTA Vice City?</b></li>
83
- <p>The minimum system requirements for GTA Vice City are as follows:</p>
84
- <ul>
85
- <li>OS: Windows 7 32 bit or higher</li>
86
- <li>CPU: 800 MHz Intel Pentium III or AMD Athlon</li>
87
- <li>RAM: 128 MB</li>
88
- <li>GPU: 32 MB video card with DirectX 9.0 compatible drivers</li>
89
- <li>HDD: 915 MB of free hard disk space</li>
90
- <li>Sound: DirectX 9.0 compatible sound card</li>
91
- </ul>
92
- <li><b>How can I update GTA Vice City to the latest version?</b></li>
93
- <p>You can update GTA Vice City to the latest version by downloading and installing the official patches from the game website or from other sources. The latest version of GTA Vice City is v1.1, which fixes some bugs and improves some features of the game. You can also use some unofficial patches or mods that add new content or enhancements to the game.</p>
94
- <li><b>How can I get help or support for GTA Vice City?</b></li>
95
- <p>You can get help or support for GTA Vice City by visiting the game website or the game forums, where you can find FAQs, guides, tutorials, tips, tricks, solutions, and other resources. You can also contact the game developers or publishers via email, phone, or social media if you have any questions, feedback, or issues with the game.</p>
96
- </ol></p><br />
97
- <br />
98
- <br />
 
spaces/AIConsultant/MusicGen/setup.py DELETED
@@ -1,62 +0,0 @@
1
- # Copyright (c) Meta Platforms, Inc. and affiliates.
2
- # All rights reserved.
3
- #
4
- # This source code is licensed under the license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- from pathlib import Path
8
-
9
- from setuptools import setup, find_packages
10
-
11
-
12
- NAME = 'audiocraft'
13
- DESCRIPTION = 'Audio generation research library for PyTorch'
14
-
15
- URL = 'https://github.com/facebookresearch/audiocraft'
16
- AUTHOR = 'FAIR Speech & Audio'
17
- EMAIL = '[email protected]'
18
- REQUIRES_PYTHON = '>=3.8.0'
19
-
20
- for line in open('audiocraft/__init__.py'):
21
- line = line.strip()
22
- if '__version__' in line:
23
- context = {}
24
- exec(line, context)
25
- VERSION = context['__version__']
26
-
27
- HERE = Path(__file__).parent
28
-
29
- try:
30
- with open(HERE / "README.md", encoding='utf-8') as f:
31
- long_description = '\n' + f.read()
32
- except FileNotFoundError:
33
- long_description = DESCRIPTION
34
-
35
- REQUIRED = [i.strip() for i in open(HERE / 'requirements.txt') if not i.startswith('#')]
36
-
37
- setup(
38
- name=NAME,
39
- version=VERSION,
40
- description=DESCRIPTION,
41
- author_email=EMAIL,
42
- long_description=long_description,
43
- long_description_content_type='text/markdown',
44
- author=AUTHOR,
45
- url=URL,
46
- python_requires=REQUIRES_PYTHON,
47
- install_requires=REQUIRED,
48
- extras_require={
49
- 'dev': ['coverage', 'flake8', 'mypy', 'pdoc3', 'pytest'],
50
- },
51
- packages=find_packages(),
52
- package_data={'audiocraft': ['py.typed']},
53
- include_package_data=True,
54
- license='MIT License',
55
- classifiers=[
56
- # Trove classifiers
57
- # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
58
- 'License :: OSI Approved :: MIT License',
59
- 'Topic :: Multimedia :: Sound/Audio',
60
- 'Topic :: Scientific/Engineering :: Artificial Intelligence',
61
- ],
62
- )
 
spaces/AIFILMS/StyleGANEX/datasets/images_dataset.py DELETED
@@ -1,33 +0,0 @@
1
- from torch.utils.data import Dataset
2
- from PIL import Image
3
- from utils import data_utils
4
-
5
-
6
- class ImagesDataset(Dataset):
7
-
8
- def __init__(self, source_root, target_root, opts, target_transform=None, source_transform=None):
9
- self.source_paths = sorted(data_utils.make_dataset(source_root))
10
- self.target_paths = sorted(data_utils.make_dataset(target_root))
11
- self.source_transform = source_transform
12
- self.target_transform = target_transform
13
- self.opts = opts
14
-
15
- def __len__(self):
16
- return len(self.source_paths)
17
-
18
- def __getitem__(self, index):
19
- from_path = self.source_paths[index]
20
- from_im = Image.open(from_path)
21
- from_im = from_im.convert('RGB') if self.opts.label_nc == 0 else from_im.convert('L')
22
-
23
- to_path = self.target_paths[index]
24
- to_im = Image.open(to_path).convert('RGB')
25
- if self.target_transform:
26
- to_im = self.target_transform(to_im)
27
-
28
- if self.source_transform:
29
- from_im = self.source_transform(from_im)
30
- else:
31
- from_im = to_im
32
-
33
- return from_im, to_im
 
 
spaces/AIFILMS/generate_human_motion/VQ-Trans/models/encdec.py DELETED
@@ -1,67 +0,0 @@
1
- import torch.nn as nn
2
- from models.resnet import Resnet1D
3
-
4
- class Encoder(nn.Module):
5
- def __init__(self,
6
- input_emb_width = 3,
7
- output_emb_width = 512,
8
- down_t = 3,
9
- stride_t = 2,
10
- width = 512,
11
- depth = 3,
12
- dilation_growth_rate = 3,
13
- activation='relu',
14
- norm=None):
15
- super().__init__()
16
-
17
- blocks = []
18
- filter_t, pad_t = stride_t * 2, stride_t // 2
19
- blocks.append(nn.Conv1d(input_emb_width, width, 3, 1, 1))
20
- blocks.append(nn.ReLU())
21
-
22
- for i in range(down_t):
23
- input_dim = width
24
- block = nn.Sequential(
25
- nn.Conv1d(input_dim, width, filter_t, stride_t, pad_t),
26
- Resnet1D(width, depth, dilation_growth_rate, activation=activation, norm=norm),
27
- )
28
- blocks.append(block)
29
- blocks.append(nn.Conv1d(width, output_emb_width, 3, 1, 1))
30
- self.model = nn.Sequential(*blocks)
31
-
32
- def forward(self, x):
33
- return self.model(x)
34
-
35
- class Decoder(nn.Module):
36
- def __init__(self,
37
- input_emb_width = 3,
38
- output_emb_width = 512,
39
- down_t = 3,
40
- stride_t = 2,
41
- width = 512,
42
- depth = 3,
43
- dilation_growth_rate = 3,
44
- activation='relu',
45
- norm=None):
46
- super().__init__()
47
- blocks = []
48
-
49
- filter_t, pad_t = stride_t * 2, stride_t // 2
50
- blocks.append(nn.Conv1d(output_emb_width, width, 3, 1, 1))
51
- blocks.append(nn.ReLU())
52
- for i in range(down_t):
53
- out_dim = width
54
- block = nn.Sequential(
55
- Resnet1D(width, depth, dilation_growth_rate, reverse_dilation=True, activation=activation, norm=norm),
56
- nn.Upsample(scale_factor=2, mode='nearest'),
57
- nn.Conv1d(width, out_dim, 3, 1, 1)
58
- )
59
- blocks.append(block)
60
- blocks.append(nn.Conv1d(width, width, 3, 1, 1))
61
- blocks.append(nn.ReLU())
62
- blocks.append(nn.Conv1d(width, input_emb_width, 3, 1, 1))
63
- self.model = nn.Sequential(*blocks)
64
-
65
- def forward(self, x):
66
- return self.model(x)
67
-
 
spaces/AIGC-Audio/Make_An_Audio/vocoder/bigvgan/alias_free_torch/__init__.py DELETED
@@ -1,6 +0,0 @@
1
- # Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0
2
- # LICENSE is in incl_licenses directory.
3
-
4
- from .filter import *
5
- from .resample import *
6
- from .act import *
 
spaces/AILab-CVC/SEED-Bench_Leaderboard/app.py DELETED
@@ -1,313 +0,0 @@
1
-
2
- __all__ = ['block', 'make_clickable_model', 'make_clickable_user', 'get_submissions']
3
-
4
- import gradio as gr
5
- import pandas as pd
6
- import json
7
- import pdb
8
- import tempfile
9
-
10
- from constants import *
11
- from src.auto_leaderboard.model_metadata_type import ModelType
12
-
13
- global data_component, filter_component
14
-
15
-
16
- def upload_file(files):
17
- file_paths = [file.name for file in files]
18
- return file_paths
19
-
20
- def prediction_analyse(prediction_content):
21
- predictions = prediction_content.split("\n")
22
-
23
- # Read the ground-truth JSON file
24
- with open("./file/SEED-Bench.json", "r") as file:
25
- ground_truth_data = json.load(file)["questions"]
26
-
27
- # Convert the ground-truth data into a dict keyed by question_id
28
- ground_truth = {item["question_id"]: item for item in ground_truth_data}
29
-
30
- # Initialize the per-question-type result counters
31
- results = {i: {"correct": 0, "total": 0} for i in range(1, 13)}
32
-
33
- # Iterate over predictions, counting correct and total predictions for each question_type_id
34
- for prediction in predictions:
35
- # pdb.set_trace()
36
- prediction = prediction.strip()
37
- if not prediction:
38
- continue
39
- try:
40
- prediction = json.loads(prediction)
41
- except json.JSONDecodeError:
42
- print(f"Warning: Skipping invalid JSON data in line: {prediction}")
43
- continue
44
- question_id = prediction["question_id"]
45
- gt_item = ground_truth[question_id]
46
- question_type_id = gt_item["question_type_id"]
47
-
48
- if prediction["prediction"] == gt_item["answer"]:
49
- results[question_type_id]["correct"] += 1
50
-
51
- results[question_type_id]["total"] += 1
52
-
53
- return results
54
-
55
- def add_new_eval(
56
- input_file,
57
- model_name_textbox: str,
58
- revision_name_textbox: str,
59
- model_type: str,
60
- model_link: str,
61
- LLM_type: str,
62
- LLM_name_textbox: str,
63
- Evaluation_dimension: str,
64
- ):
65
- if input_file is None:
66
- return "Error! Empty file!"
67
- else:
68
- content = input_file.decode("utf-8")
69
- prediction = prediction_analyse(content)
70
- csv_data = pd.read_csv(CSV_DIR)
71
-
72
- Start_dimension, End_dimension = 1, 13
73
- if Evaluation_dimension == 'Image':
74
- End_dimension = 10
75
- elif Evaluation_dimension == 'Video':
76
- Start_dimension = 10
77
- each_task_accuracy = {i: round(prediction[i]["correct"] / prediction[i]["total"] * 100, 1) if i >= Start_dimension and i < End_dimension else 0 for i in range(1, 13)}
78
-
79
- # count for average image\video\all
80
- total_correct_image = sum(prediction[i]["correct"] for i in range(1, 10))
81
- total_correct_video = sum(prediction[i]["correct"] for i in range(10, 13))
82
-
83
- total_image = sum(prediction[i]["total"] for i in range(1, 10))
84
- total_video = sum(prediction[i]["total"] for i in range(10, 13))
85
-
86
- if Evaluation_dimension != 'Video':
87
- average_accuracy_image = round(total_correct_image / total_image * 100, 1)
88
- else:
89
- average_accuracy_image = 0
90
-
91
- if Evaluation_dimension != 'Image':
92
- average_accuracy_video = round(total_correct_video / total_video * 100, 1)
93
- else:
94
- average_accuracy_video = 0
95
-
96
- if Evaluation_dimension == 'All':
97
- overall_accuracy = round((total_correct_image + total_correct_video) / (total_image + total_video) * 100, 1)
98
- else:
99
- overall_accuracy = 0
100
-
101
- if LLM_type == 'Other':
102
- LLM_name = LLM_name_textbox
103
- else:
104
- LLM_name = LLM_type
105
-
106
- if revision_name_textbox == '':
107
- col = csv_data.shape[0]
108
- model_name = model_name_textbox
109
- else:
110
- model_name = revision_name_textbox
111
- model_name_list = csv_data['Model']
112
- name_list = [name.split(']')[0][1:] for name in model_name_list]
113
- if revision_name_textbox not in name_list:
114
- col = csv_data.shape[0]
115
- else:
116
- col = name_list.index(revision_name_textbox)
117
-
118
- if model_link == '':
119
- model_name = model_name # no url
120
- else:
121
- model_name = '[' + model_name + '](' + model_link + ')'
122
-
123
- # add new data
124
- new_data = [
125
- model_type,
126
- model_name,
127
- LLM_name,
128
- overall_accuracy,
129
- average_accuracy_image,
130
- average_accuracy_video,
131
- each_task_accuracy[1],
132
- each_task_accuracy[2],
133
- each_task_accuracy[3],
134
- each_task_accuracy[4],
135
- each_task_accuracy[5],
136
- each_task_accuracy[6],
137
- each_task_accuracy[7],
138
- each_task_accuracy[8],
139
- each_task_accuracy[9],
140
- each_task_accuracy[10],
141
- each_task_accuracy[11],
142
- each_task_accuracy[12],
143
- ]
144
- csv_data.loc[col] = new_data
145
- csv_data = csv_data.to_csv(CSV_DIR, index=False)
146
- return 0
147
-
148
- def get_baseline_df():
149
- # pdb.set_trace()
150
- df = pd.read_csv(CSV_DIR)
151
- df = df.sort_values(by="Avg. All", ascending=False)
152
- present_columns = MODEL_INFO + checkbox_group.value
153
- df = df[present_columns]
154
- return df
155
-
156
- def get_all_df():
157
- df = pd.read_csv(CSV_DIR)
158
- df = df.sort_values(by="Avg. All", ascending=False)
159
- return df
160
-
161
- block = gr.Blocks()
162
-
163
-
164
- with block:
165
- gr.Markdown(
166
- LEADERBORAD_INTRODUCTION
167
- )
168
- with gr.Tabs(elem_classes="tab-buttons") as tabs:
169
- with gr.TabItem("🏅 SEED Benchmark", elem_id="seed-benchmark-tab-table", id=0):
170
- with gr.Row():
171
- with gr.Accordion("Citation", open=False):
172
- citation_button = gr.Textbox(
173
- value=CITATION_BUTTON_TEXT,
174
- label=CITATION_BUTTON_LABEL,
175
- elem_id="citation-button",
176
- ).style(show_copy_button=True)
177
-
178
- gr.Markdown(
179
- TABLE_INTRODUCTION
180
- )
181
-
182
- # selection for column part:
183
- checkbox_group = gr.CheckboxGroup(
184
- choices=TASK_INFO_v2,
185
- value=AVG_INFO,
186
- label="Select options",
187
- interactive=True,
188
- )
189
-
190
- # Create the dataframe component
191
- data_component = gr.components.Dataframe(
192
- value=get_baseline_df,
193
- headers=COLUMN_NAMES,
194
- type="pandas",
195
- datatype=DATA_TITILE_TYPE,
196
- interactive=False,
197
- visible=True,
198
- )
199
-
200
- def on_checkbox_group_change(selected_columns):
201
- # pdb.set_trace()
202
- selected_columns = [item for item in TASK_INFO_v2 if item in selected_columns]
203
- present_columns = MODEL_INFO + selected_columns
204
- updated_data = get_all_df()[present_columns]
205
- updated_data = updated_data.sort_values(by=present_columns[3], ascending=False)
206
- updated_headers = present_columns
207
- update_datatype = [DATA_TITILE_TYPE[COLUMN_NAMES.index(x)] for x in updated_headers]
208
-
209
- filter_component = gr.components.Dataframe(
210
- value=updated_data,
211
- headers=updated_headers,
212
- type="pandas",
213
- datatype=update_datatype,
214
- interactive=False,
215
- visible=True,
216
- )
217
- # pdb.set_trace()
218
-
219
- return filter_component.value
220
-
221
- # Hook the checkbox group up to its handler function
222
- checkbox_group.change(fn=on_checkbox_group_change, inputs=checkbox_group, outputs=data_component)
223
-
224
- # table 2
225
- with gr.TabItem("📝 About", elem_id="seed-benchmark-tab-table", id=2):
226
- gr.Markdown(LEADERBORAD_INFO, elem_classes="markdown-text")
227
-
228
- # table 3
229
- with gr.TabItem("🚀 Submit here! ", elem_id="seed-benchmark-tab-table", id=3):
230
- gr.Markdown(LEADERBORAD_INTRODUCTION, elem_classes="markdown-text")
231
-
232
- with gr.Row():
233
- gr.Markdown(SUBMIT_INTRODUCTION, elem_classes="markdown-text")
234
-
235
- with gr.Row():
236
- gr.Markdown("# ✉️✨ Submit your model evaluation json file here!", elem_classes="markdown-text")
237
-
238
- with gr.Row():
239
- with gr.Column():
240
- model_name_textbox = gr.Textbox(
241
- label="Model name", placeholder="LLaMA-7B"
242
- )
243
- revision_name_textbox = gr.Textbox(
244
- label="Revision Model Name", placeholder="LLaMA-7B"
245
- )
246
- model_type = gr.Dropdown(
247
- choices=[
248
- "LLM",
249
- "ImageLLM",
250
- "VideoLLM",
251
- "Other",
252
- ],
253
- label="Model type",
254
- multiselect=False,
255
- value="ImageLLM",
256
- interactive=True,
257
- )
258
- model_link = gr.Textbox(
259
- label="Model Link", placeholder="https://huggingface.co/decapoda-research/llama-7b-hf"
260
- )
261
-
262
- with gr.Column():
263
-
264
- LLM_type = gr.Dropdown(
265
- choices=["Vicuna-7B", "Flan-T5-XL", "LLaMA-7B", "Other"],
266
- label="LLM type",
267
- multiselect=False,
268
- value="LLaMA-7B",
269
- interactive=True,
270
- )
271
- LLM_name_textbox = gr.Textbox(
272
- label="LLM model (for Other)",
273
- placeholder="LLaMA-13B"
274
- )
275
- Evaluation_dimension = gr.Dropdown(
276
- choices=["All", "Image", "Video"],
277
- label="Evaluation dimension",
278
- multiselect=False,
279
- value="All",
280
- interactive=True,
281
- )
282
-
283
- with gr.Column():
284
-
285
- input_file = gr.inputs.File(label = "Click to Upload a json File", file_count="single", type='binary')
286
- submit_button = gr.Button("Submit Eval")
287
-
288
- submission_result = gr.Markdown()
289
- submit_button.click(
290
- add_new_eval,
291
- inputs = [
292
- input_file,
293
- model_name_textbox,
294
- revision_name_textbox,
295
- model_type,
296
- model_link,
297
- LLM_type,
298
- LLM_name_textbox,
299
- Evaluation_dimension,
300
- ],
301
- # outputs = submission_result,
302
- )
303
-
304
-
305
- with gr.Row():
306
- data_run = gr.Button("Refresh")
307
- data_run.click(
308
- get_baseline_df, outputs=data_component
309
- )
310
-
311
- # block.load(get_baseline_df, outputs=data_title)
312
-
313
- block.launch()
 
spaces/AILab-CVC/SEED-LLaMA/models/seed_qformer/blip2.py DELETED
@@ -1,186 +0,0 @@
1
- """
2
- Copyright (c) 2023, salesforce.com, inc.
3
- All rights reserved.
4
- SPDX-License-Identifier: BSD-3-Clause
5
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
6
- """
7
- import contextlib
8
- import logging
9
- import os
10
- import time
11
- import datetime
12
-
13
- import torch
14
- import torch.nn as nn
15
- import torch.distributed as dist
16
- import torch.nn.functional as F
17
-
18
-
19
- from .qformer_causual import BertConfig, BertLMHeadModel
20
-
21
- from .utils import download_cached_file, get_rank, get_dist_info, get_world_size, main_process, is_dist_avail_and_initialized, is_url
22
- from .eva_vit import create_eva_vit_g
23
- from .clip_vit import create_clip_vit_L
24
- from transformers import BertTokenizer
25
-
26
-
27
- # class Blip2Base(BaseModel):
28
- class Blip2Base(nn.Module):
29
- def __init__(self):
30
- super().__init__()
31
-
32
- @property
33
- def device(self):
34
- return list(self.parameters())[0].device
35
-
36
- @classmethod
37
- def init_tokenizer(cls, truncation_side="right"):
38
- tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", truncation_side=truncation_side)
39
- tokenizer.add_special_tokens({"bos_token": "[DEC]"})
40
- return tokenizer
41
-
42
- def maybe_autocast(self, dtype=torch.float16):
43
- # if on cpu, don't use autocast
44
- # if on gpu, use autocast with dtype if provided, otherwise use torch.float16
45
- enable_autocast = self.device != torch.device("cpu")
46
-
47
- if enable_autocast:
48
- return torch.cuda.amp.autocast(dtype=dtype)
49
- else:
50
- return contextlib.nullcontext()
51
-
52
- @classmethod
53
- def init_Qformer(cls, num_query_token, vision_width, cross_attention_freq=2):
54
- encoder_config = BertConfig.from_pretrained("bert-base-uncased")
55
- encoder_config.encoder_width = vision_width
56
- # insert cross-attention layer every other block
57
- encoder_config.add_cross_attention = True
58
- encoder_config.cross_attention_freq = cross_attention_freq
59
- encoder_config.query_length = num_query_token
60
- Qformer = BertLMHeadModel.from_pretrained("bert-base-uncased", config=encoder_config)
61
- query_tokens = nn.Parameter(torch.zeros(1, num_query_token, encoder_config.hidden_size))
62
- query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range)
63
- return Qformer, query_tokens
64
-
65
- def init_vision_encoder(self, model_name, img_size, drop_path_rate, use_grad_checkpoint, precision):
66
- assert model_name in [
67
- "eva_clip_g",
68
- "eva2_clip_L",
69
- "clip_L",
70
- ], "vit model must be eva_clip_g, eva2_clip_L or clip_L"
71
- if model_name == "eva_clip_g":
72
- visual_encoder = create_eva_vit_g(img_size, drop_path_rate, use_grad_checkpoint, precision)
73
-
74
- elif model_name == "clip_L":
75
- visual_encoder = create_clip_vit_L(img_size, use_grad_checkpoint, precision)
76
- ln_vision = LayerNorm(visual_encoder.num_features)
77
- self.vit_name = model_name
78
- return visual_encoder, ln_vision
79
-
80
- def load_from_pretrained(self, url_or_filename):
81
- if is_url(url_or_filename):
82
- cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
83
- checkpoint = torch.load(cached_file, map_location="cpu")
84
- elif os.path.isfile(url_or_filename):
85
- checkpoint = torch.load(url_or_filename, map_location="cpu")
86
- else:
87
- raise RuntimeError("checkpoint url or path is invalid")
88
-
89
- state_dict = checkpoint["model"]
90
-
91
- msg = self.load_state_dict(state_dict, strict=False)
92
-
93
- # logging.info("Missing keys {}".format(msg.missing_keys))
94
- logging.info("load checkpoint from %s" % url_or_filename)
95
-
96
- return msg
97
-
98
- def get_optimizer_params(self, weight_decay, lr_scale=1):
99
- if self.vit_name == "eva_clip_g":
100
- vit_num_layers = self.visual_encoder.get_num_layer()
101
- lr_scales = list(lr_scale**(vit_num_layers + 1 - i) for i in range(vit_num_layers + 2))
102
-
103
- parameter_group_names = {}
104
- parameter_group_vars = {}
105
-
106
- for name, param in self.named_parameters():
107
- if not param.requires_grad:
108
- continue # frozen weights
109
- if len(param.shape) == 1 or name.endswith(".bias"):
110
- group_name = "no_decay"
111
- this_weight_decay = 0.
112
- else:
113
- group_name = "decay"
114
- this_weight_decay = weight_decay
115
- if 'visual_encoder' in name:
116
- layer_id = self.visual_encoder.get_num_layer(name.replace('visual_encoder.', ''))
117
- group_name = "vit_layer_%d_%s" % (layer_id, group_name)
118
- else:
119
- layer_id = None
120
-
121
- if group_name not in parameter_group_names:
122
- if layer_id is not None:
123
- scale = lr_scales[layer_id]
124
- else:
125
- scale = 1
126
- parameter_group_names[group_name] = {"weight_decay": this_weight_decay, "params": [], "lr_scale": scale}
127
- parameter_group_vars[group_name] = {"weight_decay": this_weight_decay, "params": [], "lr_scale": scale}
128
- parameter_group_vars[group_name]["params"].append(param)
129
- parameter_group_names[group_name]["params"].append(name)
130
- # import json
131
- # print("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
132
- optim_params = list(parameter_group_vars.values())
133
- return optim_params
134
- else:
135
- return super().get_optimizer_params(weight_decay, lr_scale)
136
-
137
- def _lemmatize(self, answers):
138
- def apply(answer):
139
- doc = self.lemmatizer(answer)
140
-
141
- words = []
142
- for token in doc:
143
- if token.pos_ in ["NOUN", "VERB"]:
144
- words.append(token.lemma_)
145
- else:
146
- words.append(token.text)
147
- answer = " ".join(words)
148
-
149
- return answer
150
-
151
- return [apply(answer) for answer in answers]
152
-
153
- @property
154
- def lemmatizer(self):
155
- if self._lemmatizer is None:
156
- try:
157
- import spacy
158
-
159
- self._lemmatizer = spacy.load("en_core_web_sm")
160
- except ImportError:
161
- logging.error("""
162
- Please install spacy and en_core_web_sm model to apply lemmatization.
163
- python -m spacy download en_core_web_sm
164
- OR
165
- import spacy.cli
166
- spacy.cli.download("en_core_web_sm")
167
- """)
168
- exit(1)
169
-
170
- return self._lemmatizer
171
-
172
-
173
- def disabled_train(self, mode=True):
174
- """Overwrite model.train with this function to make sure train/eval mode
175
- does not change anymore."""
176
- return self
177
-
178
-
179
- class LayerNorm(nn.LayerNorm):
180
- """Subclass torch's LayerNorm to handle fp16."""
181
- def forward(self, x: torch.Tensor):
182
- orig_type = x.dtype
183
- ret = super().forward(x.type(torch.float32))
184
- return ret.type(orig_type)
185
-
186
-
 
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_1_ClothesKeyPoint/work_dirs_1-x/td_hm_res50_4xb16-150e_deepfashion2_long_sleeved_dress_256x192/td_hm_res50_4xb16-150e_deepfashion2_long_sleeved_dress_256x192.py DELETED
@@ -1,2861 +0,0 @@
1
- default_scope = 'mmpose'
2
- default_hooks = dict(
3
- timer=dict(type='IterTimerHook'),
4
- logger=dict(type='LoggerHook', interval=50),
5
- param_scheduler=dict(type='ParamSchedulerHook'),
6
- checkpoint=dict(
7
- type='CheckpointHook', interval=10, save_best='PCK', rule='greater'),
8
- sampler_seed=dict(type='DistSamplerSeedHook'),
9
- visualization=dict(type='PoseVisualizationHook', enable=False))
10
- custom_hooks = [dict(type='SyncBuffersHook')]
11
- env_cfg = dict(
12
- cudnn_benchmark=False,
13
- mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
14
- dist_cfg=dict(backend='nccl'))
15
- vis_backends = [dict(type='LocalVisBackend')]
16
- visualizer = dict(
17
- type='PoseLocalVisualizer',
18
- vis_backends=[dict(type='LocalVisBackend'),
19
- dict(type='WandbVisBackend')],
20
- name='visualizer')
21
- log_processor = dict(
22
- type='LogProcessor', window_size=50, by_epoch=True, num_digits=6)
23
- log_level = 'INFO'
24
- load_from = None
25
- resume = False
26
- backend_args = dict(backend='local')
27
- train_cfg = dict(by_epoch=True, max_epochs=150, val_interval=10)
28
- val_cfg = dict()
29
- test_cfg = dict()
30
- colors = dict(
31
- sss=[255, 128, 0],
32
- lss=[255, 0, 128],
33
- sso=[128, 0, 255],
34
- lso=[0, 128, 255],
35
- vest=[0, 128, 128],
36
- sling=[0, 0, 128],
37
- shorts=[128, 128, 128],
38
- trousers=[128, 0, 128],
39
- skirt=[64, 128, 128],
40
- ssd=[64, 64, 128],
41
- lsd=[128, 64, 0],
42
- vd=[128, 64, 255],
43
- sd=[128, 64, 0])
44
- dataset_info = dict(
45
- dataset_name='deepfashion2',
46
- paper_info=dict(
47
- author=
48
- 'Yuying Ge and Ruimao Zhang and Lingyun Wu and Xiaogang Wang and Xiaoou Tang and Ping Luo',
49
- title=
50
- 'DeepFashion2: A Versatile Benchmark for Detection, Pose Estimation, Segmentation and Re-Identification of Clothing Images',
51
- container=
52
- 'Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)',
53
- year='2019',
54
- homepage='https://github.com/switchablenorms/DeepFashion2'),
55
- keypoint_info=dict({
56
- 0:
57
- dict(name='sss_kpt1', id=0, color=[255, 128, 0], type='', swap=''),
58
- 1:
59
- dict(
60
- name='sss_kpt2',
61
- id=1,
62
- color=[255, 128, 0],
63
- type='',
64
- swap='sss_kpt6'),
65
- 2:
66
- dict(
67
- name='sss_kpt3',
68
- id=2,
69
- color=[255, 128, 0],
70
- type='',
71
- swap='sss_kpt5'),
72
- 3:
73
- dict(name='sss_kpt4', id=3, color=[255, 128, 0], type='', swap=''),
74
- 4:
75
- dict(
76
- name='sss_kpt5',
77
- id=4,
78
- color=[255, 128, 0],
79
- type='',
80
- swap='sss_kpt3'),
81
- 5:
82
- dict(
83
- name='sss_kpt6',
84
- id=5,
85
- color=[255, 128, 0],
86
- type='',
87
- swap='sss_kpt2'),
88
- 6:
89
- dict(
90
- name='sss_kpt7',
91
- id=6,
92
- color=[255, 128, 0],
93
- type='',
94
- swap='sss_kpt25'),
95
- 7:
96
- dict(
97
- name='sss_kpt8',
98
- id=7,
99
- color=[255, 128, 0],
100
- type='',
101
- swap='sss_kpt24'),
102
- 8:
103
- dict(
104
- name='sss_kpt9',
105
- id=8,
106
- color=[255, 128, 0],
107
- type='',
108
- swap='sss_kpt23'),
109
- 9:
110
- dict(
111
- name='sss_kpt10',
112
- id=9,
113
- color=[255, 128, 0],
114
- type='',
115
- swap='sss_kpt22'),
116
- 10:
117
- dict(
118
- name='sss_kpt11',
119
- id=10,
120
- color=[255, 128, 0],
121
- type='',
122
- swap='sss_kpt21'),
123
- 11:
124
- dict(
125
- name='sss_kpt12',
126
- id=11,
127
- color=[255, 128, 0],
128
- type='',
129
- swap='sss_kpt20'),
130
- 12:
131
- dict(
132
- name='sss_kpt13',
133
- id=12,
134
- color=[255, 128, 0],
135
- type='',
136
- swap='sss_kpt19'),
137
- 13:
138
- dict(
139
- name='sss_kpt14',
140
- id=13,
141
- color=[255, 128, 0],
142
- type='',
143
- swap='sss_kpt18'),
144
- 14:
145
- dict(
146
- name='sss_kpt15',
147
- id=14,
148
- color=[255, 128, 0],
149
- type='',
150
- swap='sss_kpt17'),
151
- 15:
152
- dict(name='sss_kpt16', id=15, color=[255, 128, 0], type='', swap=''),
153
- 16:
154
- dict(
155
- name='sss_kpt17',
156
- id=16,
157
- color=[255, 128, 0],
158
- type='',
159
- swap='sss_kpt15'),
160
- 17:
161
- dict(
162
- name='sss_kpt18',
163
- id=17,
164
- color=[255, 128, 0],
165
- type='',
166
- swap='sss_kpt14'),
167
- 18:
168
- dict(
169
- name='sss_kpt19',
170
- id=18,
171
- color=[255, 128, 0],
172
- type='',
173
- swap='sss_kpt13'),
174
- 19:
175
- dict(
176
- name='sss_kpt20',
177
- id=19,
178
- color=[255, 128, 0],
179
- type='',
180
- swap='sss_kpt12'),
181
- 20:
182
- dict(
183
- name='sss_kpt21',
184
- id=20,
185
- color=[255, 128, 0],
186
- type='',
187
- swap='sss_kpt11'),
188
- 21:
189
- dict(
190
- name='sss_kpt22',
191
- id=21,
192
- color=[255, 128, 0],
193
- type='',
194
- swap='sss_kpt10'),
195
- 22:
196
- dict(
197
- name='sss_kpt23',
198
- id=22,
199
- color=[255, 128, 0],
200
- type='',
201
- swap='sss_kpt9'),
202
- 23:
203
- dict(
204
- name='sss_kpt24',
205
- id=23,
206
- color=[255, 128, 0],
207
- type='',
208
- swap='sss_kpt8'),
209
- 24:
210
- dict(
211
- name='sss_kpt25',
212
- id=24,
213
- color=[255, 128, 0],
214
- type='',
215
- swap='sss_kpt7'),
216
- 25:
217
- dict(name='lss_kpt1', id=25, color=[255, 0, 128], type='', swap=''),
218
- 26:
219
- dict(
220
- name='lss_kpt2',
221
- id=26,
222
- color=[255, 0, 128],
223
- type='',
224
- swap='lss_kpt6'),
225
- 27:
226
- dict(
227
- name='lss_kpt3',
228
- id=27,
229
- color=[255, 0, 128],
230
- type='',
231
- swap='lss_kpt5'),
232
- 28:
233
- dict(name='lss_kpt4', id=28, color=[255, 0, 128], type='', swap=''),
234
- 29:
235
- dict(
236
- name='lss_kpt5',
237
- id=29,
238
- color=[255, 0, 128],
239
- type='',
240
- swap='lss_kpt3'),
241
- 30:
242
- dict(
243
- name='lss_kpt6',
244
- id=30,
245
- color=[255, 0, 128],
246
- type='',
247
- swap='lss_kpt2'),
248
- 31:
249
- dict(
250
- name='lss_kpt7',
251
- id=31,
252
- color=[255, 0, 128],
253
- type='',
254
- swap='lss_kpt33'),
255
- 32:
256
- dict(
257
- name='lss_kpt8',
258
- id=32,
259
- color=[255, 0, 128],
260
- type='',
261
- swap='lss_kpt32'),
262
- 33:
263
- dict(
264
- name='lss_kpt9',
265
- id=33,
266
- color=[255, 0, 128],
267
- type='',
268
- swap='lss_kpt31'),
269
- 34:
270
- dict(
271
- name='lss_kpt10',
272
- id=34,
273
- color=[255, 0, 128],
274
- type='',
275
- swap='lss_kpt30'),
276
- 35:
277
- dict(
278
- name='lss_kpt11',
279
- id=35,
280
- color=[255, 0, 128],
281
- type='',
282
- swap='lss_kpt29'),
283
- 36:
284
- dict(
285
- name='lss_kpt12',
286
- id=36,
287
- color=[255, 0, 128],
288
- type='',
289
- swap='lss_kpt28'),
290
- 37:
291
- dict(
292
- name='lss_kpt13',
293
- id=37,
294
- color=[255, 0, 128],
295
- type='',
296
- swap='lss_kpt27'),
297
- 38:
298
- dict(
299
- name='lss_kpt14',
300
- id=38,
301
- color=[255, 0, 128],
302
- type='',
303
- swap='lss_kpt26'),
304
- 39:
305
- dict(
306
- name='lss_kpt15',
307
- id=39,
308
- color=[255, 0, 128],
309
- type='',
310
- swap='lss_kpt25'),
311
- 40:
312
- dict(
313
- name='lss_kpt16',
314
- id=40,
315
- color=[255, 0, 128],
316
- type='',
317
- swap='lss_kpt24'),
318
- 41:
319
- dict(
320
- name='lss_kpt17',
321
- id=41,
322
- color=[255, 0, 128],
323
- type='',
324
- swap='lss_kpt23'),
325
- 42:
326
- dict(
327
- name='lss_kpt18',
328
- id=42,
329
- color=[255, 0, 128],
330
- type='',
331
- swap='lss_kpt22'),
332
- 43:
333
- dict(
334
- name='lss_kpt19',
335
- id=43,
336
- color=[255, 0, 128],
337
- type='',
338
- swap='lss_kpt21'),
339
- 44:
340
- dict(name='lss_kpt20', id=44, color=[255, 0, 128], type='', swap=''),
341
- 45:
342
- dict(
343
- name='lss_kpt21',
344
- id=45,
345
- color=[255, 0, 128],
346
- type='',
347
- swap='lss_kpt19'),
348
- 46:
349
- dict(
350
- name='lss_kpt22',
351
- id=46,
352
- color=[255, 0, 128],
353
- type='',
354
- swap='lss_kpt18'),
355
- 47:
356
- dict(
357
- name='lss_kpt23',
358
- id=47,
359
- color=[255, 0, 128],
360
- type='',
361
- swap='lss_kpt17'),
362
- 48:
363
- dict(
364
- name='lss_kpt24',
365
- id=48,
366
- color=[255, 0, 128],
367
- type='',
368
- swap='lss_kpt16'),
369
- 49:
370
- dict(
371
- name='lss_kpt25',
372
- id=49,
373
- color=[255, 0, 128],
374
- type='',
375
- swap='lss_kpt15'),
376
- 50:
377
- dict(
378
- name='lss_kpt26',
379
- id=50,
380
- color=[255, 0, 128],
381
- type='',
382
- swap='lss_kpt14'),
383
- 51:
384
- dict(
385
- name='lss_kpt27',
386
- id=51,
387
- color=[255, 0, 128],
388
- type='',
389
- swap='lss_kpt13'),
390
- 52:
391
- dict(
392
- name='lss_kpt28',
393
- id=52,
394
- color=[255, 0, 128],
395
- type='',
396
- swap='lss_kpt12'),
397
- 53:
398
- dict(
399
- name='lss_kpt29',
400
- id=53,
401
- color=[255, 0, 128],
402
- type='',
403
- swap='lss_kpt11'),
404
- 54:
405
- dict(
406
- name='lss_kpt30',
407
- id=54,
408
- color=[255, 0, 128],
409
- type='',
410
- swap='lss_kpt10'),
411
- 55:
412
- dict(
413
- name='lss_kpt31',
414
- id=55,
415
- color=[255, 0, 128],
416
- type='',
417
- swap='lss_kpt9'),
418
- 56:
419
- dict(
420
- name='lss_kpt32',
421
- id=56,
422
- color=[255, 0, 128],
423
- type='',
424
- swap='lss_kpt8'),
425
- 57:
426
- dict(
427
- name='lss_kpt33',
428
- id=57,
429
- color=[255, 0, 128],
430
- type='',
431
- swap='lss_kpt7'),
432
- 58:
433
- dict(name='sso_kpt1', id=58, color=[128, 0, 255], type='', swap=''),
434
- 59:
435
- dict(
436
- name='sso_kpt2',
437
- id=59,
438
- color=[128, 0, 255],
439
- type='',
440
- swap='sso_kpt26'),
441
- 60:
442
- dict(
443
- name='sso_kpt3',
444
- id=60,
445
- color=[128, 0, 255],
446
- type='',
447
- swap='sso_kpt5'),
448
- 61:
449
- dict(
450
- name='sso_kpt4',
451
- id=61,
452
- color=[128, 0, 255],
453
- type='',
454
- swap='sso_kpt6'),
455
- 62:
456
- dict(
457
- name='sso_kpt5',
458
- id=62,
459
- color=[128, 0, 255],
460
- type='',
461
- swap='sso_kpt3'),
462
- 63:
463
- dict(
464
- name='sso_kpt6',
465
- id=63,
466
- color=[128, 0, 255],
467
- type='',
468
- swap='sso_kpt4'),
469
- 64:
470
- dict(
471
- name='sso_kpt7',
472
- id=64,
473
- color=[128, 0, 255],
474
- type='',
475
- swap='sso_kpt25'),
476
- 65:
477
- dict(
478
- name='sso_kpt8',
479
- id=65,
480
- color=[128, 0, 255],
481
- type='',
482
- swap='sso_kpt24'),
483
- 66:
484
- dict(
485
- name='sso_kpt9',
486
- id=66,
487
- color=[128, 0, 255],
488
- type='',
489
- swap='sso_kpt23'),
490
- 67:
491
- dict(
492
- name='sso_kpt10',
493
- id=67,
494
- color=[128, 0, 255],
495
- type='',
496
- swap='sso_kpt22'),
497
- 68:
498
- dict(
499
- name='sso_kpt11',
500
- id=68,
501
- color=[128, 0, 255],
502
- type='',
503
- swap='sso_kpt21'),
504
- 69:
505
- dict(
506
- name='sso_kpt12',
507
- id=69,
508
- color=[128, 0, 255],
509
- type='',
510
- swap='sso_kpt20'),
511
- 70:
512
- dict(
513
- name='sso_kpt13',
514
- id=70,
515
- color=[128, 0, 255],
516
- type='',
517
- swap='sso_kpt19'),
518
- 71:
519
- dict(
520
- name='sso_kpt14',
521
- id=71,
522
- color=[128, 0, 255],
523
- type='',
524
- swap='sso_kpt18'),
525
- 72:
526
- dict(
527
- name='sso_kpt15',
528
- id=72,
529
- color=[128, 0, 255],
530
- type='',
531
- swap='sso_kpt17'),
532
- 73:
533
- dict(
534
- name='sso_kpt16',
535
- id=73,
536
- color=[128, 0, 255],
537
- type='',
538
- swap='sso_kpt29'),
539
- 74:
540
- dict(
541
- name='sso_kpt17',
542
- id=74,
543
- color=[128, 0, 255],
544
- type='',
545
- swap='sso_kpt15'),
546
- 75:
547
- dict(
548
- name='sso_kpt18',
549
- id=75,
550
- color=[128, 0, 255],
551
- type='',
552
- swap='sso_kpt14'),
553
- 76:
554
- dict(
555
- name='sso_kpt19',
556
- id=76,
557
- color=[128, 0, 255],
558
- type='',
559
- swap='sso_kpt13'),
560
- 77:
561
- dict(
562
- name='sso_kpt20',
563
- id=77,
564
- color=[128, 0, 255],
565
- type='',
566
- swap='sso_kpt12'),
567
- 78:
568
- dict(
569
- name='sso_kpt21',
570
- id=78,
571
- color=[128, 0, 255],
572
- type='',
573
- swap='sso_kpt11'),
574
- 79:
575
- dict(
576
- name='sso_kpt22',
577
- id=79,
578
- color=[128, 0, 255],
579
- type='',
580
- swap='sso_kpt10'),
581
- 80:
582
- dict(
583
- name='sso_kpt23',
584
- id=80,
585
- color=[128, 0, 255],
586
- type='',
587
- swap='sso_kpt9'),
588
- 81:
589
- dict(
590
- name='sso_kpt24',
591
- id=81,
592
- color=[128, 0, 255],
593
- type='',
594
- swap='sso_kpt8'),
595
- 82:
596
- dict(
597
- name='sso_kpt25',
598
- id=82,
599
- color=[128, 0, 255],
600
- type='',
601
- swap='sso_kpt7'),
602
- 83:
603
- dict(
604
- name='sso_kpt26',
605
- id=83,
606
- color=[128, 0, 255],
607
- type='',
608
- swap='sso_kpt2'),
609
- 84:
610
- dict(
611
- name='sso_kpt27',
612
- id=84,
613
- color=[128, 0, 255],
614
- type='',
615
- swap='sso_kpt30'),
616
- 85:
617
- dict(
618
- name='sso_kpt28',
619
- id=85,
620
- color=[128, 0, 255],
621
- type='',
622
- swap='sso_kpt31'),
623
- 86:
624
- dict(
625
- name='sso_kpt29',
626
- id=86,
627
- color=[128, 0, 255],
628
- type='',
629
- swap='sso_kpt16'),
630
- 87:
631
- dict(
632
- name='sso_kpt30',
633
- id=87,
634
- color=[128, 0, 255],
635
- type='',
636
- swap='sso_kpt27'),
637
- 88:
638
- dict(
639
- name='sso_kpt31',
640
- id=88,
641
- color=[128, 0, 255],
642
- type='',
643
- swap='sso_kpt28'),
644
- 89:
645
- dict(name='lso_kpt1', id=89, color=[0, 128, 255], type='', swap=''),
646
- 90:
647
- dict(
648
- name='lso_kpt2',
649
- id=90,
650
- color=[0, 128, 255],
651
- type='',
652
- swap='lso_kpt6'),
653
- 91:
654
- dict(
655
- name='lso_kpt3',
656
- id=91,
657
- color=[0, 128, 255],
658
- type='',
659
- swap='lso_kpt5'),
660
- 92:
661
- dict(
662
- name='lso_kpt4',
663
- id=92,
664
- color=[0, 128, 255],
665
- type='',
666
- swap='lso_kpt34'),
667
- 93:
668
- dict(
669
- name='lso_kpt5',
670
- id=93,
671
- color=[0, 128, 255],
672
- type='',
673
- swap='lso_kpt3'),
674
- 94:
675
- dict(
676
- name='lso_kpt6',
677
- id=94,
678
- color=[0, 128, 255],
679
- type='',
680
- swap='lso_kpt2'),
681
- 95:
682
- dict(
683
- name='lso_kpt7',
684
- id=95,
685
- color=[0, 128, 255],
686
- type='',
687
- swap='lso_kpt33'),
688
- 96:
689
- dict(
690
- name='lso_kpt8',
691
- id=96,
692
- color=[0, 128, 255],
693
- type='',
694
- swap='lso_kpt32'),
695
- 97:
696
- dict(
697
- name='lso_kpt9',
698
- id=97,
699
- color=[0, 128, 255],
700
- type='',
701
- swap='lso_kpt31'),
702
- 98:
703
- dict(
704
- name='lso_kpt10',
705
- id=98,
706
- color=[0, 128, 255],
707
- type='',
708
- swap='lso_kpt30'),
709
- 99:
710
- dict(
711
- name='lso_kpt11',
712
- id=99,
713
- color=[0, 128, 255],
714
- type='',
715
- swap='lso_kpt29'),
716
- 100:
717
- dict(
718
- name='lso_kpt12',
719
- id=100,
720
- color=[0, 128, 255],
721
- type='',
722
- swap='lso_kpt28'),
723
- 101:
724
- dict(
725
- name='lso_kpt13',
726
- id=101,
727
- color=[0, 128, 255],
728
- type='',
729
- swap='lso_kpt27'),
730
- 102:
731
- dict(
732
- name='lso_kpt14',
733
- id=102,
734
- color=[0, 128, 255],
735
- type='',
736
- swap='lso_kpt26'),
737
- 103:
738
- dict(
739
- name='lso_kpt15',
740
- id=103,
741
- color=[0, 128, 255],
742
- type='',
743
- swap='lso_kpt25'),
744
- 104:
745
- dict(
746
- name='lso_kpt16',
747
- id=104,
748
- color=[0, 128, 255],
749
- type='',
750
- swap='lso_kpt24'),
751
- 105:
752
- dict(
753
- name='lso_kpt17',
754
- id=105,
755
- color=[0, 128, 255],
756
- type='',
757
- swap='lso_kpt23'),
758
- 106:
759
- dict(
760
- name='lso_kpt18',
761
- id=106,
762
- color=[0, 128, 255],
763
- type='',
764
- swap='lso_kpt22'),
765
- 107:
766
- dict(
767
- name='lso_kpt19',
768
- id=107,
769
- color=[0, 128, 255],
770
- type='',
771
- swap='lso_kpt21'),
772
- 108:
773
- dict(
774
- name='lso_kpt20',
775
- id=108,
776
- color=[0, 128, 255],
777
- type='',
778
- swap='lso_kpt37'),
779
- 109:
780
- dict(
781
- name='lso_kpt21',
782
- id=109,
783
- color=[0, 128, 255],
784
- type='',
785
- swap='lso_kpt19'),
786
- 110:
787
- dict(
788
- name='lso_kpt22',
789
- id=110,
790
- color=[0, 128, 255],
791
- type='',
792
- swap='lso_kpt18'),
793
- 111:
794
- dict(
795
- name='lso_kpt23',
796
- id=111,
797
- color=[0, 128, 255],
798
- type='',
799
- swap='lso_kpt17'),
800
- 112:
801
- dict(
802
- name='lso_kpt24',
803
- id=112,
804
- color=[0, 128, 255],
805
- type='',
806
- swap='lso_kpt16'),
807
- 113:
808
- dict(
809
- name='lso_kpt25',
810
- id=113,
811
- color=[0, 128, 255],
812
- type='',
813
- swap='lso_kpt15'),
814
- 114:
815
- dict(
816
- name='lso_kpt26',
817
- id=114,
818
- color=[0, 128, 255],
819
- type='',
820
- swap='lso_kpt14'),
821
- 115:
822
- dict(
823
- name='lso_kpt27',
824
- id=115,
825
- color=[0, 128, 255],
826
- type='',
827
- swap='lso_kpt13'),
828
- 116:
829
- dict(
830
- name='lso_kpt28',
831
- id=116,
832
- color=[0, 128, 255],
833
- type='',
834
- swap='lso_kpt12'),
835
- 117:
836
- dict(
837
- name='lso_kpt29',
838
- id=117,
839
- color=[0, 128, 255],
840
- type='',
841
- swap='lso_kpt11'),
842
- 118:
843
- dict(
844
- name='lso_kpt30',
845
- id=118,
846
- color=[0, 128, 255],
847
- type='',
848
- swap='lso_kpt10'),
849
- 119:
850
- dict(
851
- name='lso_kpt31',
852
- id=119,
853
- color=[0, 128, 255],
854
- type='',
855
- swap='lso_kpt9'),
856
- 120:
857
- dict(
858
- name='lso_kpt32',
859
- id=120,
860
- color=[0, 128, 255],
861
- type='',
862
- swap='lso_kpt8'),
863
- 121:
864
- dict(
865
- name='lso_kpt33',
866
- id=121,
867
- color=[0, 128, 255],
868
- type='',
869
- swap='lso_kpt7'),
870
- 122:
871
- dict(
872
- name='lso_kpt34',
873
- id=122,
874
- color=[0, 128, 255],
875
- type='',
876
- swap='lso_kpt4'),
877
- 123:
878
- dict(
879
- name='lso_kpt35',
880
- id=123,
881
- color=[0, 128, 255],
882
- type='',
883
- swap='lso_kpt38'),
884
- 124:
885
- dict(
886
- name='lso_kpt36',
887
- id=124,
888
- color=[0, 128, 255],
889
- type='',
890
- swap='lso_kpt39'),
891
- 125:
892
- dict(
893
- name='lso_kpt37',
894
- id=125,
895
- color=[0, 128, 255],
896
- type='',
897
- swap='lso_kpt20'),
898
- 126:
899
- dict(
900
- name='lso_kpt38',
901
- id=126,
902
- color=[0, 128, 255],
903
- type='',
904
- swap='lso_kpt35'),
905
- 127:
906
- dict(
907
- name='lso_kpt39',
908
- id=127,
909
- color=[0, 128, 255],
910
- type='',
911
- swap='lso_kpt36'),
912
- 128:
913
- dict(name='vest_kpt1', id=128, color=[0, 128, 128], type='', swap=''),
914
- 129:
915
- dict(
916
- name='vest_kpt2',
917
- id=129,
918
- color=[0, 128, 128],
919
- type='',
920
- swap='vest_kpt6'),
921
- 130:
922
- dict(
923
- name='vest_kpt3',
924
- id=130,
925
- color=[0, 128, 128],
926
- type='',
927
- swap='vest_kpt5'),
928
- 131:
929
- dict(name='vest_kpt4', id=131, color=[0, 128, 128], type='', swap=''),
930
- 132:
931
- dict(
932
- name='vest_kpt5',
933
- id=132,
934
- color=[0, 128, 128],
935
- type='',
936
- swap='vest_kpt3'),
937
- 133:
938
- dict(
939
- name='vest_kpt6',
940
- id=133,
941
- color=[0, 128, 128],
942
- type='',
943
- swap='vest_kpt2'),
944
- 134:
945
- dict(
946
- name='vest_kpt7',
947
- id=134,
948
- color=[0, 128, 128],
949
- type='',
950
- swap='vest_kpt15'),
951
- 135:
952
- dict(
953
- name='vest_kpt8',
954
- id=135,
955
- color=[0, 128, 128],
956
- type='',
957
- swap='vest_kpt14'),
958
- 136:
959
- dict(
960
- name='vest_kpt9',
961
- id=136,
962
- color=[0, 128, 128],
963
- type='',
964
- swap='vest_kpt13'),
965
- 137:
966
- dict(
967
- name='vest_kpt10',
968
- id=137,
969
- color=[0, 128, 128],
970
- type='',
971
- swap='vest_kpt12'),
972
- 138:
973
- dict(name='vest_kpt11', id=138, color=[0, 128, 128], type='', swap=''),
974
- 139:
975
- dict(
976
- name='vest_kpt12',
977
- id=139,
978
- color=[0, 128, 128],
979
- type='',
980
- swap='vest_kpt10'),
981
- 140:
982
- dict(name='vest_kpt13', id=140, color=[0, 128, 128], type='', swap=''),
983
- 141:
984
- dict(
985
- name='vest_kpt14',
986
- id=141,
987
- color=[0, 128, 128],
988
- type='',
989
- swap='vest_kpt8'),
990
- 142:
991
- dict(
992
- name='vest_kpt15',
993
- id=142,
994
- color=[0, 128, 128],
- type='',
- swap='vest_kpt7'),
- 143:
- dict(name='sling_kpt1', id=143, color=[0, 0, 128], type='', swap=''),
- 144:
- dict(
- name='sling_kpt2',
- id=144,
- color=[0, 0, 128],
- type='',
- swap='sling_kpt6'),
- 145:
- dict(
- name='sling_kpt3',
- id=145,
- color=[0, 0, 128],
- type='',
- swap='sling_kpt5'),
- 146:
- dict(name='sling_kpt4', id=146, color=[0, 0, 128], type='', swap=''),
- 147:
- dict(
- name='sling_kpt5',
- id=147,
- color=[0, 0, 128],
- type='',
- swap='sling_kpt3'),
- 148:
- dict(
- name='sling_kpt6',
- id=148,
- color=[0, 0, 128],
- type='',
- swap='sling_kpt2'),
- 149:
- dict(
- name='sling_kpt7',
- id=149,
- color=[0, 0, 128],
- type='',
- swap='sling_kpt15'),
- 150:
- dict(
- name='sling_kpt8',
- id=150,
- color=[0, 0, 128],
- type='',
- swap='sling_kpt14'),
- 151:
- dict(
- name='sling_kpt9',
- id=151,
- color=[0, 0, 128],
- type='',
- swap='sling_kpt13'),
- 152:
- dict(
- name='sling_kpt10',
- id=152,
- color=[0, 0, 128],
- type='',
- swap='sling_kpt12'),
- 153:
- dict(name='sling_kpt11', id=153, color=[0, 0, 128], type='', swap=''),
- 154:
- dict(
- name='sling_kpt12',
- id=154,
- color=[0, 0, 128],
- type='',
- swap='sling_kpt10'),
- 155:
- dict(
- name='sling_kpt13',
- id=155,
- color=[0, 0, 128],
- type='',
- swap='sling_kpt9'),
- 156:
- dict(
- name='sling_kpt14',
- id=156,
- color=[0, 0, 128],
- type='',
- swap='sling_kpt8'),
- 157:
- dict(
- name='sling_kpt15',
- id=157,
- color=[0, 0, 128],
- type='',
- swap='sling_kpt7'),
- 158:
- dict(
- name='shorts_kpt1',
- id=158,
- color=[128, 128, 128],
- type='',
- swap='shorts_kpt3'),
- 159:
- dict(
- name='shorts_kpt2',
- id=159,
- color=[128, 128, 128],
- type='',
- swap=''),
- 160:
- dict(
- name='shorts_kpt3',
- id=160,
- color=[128, 128, 128],
- type='',
- swap='shorts_kpt1'),
- 161:
- dict(
- name='shorts_kpt4',
- id=161,
- color=[128, 128, 128],
- type='',
- swap='shorts_kpt10'),
- 162:
- dict(
- name='shorts_kpt5',
- id=162,
- color=[128, 128, 128],
- type='',
- swap='shorts_kpt9'),
- 163:
- dict(
- name='shorts_kpt6',
- id=163,
- color=[128, 128, 128],
- type='',
- swap='shorts_kpt8'),
- 164:
- dict(
- name='shorts_kpt7',
- id=164,
- color=[128, 128, 128],
- type='',
- swap=''),
- 165:
- dict(
- name='shorts_kpt8',
- id=165,
- color=[128, 128, 128],
- type='',
- swap='shorts_kpt6'),
- 166:
- dict(
- name='shorts_kpt9',
- id=166,
- color=[128, 128, 128],
- type='',
- swap='shorts_kpt5'),
- 167:
- dict(
- name='shorts_kpt10',
- id=167,
- color=[128, 128, 128],
- type='',
- swap='shorts_kpt4'),
- 168:
- dict(
- name='trousers_kpt1',
- id=168,
- color=[128, 0, 128],
- type='',
- swap='trousers_kpt3'),
- 169:
- dict(
- name='trousers_kpt2',
- id=169,
- color=[128, 0, 128],
- type='',
- swap=''),
- 170:
- dict(
- name='trousers_kpt3',
- id=170,
- color=[128, 0, 128],
- type='',
- swap='trousers_kpt1'),
- 171:
- dict(
- name='trousers_kpt4',
- id=171,
- color=[128, 0, 128],
- type='',
- swap='trousers_kpt14'),
- 172:
- dict(
- name='trousers_kpt5',
- id=172,
- color=[128, 0, 128],
- type='',
- swap='trousers_kpt13'),
- 173:
- dict(
- name='trousers_kpt6',
- id=173,
- color=[128, 0, 128],
- type='',
- swap='trousers_kpt12'),
- 174:
- dict(
- name='trousers_kpt7',
- id=174,
- color=[128, 0, 128],
- type='',
- swap='trousers_kpt11'),
- 175:
- dict(
- name='trousers_kpt8',
- id=175,
- color=[128, 0, 128],
- type='',
- swap='trousers_kpt10'),
- 176:
- dict(
- name='trousers_kpt9',
- id=176,
- color=[128, 0, 128],
- type='',
- swap=''),
- 177:
- dict(
- name='trousers_kpt10',
- id=177,
- color=[128, 0, 128],
- type='',
- swap='trousers_kpt8'),
- 178:
- dict(
- name='trousers_kpt11',
- id=178,
- color=[128, 0, 128],
- type='',
- swap='trousers_kpt7'),
- 179:
- dict(
- name='trousers_kpt12',
- id=179,
- color=[128, 0, 128],
- type='',
- swap='trousers_kpt6'),
- 180:
- dict(
- name='trousers_kpt13',
- id=180,
- color=[128, 0, 128],
- type='',
- swap='trousers_kpt5'),
- 181:
- dict(
- name='trousers_kpt14',
- id=181,
- color=[128, 0, 128],
- type='',
- swap='trousers_kpt4'),
- 182:
- dict(
- name='skirt_kpt1',
- id=182,
- color=[64, 128, 128],
- type='',
- swap='skirt_kpt3'),
- 183:
- dict(
- name='skirt_kpt2', id=183, color=[64, 128, 128], type='', swap=''),
- 184:
- dict(
- name='skirt_kpt3',
- id=184,
- color=[64, 128, 128],
- type='',
- swap='skirt_kpt1'),
- 185:
- dict(
- name='skirt_kpt4',
- id=185,
- color=[64, 128, 128],
- type='',
- swap='skirt_kpt8'),
- 186:
- dict(
- name='skirt_kpt5',
- id=186,
- color=[64, 128, 128],
- type='',
- swap='skirt_kpt7'),
- 187:
- dict(
- name='skirt_kpt6', id=187, color=[64, 128, 128], type='', swap=''),
- 188:
- dict(
- name='skirt_kpt7',
- id=188,
- color=[64, 128, 128],
- type='',
- swap='skirt_kpt5'),
- 189:
- dict(
- name='skirt_kpt8',
- id=189,
- color=[64, 128, 128],
- type='',
- swap='skirt_kpt4'),
- 190:
- dict(name='ssd_kpt1', id=190, color=[64, 64, 128], type='', swap=''),
- 191:
- dict(
- name='ssd_kpt2',
- id=191,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt6'),
- 192:
- dict(
- name='ssd_kpt3',
- id=192,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt5'),
- 193:
- dict(name='ssd_kpt4', id=193, color=[64, 64, 128], type='', swap=''),
- 194:
- dict(
- name='ssd_kpt5',
- id=194,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt3'),
- 195:
- dict(
- name='ssd_kpt6',
- id=195,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt2'),
- 196:
- dict(
- name='ssd_kpt7',
- id=196,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt29'),
- 197:
- dict(
- name='ssd_kpt8',
- id=197,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt28'),
- 198:
- dict(
- name='ssd_kpt9',
- id=198,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt27'),
- 199:
- dict(
- name='ssd_kpt10',
- id=199,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt26'),
- 200:
- dict(
- name='ssd_kpt11',
- id=200,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt25'),
- 201:
- dict(
- name='ssd_kpt12',
- id=201,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt24'),
- 202:
- dict(
- name='ssd_kpt13',
- id=202,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt23'),
- 203:
- dict(
- name='ssd_kpt14',
- id=203,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt22'),
- 204:
- dict(
- name='ssd_kpt15',
- id=204,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt21'),
- 205:
- dict(
- name='ssd_kpt16',
- id=205,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt20'),
- 206:
- dict(
- name='ssd_kpt17',
- id=206,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt19'),
- 207:
- dict(name='ssd_kpt18', id=207, color=[64, 64, 128], type='', swap=''),
- 208:
- dict(
- name='ssd_kpt19',
- id=208,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt17'),
- 209:
- dict(
- name='ssd_kpt20',
- id=209,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt16'),
- 210:
- dict(
- name='ssd_kpt21',
- id=210,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt15'),
- 211:
- dict(
- name='ssd_kpt22',
- id=211,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt14'),
- 212:
- dict(
- name='ssd_kpt23',
- id=212,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt13'),
- 213:
- dict(
- name='ssd_kpt24',
- id=213,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt12'),
- 214:
- dict(
- name='ssd_kpt25',
- id=214,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt11'),
- 215:
- dict(
- name='ssd_kpt26',
- id=215,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt10'),
- 216:
- dict(
- name='ssd_kpt27',
- id=216,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt9'),
- 217:
- dict(
- name='ssd_kpt28',
- id=217,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt8'),
- 218:
- dict(
- name='ssd_kpt29',
- id=218,
- color=[64, 64, 128],
- type='',
- swap='ssd_kpt7'),
- 219:
- dict(name='lsd_kpt1', id=219, color=[128, 64, 0], type='', swap=''),
- 220:
- dict(
- name='lsd_kpt2',
- id=220,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt6'),
- 221:
- dict(
- name='lsd_kpt3',
- id=221,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt5'),
- 222:
- dict(name='lsd_kpt4', id=222, color=[128, 64, 0], type='', swap=''),
- 223:
- dict(
- name='lsd_kpt5',
- id=223,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt3'),
- 224:
- dict(
- name='lsd_kpt6',
- id=224,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt2'),
- 225:
- dict(
- name='lsd_kpt7',
- id=225,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt37'),
- 226:
- dict(
- name='lsd_kpt8',
- id=226,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt36'),
- 227:
- dict(
- name='lsd_kpt9',
- id=227,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt35'),
- 228:
- dict(
- name='lsd_kpt10',
- id=228,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt34'),
- 229:
- dict(
- name='lsd_kpt11',
- id=229,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt33'),
- 230:
- dict(
- name='lsd_kpt12',
- id=230,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt32'),
- 231:
- dict(
- name='lsd_kpt13',
- id=231,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt31'),
- 232:
- dict(
- name='lsd_kpt14',
- id=232,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt30'),
- 233:
- dict(
- name='lsd_kpt15',
- id=233,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt29'),
- 234:
- dict(
- name='lsd_kpt16',
- id=234,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt28'),
- 235:
- dict(
- name='lsd_kpt17',
- id=235,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt27'),
- 236:
- dict(
- name='lsd_kpt18',
- id=236,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt26'),
- 237:
- dict(
- name='lsd_kpt19',
- id=237,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt25'),
- 238:
- dict(
- name='lsd_kpt20',
- id=238,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt24'),
- 239:
- dict(
- name='lsd_kpt21',
- id=239,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt23'),
- 240:
- dict(name='lsd_kpt22', id=240, color=[128, 64, 0], type='', swap=''),
- 241:
- dict(
- name='lsd_kpt23',
- id=241,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt21'),
- 242:
- dict(
- name='lsd_kpt24',
- id=242,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt20'),
- 243:
- dict(
- name='lsd_kpt25',
- id=243,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt19'),
- 244:
- dict(
- name='lsd_kpt26',
- id=244,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt18'),
- 245:
- dict(
- name='lsd_kpt27',
- id=245,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt17'),
- 246:
- dict(
- name='lsd_kpt28',
- id=246,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt16'),
- 247:
- dict(
- name='lsd_kpt29',
- id=247,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt15'),
- 248:
- dict(
- name='lsd_kpt30',
- id=248,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt14'),
- 249:
- dict(
- name='lsd_kpt31',
- id=249,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt13'),
- 250:
- dict(
- name='lsd_kpt32',
- id=250,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt12'),
- 251:
- dict(
- name='lsd_kpt33',
- id=251,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt11'),
- 252:
- dict(
- name='lsd_kpt34',
- id=252,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt10'),
- 253:
- dict(
- name='lsd_kpt35',
- id=253,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt9'),
- 254:
- dict(
- name='lsd_kpt36',
- id=254,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt8'),
- 255:
- dict(
- name='lsd_kpt37',
- id=255,
- color=[128, 64, 0],
- type='',
- swap='lsd_kpt7'),
- 256:
- dict(name='vd_kpt1', id=256, color=[128, 64, 255], type='', swap=''),
- 257:
- dict(
- name='vd_kpt2',
- id=257,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt6'),
- 258:
- dict(
- name='vd_kpt3',
- id=258,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt5'),
- 259:
- dict(name='vd_kpt4', id=259, color=[128, 64, 255], type='', swap=''),
- 260:
- dict(
- name='vd_kpt5',
- id=260,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt3'),
- 261:
- dict(
- name='vd_kpt6',
- id=261,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt2'),
- 262:
- dict(
- name='vd_kpt7',
- id=262,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt19'),
- 263:
- dict(
- name='vd_kpt8',
- id=263,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt18'),
- 264:
- dict(
- name='vd_kpt9',
- id=264,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt17'),
- 265:
- dict(
- name='vd_kpt10',
- id=265,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt16'),
- 266:
- dict(
- name='vd_kpt11',
- id=266,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt15'),
- 267:
- dict(
- name='vd_kpt12',
- id=267,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt14'),
- 268:
- dict(name='vd_kpt13', id=268, color=[128, 64, 255], type='', swap=''),
- 269:
- dict(
- name='vd_kpt14',
- id=269,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt12'),
- 270:
- dict(
- name='vd_kpt15',
- id=270,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt11'),
- 271:
- dict(
- name='vd_kpt16',
- id=271,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt10'),
- 272:
- dict(
- name='vd_kpt17',
- id=272,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt9'),
- 273:
- dict(
- name='vd_kpt18',
- id=273,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt8'),
- 274:
- dict(
- name='vd_kpt19',
- id=274,
- color=[128, 64, 255],
- type='',
- swap='vd_kpt7'),
- 275:
- dict(name='sd_kpt1', id=275, color=[128, 64, 0], type='', swap=''),
- 276:
- dict(
- name='sd_kpt2',
- id=276,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt6'),
- 277:
- dict(
- name='sd_kpt3',
- id=277,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt5'),
- 278:
- dict(name='sd_kpt4', id=278, color=[128, 64, 0], type='', swap=''),
- 279:
- dict(
- name='sd_kpt5',
- id=279,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt3'),
- 280:
- dict(
- name='sd_kpt6',
- id=280,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt2'),
- 281:
- dict(
- name='sd_kpt7',
- id=281,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt19'),
- 282:
- dict(
- name='sd_kpt8',
- id=282,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt18'),
- 283:
- dict(
- name='sd_kpt9',
- id=283,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt17'),
- 284:
- dict(
- name='sd_kpt10',
- id=284,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt16'),
- 285:
- dict(
- name='sd_kpt11',
- id=285,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt15'),
- 286:
- dict(
- name='sd_kpt12',
- id=286,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt14'),
- 287:
- dict(name='sd_kpt13', id=287, color=[128, 64, 0], type='', swap=''),
- 288:
- dict(
- name='sd_kpt14',
- id=288,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt12'),
- 289:
- dict(
- name='sd_kpt15',
- id=289,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt11'),
- 290:
- dict(
- name='sd_kpt16',
- id=290,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt10'),
- 291:
- dict(
- name='sd_kpt17',
- id=291,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt9'),
- 292:
- dict(
- name='sd_kpt18',
- id=292,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt8'),
- 293:
- dict(
- name='sd_kpt19',
- id=293,
- color=[128, 64, 0],
- type='',
- swap='sd_kpt7')
- }),
- skeleton_info=dict({
- 0:
- dict(link=('sss_kpt1', 'sss_kpt2'), id=0, color=[255, 128, 0]),
- 1:
- dict(link=('sss_kpt2', 'sss_kpt7'), id=1, color=[255, 128, 0]),
- 2:
- dict(link=('sss_kpt7', 'sss_kpt8'), id=2, color=[255, 128, 0]),
- 3:
- dict(link=('sss_kpt8', 'sss_kpt9'), id=3, color=[255, 128, 0]),
- 4:
- dict(link=('sss_kpt9', 'sss_kpt10'), id=4, color=[255, 128, 0]),
- 5:
- dict(link=('sss_kpt10', 'sss_kpt11'), id=5, color=[255, 128, 0]),
- 6:
- dict(link=('sss_kpt11', 'sss_kpt12'), id=6, color=[255, 128, 0]),
- 7:
- dict(link=('sss_kpt12', 'sss_kpt13'), id=7, color=[255, 128, 0]),
- 8:
- dict(link=('sss_kpt13', 'sss_kpt14'), id=8, color=[255, 128, 0]),
- 9:
- dict(link=('sss_kpt14', 'sss_kpt15'), id=9, color=[255, 128, 0]),
- 10:
- dict(link=('sss_kpt15', 'sss_kpt16'), id=10, color=[255, 128, 0]),
- 11:
- dict(link=('sss_kpt16', 'sss_kpt17'), id=11, color=[255, 128, 0]),
- 12:
- dict(link=('sss_kpt17', 'sss_kpt18'), id=12, color=[255, 128, 0]),
- 13:
- dict(link=('sss_kpt18', 'sss_kpt19'), id=13, color=[255, 128, 0]),
- 14:
- dict(link=('sss_kpt19', 'sss_kpt20'), id=14, color=[255, 128, 0]),
- 15:
- dict(link=('sss_kpt20', 'sss_kpt21'), id=15, color=[255, 128, 0]),
- 16:
- dict(link=('sss_kpt21', 'sss_kpt22'), id=16, color=[255, 128, 0]),
- 17:
- dict(link=('sss_kpt22', 'sss_kpt23'), id=17, color=[255, 128, 0]),
- 18:
- dict(link=('sss_kpt23', 'sss_kpt24'), id=18, color=[255, 128, 0]),
- 19:
- dict(link=('sss_kpt24', 'sss_kpt25'), id=19, color=[255, 128, 0]),
- 20:
- dict(link=('sss_kpt25', 'sss_kpt6'), id=20, color=[255, 128, 0]),
- 21:
- dict(link=('sss_kpt6', 'sss_kpt1'), id=21, color=[255, 128, 0]),
- 22:
- dict(link=('sss_kpt2', 'sss_kpt3'), id=22, color=[255, 128, 0]),
- 23:
- dict(link=('sss_kpt3', 'sss_kpt4'), id=23, color=[255, 128, 0]),
- 24:
- dict(link=('sss_kpt4', 'sss_kpt5'), id=24, color=[255, 128, 0]),
- 25:
- dict(link=('sss_kpt5', 'sss_kpt6'), id=25, color=[255, 128, 0]),
- 26:
- dict(link=('lss_kpt1', 'lss_kpt2'), id=26, color=[255, 0, 128]),
- 27:
- dict(link=('lss_kpt2', 'lss_kpt7'), id=27, color=[255, 0, 128]),
- 28:
- dict(link=('lss_kpt7', 'lss_kpt8'), id=28, color=[255, 0, 128]),
- 29:
- dict(link=('lss_kpt8', 'lss_kpt9'), id=29, color=[255, 0, 128]),
- 30:
- dict(link=('lss_kpt9', 'lss_kpt10'), id=30, color=[255, 0, 128]),
- 31:
- dict(link=('lss_kpt10', 'lss_kpt11'), id=31, color=[255, 0, 128]),
- 32:
- dict(link=('lss_kpt11', 'lss_kpt12'), id=32, color=[255, 0, 128]),
- 33:
- dict(link=('lss_kpt12', 'lss_kpt13'), id=33, color=[255, 0, 128]),
- 34:
- dict(link=('lss_kpt13', 'lss_kpt14'), id=34, color=[255, 0, 128]),
- 35:
- dict(link=('lss_kpt14', 'lss_kpt15'), id=35, color=[255, 0, 128]),
- 36:
- dict(link=('lss_kpt15', 'lss_kpt16'), id=36, color=[255, 0, 128]),
- 37:
- dict(link=('lss_kpt16', 'lss_kpt17'), id=37, color=[255, 0, 128]),
- 38:
- dict(link=('lss_kpt17', 'lss_kpt18'), id=38, color=[255, 0, 128]),
- 39:
- dict(link=('lss_kpt18', 'lss_kpt19'), id=39, color=[255, 0, 128]),
- 40:
- dict(link=('lss_kpt19', 'lss_kpt20'), id=40, color=[255, 0, 128]),
- 41:
- dict(link=('lss_kpt20', 'lss_kpt21'), id=41, color=[255, 0, 128]),
- 42:
- dict(link=('lss_kpt21', 'lss_kpt22'), id=42, color=[255, 0, 128]),
- 43:
- dict(link=('lss_kpt22', 'lss_kpt23'), id=43, color=[255, 0, 128]),
- 44:
- dict(link=('lss_kpt23', 'lss_kpt24'), id=44, color=[255, 0, 128]),
- 45:
- dict(link=('lss_kpt24', 'lss_kpt25'), id=45, color=[255, 0, 128]),
- 46:
- dict(link=('lss_kpt25', 'lss_kpt26'), id=46, color=[255, 0, 128]),
- 47:
- dict(link=('lss_kpt26', 'lss_kpt27'), id=47, color=[255, 0, 128]),
- 48:
- dict(link=('lss_kpt27', 'lss_kpt28'), id=48, color=[255, 0, 128]),
- 49:
- dict(link=('lss_kpt28', 'lss_kpt29'), id=49, color=[255, 0, 128]),
- 50:
- dict(link=('lss_kpt29', 'lss_kpt30'), id=50, color=[255, 0, 128]),
- 51:
- dict(link=('lss_kpt30', 'lss_kpt31'), id=51, color=[255, 0, 128]),
- 52:
- dict(link=('lss_kpt31', 'lss_kpt32'), id=52, color=[255, 0, 128]),
- 53:
- dict(link=('lss_kpt32', 'lss_kpt33'), id=53, color=[255, 0, 128]),
- 54:
- dict(link=('lss_kpt33', 'lss_kpt6'), id=54, color=[255, 0, 128]),
- 55:
- dict(link=('lss_kpt6', 'lss_kpt5'), id=55, color=[255, 0, 128]),
- 56:
- dict(link=('lss_kpt5', 'lss_kpt4'), id=56, color=[255, 0, 128]),
- 57:
- dict(link=('lss_kpt4', 'lss_kpt3'), id=57, color=[255, 0, 128]),
- 58:
- dict(link=('lss_kpt3', 'lss_kpt2'), id=58, color=[255, 0, 128]),
- 59:
- dict(link=('lss_kpt6', 'lss_kpt1'), id=59, color=[255, 0, 128]),
- 60:
- dict(link=('sso_kpt1', 'sso_kpt4'), id=60, color=[128, 0, 255]),
- 61:
- dict(link=('sso_kpt4', 'sso_kpt7'), id=61, color=[128, 0, 255]),
- 62:
- dict(link=('sso_kpt7', 'sso_kpt8'), id=62, color=[128, 0, 255]),
- 63:
- dict(link=('sso_kpt8', 'sso_kpt9'), id=63, color=[128, 0, 255]),
- 64:
- dict(link=('sso_kpt9', 'sso_kpt10'), id=64, color=[128, 0, 255]),
- 65:
- dict(link=('sso_kpt10', 'sso_kpt11'), id=65, color=[128, 0, 255]),
- 66:
- dict(link=('sso_kpt11', 'sso_kpt12'), id=66, color=[128, 0, 255]),
- 67:
- dict(link=('sso_kpt12', 'sso_kpt13'), id=67, color=[128, 0, 255]),
- 68:
- dict(link=('sso_kpt13', 'sso_kpt14'), id=68, color=[128, 0, 255]),
- 69:
- dict(link=('sso_kpt14', 'sso_kpt15'), id=69, color=[128, 0, 255]),
- 70:
- dict(link=('sso_kpt15', 'sso_kpt16'), id=70, color=[128, 0, 255]),
- 71:
- dict(link=('sso_kpt16', 'sso_kpt31'), id=71, color=[128, 0, 255]),
- 72:
- dict(link=('sso_kpt31', 'sso_kpt30'), id=72, color=[128, 0, 255]),
- 73:
- dict(link=('sso_kpt30', 'sso_kpt2'), id=73, color=[128, 0, 255]),
- 74:
- dict(link=('sso_kpt2', 'sso_kpt3'), id=74, color=[128, 0, 255]),
- 75:
- dict(link=('sso_kpt3', 'sso_kpt4'), id=75, color=[128, 0, 255]),
- 76:
- dict(link=('sso_kpt1', 'sso_kpt6'), id=76, color=[128, 0, 255]),
- 77:
- dict(link=('sso_kpt6', 'sso_kpt25'), id=77, color=[128, 0, 255]),
- 78:
- dict(link=('sso_kpt25', 'sso_kpt24'), id=78, color=[128, 0, 255]),
- 79:
- dict(link=('sso_kpt24', 'sso_kpt23'), id=79, color=[128, 0, 255]),
- 80:
- dict(link=('sso_kpt23', 'sso_kpt22'), id=80, color=[128, 0, 255]),
- 81:
- dict(link=('sso_kpt22', 'sso_kpt21'), id=81, color=[128, 0, 255]),
- 82:
- dict(link=('sso_kpt21', 'sso_kpt20'), id=82, color=[128, 0, 255]),
- 83:
- dict(link=('sso_kpt20', 'sso_kpt19'), id=83, color=[128, 0, 255]),
- 84:
- dict(link=('sso_kpt19', 'sso_kpt18'), id=84, color=[128, 0, 255]),
- 85:
- dict(link=('sso_kpt18', 'sso_kpt17'), id=85, color=[128, 0, 255]),
- 86:
- dict(link=('sso_kpt17', 'sso_kpt29'), id=86, color=[128, 0, 255]),
- 87:
- dict(link=('sso_kpt29', 'sso_kpt28'), id=87, color=[128, 0, 255]),
- 88:
- dict(link=('sso_kpt28', 'sso_kpt27'), id=88, color=[128, 0, 255]),
- 89:
- dict(link=('sso_kpt27', 'sso_kpt26'), id=89, color=[128, 0, 255]),
- 90:
- dict(link=('sso_kpt26', 'sso_kpt5'), id=90, color=[128, 0, 255]),
- 91:
- dict(link=('sso_kpt5', 'sso_kpt6'), id=91, color=[128, 0, 255]),
- 92:
- dict(link=('lso_kpt1', 'lso_kpt2'), id=92, color=[0, 128, 255]),
- 93:
- dict(link=('lso_kpt2', 'lso_kpt7'), id=93, color=[0, 128, 255]),
- 94:
- dict(link=('lso_kpt7', 'lso_kpt8'), id=94, color=[0, 128, 255]),
- 95:
- dict(link=('lso_kpt8', 'lso_kpt9'), id=95, color=[0, 128, 255]),
- 96:
- dict(link=('lso_kpt9', 'lso_kpt10'), id=96, color=[0, 128, 255]),
- 97:
- dict(link=('lso_kpt10', 'lso_kpt11'), id=97, color=[0, 128, 255]),
- 98:
- dict(link=('lso_kpt11', 'lso_kpt12'), id=98, color=[0, 128, 255]),
- 99:
- dict(link=('lso_kpt12', 'lso_kpt13'), id=99, color=[0, 128, 255]),
- 100:
- dict(link=('lso_kpt13', 'lso_kpt14'), id=100, color=[0, 128, 255]),
- 101:
- dict(link=('lso_kpt14', 'lso_kpt15'), id=101, color=[0, 128, 255]),
- 102:
- dict(link=('lso_kpt15', 'lso_kpt16'), id=102, color=[0, 128, 255]),
- 103:
- dict(link=('lso_kpt16', 'lso_kpt17'), id=103, color=[0, 128, 255]),
- 104:
- dict(link=('lso_kpt17', 'lso_kpt18'), id=104, color=[0, 128, 255]),
- 105:
- dict(link=('lso_kpt18', 'lso_kpt19'), id=105, color=[0, 128, 255]),
- 106:
- dict(link=('lso_kpt19', 'lso_kpt20'), id=106, color=[0, 128, 255]),
- 107:
- dict(link=('lso_kpt20', 'lso_kpt39'), id=107, color=[0, 128, 255]),
- 108:
- dict(link=('lso_kpt39', 'lso_kpt38'), id=108, color=[0, 128, 255]),
- 109:
- dict(link=('lso_kpt38', 'lso_kpt4'), id=109, color=[0, 128, 255]),
- 110:
- dict(link=('lso_kpt4', 'lso_kpt3'), id=110, color=[0, 128, 255]),
- 111:
- dict(link=('lso_kpt3', 'lso_kpt2'), id=111, color=[0, 128, 255]),
- 112:
- dict(link=('lso_kpt1', 'lso_kpt6'), id=112, color=[0, 128, 255]),
- 113:
- dict(link=('lso_kpt6', 'lso_kpt33'), id=113, color=[0, 128, 255]),
- 114:
- dict(link=('lso_kpt33', 'lso_kpt32'), id=114, color=[0, 128, 255]),
- 115:
- dict(link=('lso_kpt32', 'lso_kpt31'), id=115, color=[0, 128, 255]),
- 116:
- dict(link=('lso_kpt31', 'lso_kpt30'), id=116, color=[0, 128, 255]),
- 117:
- dict(link=('lso_kpt30', 'lso_kpt29'), id=117, color=[0, 128, 255]),
- 118:
- dict(link=('lso_kpt29', 'lso_kpt28'), id=118, color=[0, 128, 255]),
- 119:
- dict(link=('lso_kpt28', 'lso_kpt27'), id=119, color=[0, 128, 255]),
- 120:
- dict(link=('lso_kpt27', 'lso_kpt26'), id=120, color=[0, 128, 255]),
- 121:
- dict(link=('lso_kpt26', 'lso_kpt25'), id=121, color=[0, 128, 255]),
- 122:
- dict(link=('lso_kpt25', 'lso_kpt24'), id=122, color=[0, 128, 255]),
- 123:
- dict(link=('lso_kpt24', 'lso_kpt23'), id=123, color=[0, 128, 255]),
- 124:
- dict(link=('lso_kpt23', 'lso_kpt22'), id=124, color=[0, 128, 255]),
- 125:
- dict(link=('lso_kpt22', 'lso_kpt21'), id=125, color=[0, 128, 255]),
- 126:
- dict(link=('lso_kpt21', 'lso_kpt37'), id=126, color=[0, 128, 255]),
- 127:
- dict(link=('lso_kpt37', 'lso_kpt36'), id=127, color=[0, 128, 255]),
- 128:
- dict(link=('lso_kpt36', 'lso_kpt35'), id=128, color=[0, 128, 255]),
- 129:
- dict(link=('lso_kpt35', 'lso_kpt34'), id=129, color=[0, 128, 255]),
- 130:
- dict(link=('lso_kpt34', 'lso_kpt5'), id=130, color=[0, 128, 255]),
- 131:
- dict(link=('lso_kpt5', 'lso_kpt6'), id=131, color=[0, 128, 255]),
- 132:
- dict(link=('vest_kpt1', 'vest_kpt2'), id=132, color=[0, 128, 128]),
- 133:
- dict(link=('vest_kpt2', 'vest_kpt7'), id=133, color=[0, 128, 128]),
- 134:
- dict(link=('vest_kpt7', 'vest_kpt8'), id=134, color=[0, 128, 128]),
- 135:
- dict(link=('vest_kpt8', 'vest_kpt9'), id=135, color=[0, 128, 128]),
- 136:
- dict(link=('vest_kpt9', 'vest_kpt10'), id=136, color=[0, 128, 128]),
- 137:
- dict(link=('vest_kpt10', 'vest_kpt11'), id=137, color=[0, 128, 128]),
- 138:
- dict(link=('vest_kpt11', 'vest_kpt12'), id=138, color=[0, 128, 128]),
- 139:
- dict(link=('vest_kpt12', 'vest_kpt13'), id=139, color=[0, 128, 128]),
- 140:
- dict(link=('vest_kpt13', 'vest_kpt14'), id=140, color=[0, 128, 128]),
- 141:
- dict(link=('vest_kpt14', 'vest_kpt15'), id=141, color=[0, 128, 128]),
- 142:
- dict(link=('vest_kpt15', 'vest_kpt6'), id=142, color=[0, 128, 128]),
- 143:
- dict(link=('vest_kpt6', 'vest_kpt1'), id=143, color=[0, 128, 128]),
- 144:
- dict(link=('vest_kpt2', 'vest_kpt3'), id=144, color=[0, 128, 128]),
- 145:
- dict(link=('vest_kpt3', 'vest_kpt4'), id=145, color=[0, 128, 128]),
- 146:
- dict(link=('vest_kpt4', 'vest_kpt5'), id=146, color=[0, 128, 128]),
- 147:
- dict(link=('vest_kpt5', 'vest_kpt6'), id=147, color=[0, 128, 128]),
- 148:
- dict(link=('sling_kpt1', 'sling_kpt2'), id=148, color=[0, 0, 128]),
- 149:
- dict(link=('sling_kpt2', 'sling_kpt8'), id=149, color=[0, 0, 128]),
- 150:
- dict(link=('sling_kpt8', 'sling_kpt9'), id=150, color=[0, 0, 128]),
- 151:
- dict(link=('sling_kpt9', 'sling_kpt10'), id=151, color=[0, 0, 128]),
- 152:
- dict(link=('sling_kpt10', 'sling_kpt11'), id=152, color=[0, 0, 128]),
- 153:
- dict(link=('sling_kpt11', 'sling_kpt12'), id=153, color=[0, 0, 128]),
- 154:
- dict(link=('sling_kpt12', 'sling_kpt13'), id=154, color=[0, 0, 128]),
- 155:
- dict(link=('sling_kpt13', 'sling_kpt14'), id=155, color=[0, 0, 128]),
- 156:
- dict(link=('sling_kpt14', 'sling_kpt6'), id=156, color=[0, 0, 128]),
- 157:
- dict(link=('sling_kpt2', 'sling_kpt7'), id=157, color=[0, 0, 128]),
- 158:
- dict(link=('sling_kpt6', 'sling_kpt15'), id=158, color=[0, 0, 128]),
- 159:
- dict(link=('sling_kpt2', 'sling_kpt3'), id=159, color=[0, 0, 128]),
- 160:
- dict(link=('sling_kpt3', 'sling_kpt4'), id=160, color=[0, 0, 128]),
- 161:
- dict(link=('sling_kpt4', 'sling_kpt5'), id=161, color=[0, 0, 128]),
- 162:
- dict(link=('sling_kpt5', 'sling_kpt6'), id=162, color=[0, 0, 128]),
- 163:
- dict(link=('sling_kpt1', 'sling_kpt6'), id=163, color=[0, 0, 128]),
- 164:
- dict(
- link=('shorts_kpt1', 'shorts_kpt4'), id=164, color=[128, 128,
- 128]),
- 165:
- dict(
- link=('shorts_kpt4', 'shorts_kpt5'), id=165, color=[128, 128,
- 128]),
- 166:
- dict(
- link=('shorts_kpt5', 'shorts_kpt6'), id=166, color=[128, 128,
- 128]),
- 167:
- dict(
- link=('shorts_kpt6', 'shorts_kpt7'), id=167, color=[128, 128,
- 128]),
- 168:
- dict(
- link=('shorts_kpt7', 'shorts_kpt8'), id=168, color=[128, 128,
- 128]),
- 169:
- dict(
- link=('shorts_kpt8', 'shorts_kpt9'), id=169, color=[128, 128,
- 128]),
- 170:
- dict(
- link=('shorts_kpt9', 'shorts_kpt10'),
- id=170,
- color=[128, 128, 128]),
- 171:
- dict(
- link=('shorts_kpt10', 'shorts_kpt3'),
- id=171,
- color=[128, 128, 128]),
- 172:
- dict(
- link=('shorts_kpt3', 'shorts_kpt2'), id=172, color=[128, 128,
- 128]),
- 173:
- dict(
- link=('shorts_kpt2', 'shorts_kpt1'), id=173, color=[128, 128,
- 128]),
- 174:
- dict(
- link=('trousers_kpt1', 'trousers_kpt4'),
- id=174,
- color=[128, 0, 128]),
- 175:
- dict(
- link=('trousers_kpt4', 'trousers_kpt5'),
- id=175,
- color=[128, 0, 128]),
- 176:
- dict(
- link=('trousers_kpt5', 'trousers_kpt6'),
- id=176,
- color=[128, 0, 128]),
- 177:
- dict(
- link=('trousers_kpt6', 'trousers_kpt7'),
- id=177,
- color=[128, 0, 128]),
- 178:
- dict(
- link=('trousers_kpt7', 'trousers_kpt8'),
- id=178,
- color=[128, 0, 128]),
- 179:
- dict(
- link=('trousers_kpt8', 'trousers_kpt9'),
- id=179,
- color=[128, 0, 128]),
- 180:
- dict(
- link=('trousers_kpt9', 'trousers_kpt10'),
- id=180,
- color=[128, 0, 128]),
- 181:
- dict(
- link=('trousers_kpt10', 'trousers_kpt11'),
- id=181,
- color=[128, 0, 128]),
- 182:
- dict(
- link=('trousers_kpt11', 'trousers_kpt12'),
- id=182,
- color=[128, 0, 128]),
- 183:
- dict(
- link=('trousers_kpt12', 'trousers_kpt13'),
- id=183,
- color=[128, 0, 128]),
- 184:
- dict(
- link=('trousers_kpt13', 'trousers_kpt14'),
- id=184,
- color=[128, 0, 128]),
- 185:
- dict(
- link=('trousers_kpt14', 'trousers_kpt3'),
- id=185,
- color=[128, 0, 128]),
- 186:
- dict(
- link=('trousers_kpt3', 'trousers_kpt2'),
- id=186,
- color=[128, 0, 128]),
- 187:
- dict(
- link=('trousers_kpt2', 'trousers_kpt1'),
- id=187,
- color=[128, 0, 128]),
- 188:
- dict(link=('skirt_kpt1', 'skirt_kpt4'), id=188, color=[64, 128, 128]),
- 189:
- dict(link=('skirt_kpt4', 'skirt_kpt5'), id=189, color=[64, 128, 128]),
- 190:
- dict(link=('skirt_kpt5', 'skirt_kpt6'), id=190, color=[64, 128, 128]),
- 191:
- dict(link=('skirt_kpt6', 'skirt_kpt7'), id=191, color=[64, 128, 128]),
- 192:
- dict(link=('skirt_kpt7', 'skirt_kpt8'), id=192, color=[64, 128, 128]),
- 193:
- dict(link=('skirt_kpt8', 'skirt_kpt3'), id=193, color=[64, 128, 128]),
- 194:
- dict(link=('skirt_kpt3', 'skirt_kpt2'), id=194, color=[64, 128, 128]),
- 195:
- dict(link=('skirt_kpt2', 'skirt_kpt1'), id=195, color=[64, 128, 128]),
- 196:
- dict(link=('ssd_kpt1', 'ssd_kpt2'), id=196, color=[64, 64, 128]),
- 197:
- dict(link=('ssd_kpt2', 'ssd_kpt7'), id=197, color=[64, 64, 128]),
- 198:
- dict(link=('ssd_kpt7', 'ssd_kpt8'), id=198, color=[64, 64, 128]),
- 199:
- dict(link=('ssd_kpt8', 'ssd_kpt9'), id=199, color=[64, 64, 128]),
- 200:
- dict(link=('ssd_kpt9', 'ssd_kpt10'), id=200, color=[64, 64, 128]),
- 201:
- dict(link=('ssd_kpt10', 'ssd_kpt11'), id=201, color=[64, 64, 128]),
- 202:
- dict(link=('ssd_kpt11', 'ssd_kpt12'), id=202, color=[64, 64, 128]),
- 203:
- dict(link=('ssd_kpt12', 'ssd_kpt13'), id=203, color=[64, 64, 128]),
- 204:
- dict(link=('ssd_kpt13', 'ssd_kpt14'), id=204, color=[64, 64, 128]),
- 205:
- dict(link=('ssd_kpt14', 'ssd_kpt15'), id=205, color=[64, 64, 128]),
- 206:
- dict(link=('ssd_kpt15', 'ssd_kpt16'), id=206, color=[64, 64, 128]),
- 207:
- dict(link=('ssd_kpt16', 'ssd_kpt17'), id=207, color=[64, 64, 128]),
- 208:
- dict(link=('ssd_kpt17', 'ssd_kpt18'), id=208, color=[64, 64, 128]),
- 209:
- dict(link=('ssd_kpt18', 'ssd_kpt19'), id=209, color=[64, 64, 128]),
- 210:
- dict(link=('ssd_kpt19', 'ssd_kpt20'), id=210, color=[64, 64, 128]),
- 211:
- dict(link=('ssd_kpt20', 'ssd_kpt21'), id=211, color=[64, 64, 128]),
- 212:
- dict(link=('ssd_kpt21', 'ssd_kpt22'), id=212, color=[64, 64, 128]),
- 213:
- dict(link=('ssd_kpt22', 'ssd_kpt23'), id=213, color=[64, 64, 128]),
- 214:
- dict(link=('ssd_kpt23', 'ssd_kpt24'), id=214, color=[64, 64, 128]),
- 215:
- dict(link=('ssd_kpt24', 'ssd_kpt25'), id=215, color=[64, 64, 128]),
- 216:
- dict(link=('ssd_kpt25', 'ssd_kpt26'), id=216, color=[64, 64, 128]),
- 217:
- dict(link=('ssd_kpt26', 'ssd_kpt27'), id=217, color=[64, 64, 128]),
- 218:
- dict(link=('ssd_kpt27', 'ssd_kpt28'), id=218, color=[64, 64, 128]),
- 219:
- dict(link=('ssd_kpt28', 'ssd_kpt29'), id=219, color=[64, 64, 128]),
- 220:
- dict(link=('ssd_kpt29', 'ssd_kpt6'), id=220, color=[64, 64, 128]),
- 221:
- dict(link=('ssd_kpt6', 'ssd_kpt5'), id=221, color=[64, 64, 128]),
- 222:
- dict(link=('ssd_kpt5', 'ssd_kpt4'), id=222, color=[64, 64, 128]),
- 223:
- dict(link=('ssd_kpt4', 'ssd_kpt3'), id=223, color=[64, 64, 128]),
- 224:
- dict(link=('ssd_kpt3', 'ssd_kpt2'), id=224, color=[64, 64, 128]),
- 225:
- dict(link=('ssd_kpt6', 'ssd_kpt1'), id=225, color=[64, 64, 128]),
- 226:
- dict(link=('lsd_kpt1', 'lsd_kpt2'), id=226, color=[128, 64, 0]),
- 227:
- dict(link=('lsd_kpt2', 'lsd_kpt7'), id=227, color=[128, 64, 0]),
- 228:
- dict(link=('lsd_kpt7', 'lsd_kpt8'), id=228, color=[128, 64, 0]),
- 229:
- dict(link=('lsd_kpt8', 'lsd_kpt9'), id=229, color=[128, 64, 0]),
- 230:
- dict(link=('lsd_kpt9', 'lsd_kpt10'), id=230, color=[128, 64, 0]),
- 231:
- dict(link=('lsd_kpt10', 'lsd_kpt11'), id=231, color=[128, 64, 0]),
- 232:
- dict(link=('lsd_kpt11', 'lsd_kpt12'), id=232, color=[128, 64, 0]),
- 233:
- dict(link=('lsd_kpt12', 'lsd_kpt13'), id=233, color=[128, 64, 0]),
- 234:
- dict(link=('lsd_kpt13', 'lsd_kpt14'), id=234, color=[128, 64, 0]),
- 235:
- dict(link=('lsd_kpt14', 'lsd_kpt15'), id=235, color=[128, 64, 0]),
- 236:
- dict(link=('lsd_kpt15', 'lsd_kpt16'), id=236, color=[128, 64, 0]),
- 237:
- dict(link=('lsd_kpt16', 'lsd_kpt17'), id=237, color=[128, 64, 0]),
- 238:
- dict(link=('lsd_kpt17', 'lsd_kpt18'), id=238, color=[128, 64, 0]),
- 239:
- dict(link=('lsd_kpt18', 'lsd_kpt19'), id=239, color=[128, 64, 0]),
- 240:
- dict(link=('lsd_kpt19', 'lsd_kpt20'), id=240, color=[128, 64, 0]),
- 241:
- dict(link=('lsd_kpt20', 'lsd_kpt21'), id=241, color=[128, 64, 0]),
- 242:
- dict(link=('lsd_kpt21', 'lsd_kpt22'), id=242, color=[128, 64, 0]),
- 243:
- dict(link=('lsd_kpt22', 'lsd_kpt23'), id=243, color=[128, 64, 0]),
- 244:
- dict(link=('lsd_kpt23', 'lsd_kpt24'), id=244, color=[128, 64, 0]),
- 245:
- dict(link=('lsd_kpt24', 'lsd_kpt25'), id=245, color=[128, 64, 0]),
- 246:
- dict(link=('lsd_kpt25', 'lsd_kpt26'), id=246, color=[128, 64, 0]),
- 247:
- dict(link=('lsd_kpt26', 'lsd_kpt27'), id=247, color=[128, 64, 0]),
- 248:
- dict(link=('lsd_kpt27', 'lsd_kpt28'), id=248, color=[128, 64, 0]),
- 249:
- dict(link=('lsd_kpt28', 'lsd_kpt29'), id=249, color=[128, 64, 0]),
- 250:
- dict(link=('lsd_kpt29', 'lsd_kpt30'), id=250, color=[128, 64, 0]),
- 251:
- dict(link=('lsd_kpt30', 'lsd_kpt31'), id=251, color=[128, 64, 0]),
- 252:
- dict(link=('lsd_kpt31', 'lsd_kpt32'), id=252, color=[128, 64, 0]),
- 253:
- dict(link=('lsd_kpt32', 'lsd_kpt33'), id=253, color=[128, 64, 0]),
- 254:
- dict(link=('lsd_kpt33', 'lsd_kpt34'), id=254, color=[128, 64, 0]),
- 255:
- dict(link=('lsd_kpt34', 'lsd_kpt35'), id=255, color=[128, 64, 0]),
- 256:
- dict(link=('lsd_kpt35', 'lsd_kpt36'), id=256, color=[128, 64, 0]),
- 257:
- dict(link=('lsd_kpt36', 'lsd_kpt37'), id=257, color=[128, 64, 0]),
- 258:
- dict(link=('lsd_kpt37', 'lsd_kpt6'), id=258, color=[128, 64, 0]),
- 259:
- dict(link=('lsd_kpt6', 'lsd_kpt5'), id=259, color=[128, 64, 0]),
- 260:
- dict(link=('lsd_kpt5', 'lsd_kpt4'), id=260, color=[128, 64, 0]),
- 261:
- dict(link=('lsd_kpt4', 'lsd_kpt3'), id=261, color=[128, 64, 0]),
- 262:
- dict(link=('lsd_kpt3', 'lsd_kpt2'), id=262, color=[128, 64, 0]),
- 263:
- dict(link=('lsd_kpt6', 'lsd_kpt1'), id=263, color=[128, 64, 0]),
- 264:
- dict(link=('vd_kpt1', 'vd_kpt2'), id=264, color=[128, 64, 255]),
- 265:
- dict(link=('vd_kpt2', 'vd_kpt7'), id=265, color=[128, 64, 255]),
- 266:
- dict(link=('vd_kpt7', 'vd_kpt8'), id=266, color=[128, 64, 255]),
- 267:
- dict(link=('vd_kpt8', 'vd_kpt9'), id=267, color=[128, 64, 255]),
- 268:
- dict(link=('vd_kpt9', 'vd_kpt10'), id=268, color=[128, 64, 255]),
- 269:
- dict(link=('vd_kpt10', 'vd_kpt11'), id=269, color=[128, 64, 255]),
- 270:
- dict(link=('vd_kpt11', 'vd_kpt12'), id=270, color=[128, 64, 255]),
- 271:
- dict(link=('vd_kpt12', 'vd_kpt13'), id=271, color=[128, 64, 255]),
- 272:
- dict(link=('vd_kpt13', 'vd_kpt14'), id=272, color=[128, 64, 255]),
- 273:
- dict(link=('vd_kpt14', 'vd_kpt15'), id=273, color=[128, 64, 255]),
- 274:
- dict(link=('vd_kpt15', 'vd_kpt16'), id=274, color=[128, 64, 255]),
- 275:
- dict(link=('vd_kpt16', 'vd_kpt17'), id=275, color=[128, 64, 255]),
- 276:
- dict(link=('vd_kpt17', 'vd_kpt18'), id=276, color=[128, 64, 255]),
- 277:
- dict(link=('vd_kpt18', 'vd_kpt19'), id=277, color=[128, 64, 255]),
- 278:
- dict(link=('vd_kpt19', 'vd_kpt6'), id=278, color=[128, 64, 255]),
- 279:
- dict(link=('vd_kpt6', 'vd_kpt5'), id=279, color=[128, 64, 255]),
- 280:
- dict(link=('vd_kpt5', 'vd_kpt4'), id=280, color=[128, 64, 255]),
- 281:
- dict(link=('vd_kpt4', 'vd_kpt3'), id=281, color=[128, 64, 255]),
- 282:
- dict(link=('vd_kpt3', 'vd_kpt2'), id=282, color=[128, 64, 255]),
- 283:
- dict(link=('vd_kpt6', 'vd_kpt1'), id=283, color=[128, 64, 255]),
- 284:
- dict(link=('sd_kpt1', 'sd_kpt2'), id=284, color=[128, 64, 0]),
- 285:
- dict(link=('sd_kpt2', 'sd_kpt8'), id=285, color=[128, 64, 0]),
- 286:
- dict(link=('sd_kpt8', 'sd_kpt9'), id=286, color=[128, 64, 0]),
- 287:
- dict(link=('sd_kpt9', 'sd_kpt10'), id=287, color=[128, 64, 0]),
- 288:
- dict(link=('sd_kpt10', 'sd_kpt11'), id=288, color=[128, 64, 0]),
- 289:
- dict(link=('sd_kpt11', 'sd_kpt12'), id=289, color=[128, 64, 0]),
- 290:
- dict(link=('sd_kpt12', 'sd_kpt13'), id=290, color=[128, 64, 0]),
- 291:
- dict(link=('sd_kpt13', 'sd_kpt14'), id=291, color=[128, 64, 0]),
- 292:
- dict(link=('sd_kpt14', 'sd_kpt15'), id=292, color=[128, 64, 0]),
- 293:
- dict(link=('sd_kpt15', 'sd_kpt16'), id=293, color=[128, 64, 0]),
- 294:
- dict(link=('sd_kpt16', 'sd_kpt17'), id=294, color=[128, 64, 0]),
- 295:
- dict(link=('sd_kpt17', 'sd_kpt18'), id=295, color=[128, 64, 0]),
- 296:
- dict(link=('sd_kpt18', 'sd_kpt6'), id=296, color=[128, 64, 0]),
- 297:
- dict(link=('sd_kpt6', 'sd_kpt5'), id=297, color=[128, 64, 0]),
- 298:
- dict(link=('sd_kpt5', 'sd_kpt4'), id=298, color=[128, 64, 0]),
- 299:
- dict(link=('sd_kpt4', 'sd_kpt3'), id=299, color=[128, 64, 0]),
- 300:
- dict(link=('sd_kpt3', 'sd_kpt2'), id=300, color=[128, 64, 0]),
- 301:
- dict(link=('sd_kpt2', 'sd_kpt7'), id=301, color=[128, 64, 0]),
- 302:
- dict(link=('sd_kpt6', 'sd_kpt19'), id=302, color=[128, 64, 0]),
- 303:
- dict(link=('sd_kpt6', 'sd_kpt1'), id=303, color=[128, 64, 0])
- }),
- joint_weights=[
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
- 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
- ],
- sigmas=[])
- param_scheduler = [
- dict(
- type='LinearLR', begin=0, end=500, start_factor=0.001, by_epoch=False),
- dict(
- type='MultiStepLR',
- begin=0,
- end=150,
- milestones=[100, 130],
- gamma=0.1,
- by_epoch=True)
- ]
- optim_wrapper = dict(optimizer=dict(type='Adam', lr=0.0005))
- auto_scale_lr = dict(base_batch_size=512)
- dataset_type = 'DeepFashion2Dataset'
- data_mode = 'topdown'
- data_root = 'data/deepfashion2/'
- codec = dict(
- type='MSRAHeatmap', input_size=(192, 256), heatmap_size=(48, 64), sigma=2)
- train_pipeline = [
- dict(type='LoadImage'),
- dict(type='GetBBoxCenterScale'),
- dict(type='RandomFlip', direction='horizontal'),
- dict(
- type='RandomBBoxTransform',
- shift_prob=0,
- rotate_factor=60,
- scale_factor=(0.75, 1.25)),
- dict(type='TopdownAffine', input_size=(192, 256)),
- dict(
- type='GenerateTarget',
- encoder=dict(
- type='MSRAHeatmap',
- input_size=(192, 256),
- heatmap_size=(48, 64),
- sigma=2)),
- dict(type='PackPoseInputs')
- ]
- val_pipeline = [
- dict(type='LoadImage', backend_args=dict(backend='local')),
- dict(type='GetBBoxCenterScale'),
- dict(type='TopdownAffine', input_size=(192, 256)),
- dict(type='PackPoseInputs')
- ]
- train_dataloader = dict(
- batch_size=16,
- num_workers=6,
- persistent_workers=True,
- sampler=dict(type='DefaultSampler', shuffle=True),
- dataset=dict(
- type='DeepFashion2Dataset',
- data_root='data/deepfashion2/',
- data_mode='topdown',
- ann_file='train/deepfashion2_long_sleeved_dress.json',
- data_prefix=dict(img='train/image/'),
- pipeline=[
- dict(type='LoadImage'),
- dict(type='GetBBoxCenterScale'),
- dict(type='RandomFlip', direction='horizontal'),
- dict(
- type='RandomBBoxTransform',
- shift_prob=0,
- rotate_factor=60,
- scale_factor=(0.75, 1.25)),
- dict(type='TopdownAffine', input_size=(192, 256)),
- dict(
- type='GenerateTarget',
- encoder=dict(
- type='MSRAHeatmap',
- input_size=(192, 256),
- heatmap_size=(48, 64),
- sigma=2)),
- dict(type='PackPoseInputs')
- ]))
- val_dataloader = dict(
- batch_size=16,
- num_workers=6,
- persistent_workers=True,
- drop_last=False,
- sampler=dict(type='DefaultSampler', shuffle=False),
- dataset=dict(
- type='DeepFashion2Dataset',
- data_root='data/deepfashion2/',
- data_mode='topdown',
- ann_file='validation/deepfashion2_long_sleeved_dress.json',
- data_prefix=dict(img='validation/image/'),
- test_mode=True,
- pipeline=[
- dict(type='LoadImage', backend_args=dict(backend='local')),
- dict(type='GetBBoxCenterScale'),
- dict(type='TopdownAffine', input_size=(192, 256)),
- dict(type='PackPoseInputs')
- ]))
- test_dataloader = dict(
- batch_size=16,
- num_workers=6,
- persistent_workers=True,
- drop_last=False,
- sampler=dict(type='DefaultSampler', shuffle=False),
- dataset=dict(
- type='DeepFashion2Dataset',
- data_root='data/deepfashion2/',
- data_mode='topdown',
- ann_file='validation/deepfashion2_long_sleeved_dress.json',
- data_prefix=dict(img='validation/image/'),
- test_mode=True,
- pipeline=[
- dict(type='LoadImage', backend_args=dict(backend='local')),
- dict(type='GetBBoxCenterScale'),
- dict(type='TopdownAffine', input_size=(192, 256)),
- dict(type='PackPoseInputs')
- ]))
- channel_cfg = dict(
- num_output_channels=294,
- dataset_joints=294,
- dataset_channel=[[
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
- 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
- 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
- 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
- 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
- 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
- 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
- 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
- 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
- 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
- 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
- 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
- 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
- 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
- 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
- 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
- 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
- 290, 291, 292, 293
- ]],
- inference_channel=[
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
- 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
- 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
- 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
- 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121,
- 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
- 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
- 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
- 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177,
- 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191,
- 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
- 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
- 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233,
- 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247,
- 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261,
- 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
- 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289,
- 290, 291, 292, 293
- ])
- model = dict(
- type='TopdownPoseEstimator',
- data_preprocessor=dict(
- type='PoseDataPreprocessor',
- mean=[123.675, 116.28, 103.53],
- std=[58.395, 57.12, 57.375],
- bgr_to_rgb=True),
- backbone=dict(
- type='ResNet',
- depth=50,
- init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
- head=dict(
- type='HeatmapHead',
- in_channels=2048,
- out_channels=294,
- loss=dict(type='KeypointMSELoss', use_target_weight=True),
- decoder=dict(
- type='MSRAHeatmap',
- input_size=(192, 256),
- heatmap_size=(48, 64),
- sigma=2)),
- test_cfg=dict(flip_test=True, flip_mode='heatmap', shift_heatmap=True))
- val_evaluator = [
- dict(type='PCKAccuracy', thr=0.2),
- dict(type='AUC'),
- dict(type='EPE')
- ]
- test_evaluator = [
- dict(type='PCKAccuracy', thr=0.2),
- dict(type='AUC'),
- dict(type='EPE')
- ]
- launcher = 'pytorch'
- work_dir = './work_dirs/td_hm_res50_4xb16-150e_deepfashion2_long_sleeved_dress_256x192'
spaces/AbandonedMuse/UnlimitedMusicGen/audiocraft/models/loaders.py DELETED
@@ -1,96 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
- #
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- """
- Utility functions to load from the checkpoints.
- Each checkpoint is a torch.saved dict with the following keys:
- - 'xp.cfg': the hydra config as dumped during training. This should be used
-     to rebuild the object using the audiocraft.models.builders functions,
- - 'model_best_state': a readily loadable best state for the model, including
-     the conditioner. The model obtained from `xp.cfg` should be compatible
-     with this state dict. In the case of a LM, the encodec model would not be
-     bundled along but instead provided separately.
-
- Those functions also support loading from a remote location with the Torch Hub API.
- They also support overriding some parameters, in particular the device and dtype
- of the returned model.
- """
-
- from pathlib import Path
- from huggingface_hub import hf_hub_download
- import typing as tp
- import os
-
- from omegaconf import OmegaConf
- import torch
-
- from . import builders
-
-
- HF_MODEL_CHECKPOINTS_MAP = {
-     "small": "facebook/musicgen-small",
-     "medium": "facebook/musicgen-medium",
-     "large": "facebook/musicgen-large",
-     "melody": "facebook/musicgen-melody",
- }
-
-
- def _get_state_dict(
-     file_or_url_or_id: tp.Union[Path, str],
-     filename: tp.Optional[str] = None,
-     device='cpu',
-     cache_dir: tp.Optional[str] = None,
- ):
-     # Return the state dict either from a file or url
-     file_or_url_or_id = str(file_or_url_or_id)
-     assert isinstance(file_or_url_or_id, str)
-
-     if os.path.isfile(file_or_url_or_id):
-         return torch.load(file_or_url_or_id, map_location=device)
-
-     if os.path.isdir(file_or_url_or_id):
-         file = f"{file_or_url_or_id}/{filename}"
-         return torch.load(file, map_location=device)
-
-     elif file_or_url_or_id.startswith('https://'):
-         return torch.hub.load_state_dict_from_url(file_or_url_or_id, map_location=device, check_hash=True)
-
-     elif file_or_url_or_id in HF_MODEL_CHECKPOINTS_MAP:
-         assert filename is not None, "filename needs to be defined if using HF checkpoints"
-
-         repo_id = HF_MODEL_CHECKPOINTS_MAP[file_or_url_or_id]
-         file = hf_hub_download(repo_id=repo_id, filename=filename, cache_dir=cache_dir)
-         return torch.load(file, map_location=device)
-
-     else:
-         raise ValueError(f"{file_or_url_or_id} is not a valid name, path or link that can be loaded.")
-
-
- def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):
-     pkg = _get_state_dict(file_or_url_or_id, filename="compression_state_dict.bin", cache_dir=cache_dir)
-     cfg = OmegaConf.create(pkg['xp.cfg'])
-     cfg.device = str(device)
-     model = builders.get_compression_model(cfg)
-     model.load_state_dict(pkg['best_state'])
-     model.eval()
-     return model
-
-
- def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):
-     pkg = _get_state_dict(file_or_url_or_id, filename="state_dict.bin", cache_dir=cache_dir)
-     cfg = OmegaConf.create(pkg['xp.cfg'])
-     cfg.device = str(device)
-     if cfg.device == 'cpu':
-         cfg.transformer_lm.memory_efficient = False
-         cfg.transformer_lm.custom = True
-         cfg.dtype = 'float32'
-     else:
-         cfg.dtype = 'float16'
-     model = builders.get_lm_model(cfg)
-     model.load_state_dict(pkg['best_state'])
-     model.eval()
-     model.cfg = cfg
-     return model
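
For reference, a minimal usage sketch of the module above. This assumes the `audiocraft` package layout shown in the path is importable; the checkpoint names come from `HF_MODEL_CHECKPOINTS_MAP` and the filenames are baked into the two loader functions, so only the short name is needed:

```python
# Sketch only: resolve a MusicGen checkpoint by name and load both models on CPU.
from audiocraft.models.loaders import load_compression_model, load_lm_model

compression_model = load_compression_model('small', device='cpu')
lm = load_lm_model('small', device='cpu')  # CPU path forces float32 and disables memory-efficient attention
```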
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/npm/node_modules/crypto-js/README.md DELETED
@@ -1,261 +0,0 @@
- # crypto-js [![Build Status](https://travis-ci.org/brix/crypto-js.svg?branch=develop)](https://travis-ci.org/brix/crypto-js)
-
- JavaScript library of crypto standards.
-
- ## Node.js (Install)
-
- Requirements:
-
- - Node.js
- - npm (Node.js package manager)
-
- ```bash
- npm install crypto-js
- ```
-
- ### Usage
-
- ES6 import for typical API call signing use case:
-
- ```javascript
- import sha256 from 'crypto-js/sha256';
- import hmacSHA512 from 'crypto-js/hmac-sha512';
- import Base64 from 'crypto-js/enc-base64';
-
- let message, nonce, path, privateKey; // ...
- const hashDigest = sha256(nonce + message);
- const hmacDigest = Base64.stringify(hmacSHA512(path + hashDigest, privateKey));
- ```
-
- Modular include:
-
- ```javascript
- var AES = require("crypto-js/aes");
- var SHA256 = require("crypto-js/sha256");
- ...
- console.log(SHA256("Message"));
- ```
-
- Including all libraries, for access to extra methods:
-
- ```javascript
- var CryptoJS = require("crypto-js");
- console.log(CryptoJS.HmacSHA1("Message", "Key"));
- ```
-
- ## Client (browser)
-
- Requirements:
-
- - Node.js
- - Bower (package manager for frontend)
-
- ```bash
- bower install crypto-js
- ```
-
- ### Usage
-
- Modular include:
-
- ```javascript
- require.config({
-     packages: [
-         {
-             name: 'crypto-js',
-             location: 'path-to/bower_components/crypto-js',
-             main: 'index'
-         }
-     ]
- });
-
- require(["crypto-js/aes", "crypto-js/sha256"], function (AES, SHA256) {
-     console.log(SHA256("Message"));
- });
- ```
-
- Including all libraries, for access to extra methods:
-
- ```javascript
- // The above will work, or use this simple form
- require.config({
-     paths: {
-         'crypto-js': 'path-to/bower_components/crypto-js/crypto-js'
-     }
- });
-
- require(["crypto-js"], function (CryptoJS) {
-     console.log(CryptoJS.HmacSHA1("Message", "Key"));
- });
- ```
-
- ### Usage without RequireJS
-
- ```html
- <script type="text/javascript" src="path-to/bower_components/crypto-js/crypto-js.js"></script>
- <script type="text/javascript">
-     var encrypted = CryptoJS.AES(...);
-     var encrypted = CryptoJS.SHA256(...);
- </script>
- ```
-
- ## API
-
- See: https://cryptojs.gitbook.io/docs/
-
- ### AES Encryption
-
- #### Plain text encryption
-
- ```javascript
- var CryptoJS = require("crypto-js");
-
- // Encrypt
- var ciphertext = CryptoJS.AES.encrypt('my message', 'secret key 123').toString();
-
- // Decrypt
- var bytes = CryptoJS.AES.decrypt(ciphertext, 'secret key 123');
- var originalText = bytes.toString(CryptoJS.enc.Utf8);
-
- console.log(originalText); // 'my message'
- ```
-
- #### Object encryption
-
- ```javascript
- var CryptoJS = require("crypto-js");
-
- var data = [{id: 1}, {id: 2}];
-
- // Encrypt
- var ciphertext = CryptoJS.AES.encrypt(JSON.stringify(data), 'secret key 123').toString();
-
- // Decrypt
- var bytes = CryptoJS.AES.decrypt(ciphertext, 'secret key 123');
- var decryptedData = JSON.parse(bytes.toString(CryptoJS.enc.Utf8));
-
- console.log(decryptedData); // [{id: 1}, {id: 2}]
- ```
-
- ### List of modules
-
- - ```crypto-js/core```
- - ```crypto-js/x64-core```
- - ```crypto-js/lib-typedarrays```
-
- ---
-
- - ```crypto-js/md5```
- - ```crypto-js/sha1```
- - ```crypto-js/sha256```
- - ```crypto-js/sha224```
- - ```crypto-js/sha512```
- - ```crypto-js/sha384```
- - ```crypto-js/sha3```
- - ```crypto-js/ripemd160```
-
- ---
-
- - ```crypto-js/hmac-md5```
- - ```crypto-js/hmac-sha1```
- - ```crypto-js/hmac-sha256```
- - ```crypto-js/hmac-sha224```
- - ```crypto-js/hmac-sha512```
- - ```crypto-js/hmac-sha384```
- - ```crypto-js/hmac-sha3```
- - ```crypto-js/hmac-ripemd160```
-
- ---
-
- - ```crypto-js/pbkdf2```
-
- ---
-
- - ```crypto-js/aes```
- - ```crypto-js/tripledes```
- - ```crypto-js/rc4```
- - ```crypto-js/rabbit```
- - ```crypto-js/rabbit-legacy```
- - ```crypto-js/evpkdf```
-
- ---
-
- - ```crypto-js/format-openssl```
- - ```crypto-js/format-hex```
-
- ---
-
- - ```crypto-js/enc-latin1```
- - ```crypto-js/enc-utf8```
- - ```crypto-js/enc-hex```
- - ```crypto-js/enc-utf16```
- - ```crypto-js/enc-base64```
-
- ---
-
- - ```crypto-js/mode-cfb```
- - ```crypto-js/mode-ctr```
- - ```crypto-js/mode-ctr-gladman```
- - ```crypto-js/mode-ofb```
- - ```crypto-js/mode-ecb```
-
- ---
-
- - ```crypto-js/pad-pkcs7```
- - ```crypto-js/pad-ansix923```
- - ```crypto-js/pad-iso10126```
- - ```crypto-js/pad-iso97971```
- - ```crypto-js/pad-zeropadding```
- - ```crypto-js/pad-nopadding```
-
- ## Release notes
-
- ### 4.1.1
-
- Fix module order in bundled release.
-
- Include the browser field in the released package.json.
-
- ### 4.1.0
-
- Added a URL-safe variant of Base64 encoding. [357](https://github.com/brix/crypto-js/pull/357)
-
- Prevent webpack from adding the crypto-browser package. [364](https://github.com/brix/crypto-js/pull/364)
-
- ### 4.0.0
-
- This is an update including breaking changes for some environments.
-
- In this version `Math.random()` has been replaced by the random methods of the native crypto module.
-
- For this reason CryptoJS might not run in some JavaScript environments without a native crypto module, such as IE 10 or earlier, or React Native.
-
- ### 3.3.0
-
- Rollback: `3.3.0` is the same as `3.1.9-1`.
-
- The move to the native secure crypto module is deferred to a new `4.x.x` version; as a breaking change, its impact is too big for a minor release.
-
- ### 3.2.1
-
- The usage of the native crypto module has been fixed. The import and access of the native crypto module has been improved.
-
- ### 3.2.0
-
- In this version `Math.random()` has been replaced by the random methods of the native crypto module.
-
- For this reason CryptoJS might not run in some JavaScript environments without a native crypto module, such as IE 10 or earlier.
-
- If it is absolutely required to run CryptoJS in such an environment, stay with a `3.1.x` version. Encrypting and decrypting stay compatible. But keep in mind that `3.1.x` versions still use `Math.random()`, which is not cryptographically secure, as it is not random enough.
-
- This version shipped with a `CRITICAL` `BUG`.
-
- DO NOT USE THIS VERSION! Please, go for a newer version!
-
- ### 3.1.x
-
- The `3.1.x` versions are based on the original CryptoJS, wrapped in CommonJS modules.
-
 
spaces/Adapter/CoAdapter/ldm/models/diffusion/plms.py DELETED
@@ -1,243 +0,0 @@
- """SAMPLING ONLY."""
-
- import torch
- import numpy as np
- from tqdm import tqdm
- from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
-
-
- class PLMSSampler(object):
-     def __init__(self, model, schedule="linear", **kwargs):
-         super().__init__()
-         self.model = model
-         self.ddpm_num_timesteps = model.num_timesteps
-         self.schedule = schedule
-
-     def register_buffer(self, name, attr):
-         if type(attr) == torch.Tensor:
-             if attr.device != torch.device("cuda"):
-                 attr = attr.to(torch.device("cuda"))
-         setattr(self, name, attr)
-
-     def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
-         if ddim_eta != 0:
-             raise ValueError('ddim_eta must be 0 for PLMS')
-         self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
-                                                   num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)
-         alphas_cumprod = self.model.alphas_cumprod
-         assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
-         to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
-
-         self.register_buffer('betas', to_torch(self.model.betas))
-         self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
-         self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
-
-         # calculations for diffusion q(x_t | x_{t-1}) and others
-         self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
-         self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
-         self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
-         self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
-         self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
-
-         # ddim sampling parameters
-         ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
-                                                                                    ddim_timesteps=self.ddim_timesteps,
-                                                                                    eta=ddim_eta, verbose=verbose)
-         self.register_buffer('ddim_sigmas', ddim_sigmas)
-         self.register_buffer('ddim_alphas', ddim_alphas)
-         self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
-         self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
-         sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
-             (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
-                     1 - self.alphas_cumprod / self.alphas_cumprod_prev))
-         self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
-
-     @torch.no_grad()
-     def sample(self,
-                S,
-                batch_size,
-                shape,
-                conditioning=None,
-                callback=None,
-                normals_sequence=None,
-                img_callback=None,
-                quantize_x0=False,
-                eta=0.,
-                mask=None,
-                x0=None,
-                temperature=1.,
-                noise_dropout=0.,
-                score_corrector=None,
-                corrector_kwargs=None,
-                verbose=True,
-                x_T=None,
-                log_every_t=100,
-                unconditional_guidance_scale=1.,
-                unconditional_conditioning=None,
-                features_adapter=None,
-                cond_tau=0.4,
-                # conditioning has to come in the same format as during training, e.g. as encoded tokens
-                **kwargs
-                ):
-         if conditioning is not None:
-             if isinstance(conditioning, dict):
-                 cbs = conditioning[list(conditioning.keys())[0]].shape[0]
-                 if cbs != batch_size:
-                     print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
-             else:
-                 if conditioning.shape[0] != batch_size:
-                     print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
-
-         self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
-         C, H, W = shape
-         size = (batch_size, C, H, W)
-         print(f'Data shape for PLMS sampling is {size}')
-
-         samples, intermediates = self.plms_sampling(conditioning, size,
-                                                     callback=callback,
-                                                     img_callback=img_callback,
-                                                     quantize_denoised=quantize_x0,
-                                                     mask=mask, x0=x0,
-                                                     ddim_use_original_steps=False,
-                                                     noise_dropout=noise_dropout,
-                                                     temperature=temperature,
-                                                     score_corrector=score_corrector,
-                                                     corrector_kwargs=corrector_kwargs,
-                                                     x_T=x_T,
-                                                     log_every_t=log_every_t,
-                                                     unconditional_guidance_scale=unconditional_guidance_scale,
-                                                     unconditional_conditioning=unconditional_conditioning,
-                                                     features_adapter=features_adapter,
-                                                     cond_tau=cond_tau
-                                                     )
-         return samples, intermediates
-
-     @torch.no_grad()
-     def plms_sampling(self, cond, shape,
-                       x_T=None, ddim_use_original_steps=False,
-                       callback=None, timesteps=None, quantize_denoised=False,
-                       mask=None, x0=None, img_callback=None, log_every_t=100,
-                       temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                       unconditional_guidance_scale=1., unconditional_conditioning=None, features_adapter=None,
-                       cond_tau=0.4):
-         device = self.model.betas.device
-         b = shape[0]
-         if x_T is None:
-             img = torch.randn(shape, device=device)
-         else:
-             img = x_T
-         if timesteps is None:
-             timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
-         elif timesteps is not None and not ddim_use_original_steps:
-             subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
-             timesteps = self.ddim_timesteps[:subset_end]
-
-         intermediates = {'x_inter': [img], 'pred_x0': [img]}
-         time_range = list(reversed(range(0, timesteps))) if ddim_use_original_steps else np.flip(timesteps)
-         total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
-         print(f"Running PLMS Sampling with {total_steps} timesteps")
-
-         iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
-         old_eps = []
-
-         for i, step in enumerate(iterator):
-             index = total_steps - i - 1
-             ts = torch.full((b,), step, device=device, dtype=torch.long)
-             ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
-
-             if mask is not None:  # and index>=10:
-                 assert x0 is not None
-                 img_orig = self.model.q_sample(x0, ts)  # TODO: deterministic forward pass?
-                 img = img_orig * mask + (1. - mask) * img
-
-             outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
-                                       quantize_denoised=quantize_denoised, temperature=temperature,
-                                       noise_dropout=noise_dropout, score_corrector=score_corrector,
-                                       corrector_kwargs=corrector_kwargs,
-                                       unconditional_guidance_scale=unconditional_guidance_scale,
-                                       unconditional_conditioning=unconditional_conditioning,
-                                       old_eps=old_eps, t_next=ts_next,
-                                       features_adapter=None if index < int(
-                                           (1 - cond_tau) * total_steps) else features_adapter)
-
-             img, pred_x0, e_t = outs
-             old_eps.append(e_t)
-             if len(old_eps) >= 4:
-                 old_eps.pop(0)
-             if callback: callback(i)
-             if img_callback: img_callback(pred_x0, i)
-
-             if index % log_every_t == 0 or index == total_steps - 1:
-                 intermediates['x_inter'].append(img)
-                 intermediates['pred_x0'].append(pred_x0)
-
-         return img, intermediates
-
-     @torch.no_grad()
-     def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
-                       temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
-                       unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None,
-                       features_adapter=None):
-         b, *_, device = *x.shape, x.device
-
-         def get_model_output(x, t):
-             if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
-                 e_t = self.model.apply_model(x, t, c, features_adapter=features_adapter)
-             else:
-                 x_in = torch.cat([x] * 2)
-                 t_in = torch.cat([t] * 2)
-                 c_in = torch.cat([unconditional_conditioning, c])
-                 e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in, features_adapter=features_adapter).chunk(2)
-                 e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
-
-             if score_corrector is not None:
-                 assert self.model.parameterization == "eps"
-                 e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
-
-             return e_t
-
-         alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
-         alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
-         sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
-         sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
-
-         def get_x_prev_and_pred_x0(e_t, index):
-             # select parameters corresponding to the currently considered timestep
-             a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
-             a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
-             sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
-             sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)
-
-             # current prediction for x_0
-             pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
-             if quantize_denoised:
-                 pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
-             # direction pointing to x_t
-             dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t
-             noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
-             if noise_dropout > 0.:
-                 noise = torch.nn.functional.dropout(noise, p=noise_dropout)
-             x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
-             return x_prev, pred_x0
-
-         e_t = get_model_output(x, t)
-         if len(old_eps) == 0:
-             # Pseudo Improved Euler (2nd order)
-             x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
-             e_t_next = get_model_output(x_prev, t_next)
-             e_t_prime = (e_t + e_t_next) / 2
-         elif len(old_eps) == 1:
-             # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
-             e_t_prime = (3 * e_t - old_eps[-1]) / 2
-         elif len(old_eps) == 2:
-             # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
-             e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
-         elif len(old_eps) >= 3:
-             # 4th order Pseudo Linear Multistep (Adams-Bashforth)
-             e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
-
-         x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
-
-         return x_prev, pred_x0, e_t
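
For orientation, a minimal sketch of how a sampler like this is typically driven, assuming `model` is a trained latent-diffusion model from this repository and `c`/`uc` are conditioning tensors prepared elsewhere (all three names are illustrative, not defined in the file above):

```python
# Sketch only; `model`, `c` and `uc` are assumed to exist.
sampler = PLMSSampler(model)
samples, intermediates = sampler.sample(
    S=50,                               # number of PLMS steps
    batch_size=4,
    shape=(4, 64, 64),                  # latent (C, H, W)
    conditioning=c,
    unconditional_guidance_scale=7.5,   # classifier-free guidance weight
    unconditional_conditioning=uc,
    eta=0.0,                            # PLMS requires eta == 0
)
```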
 
spaces/Aditya9790/yolo7-object-tracking/utils/general.py DELETED
@@ -1,892 +0,0 @@
- # YOLOR general utils
-
- import glob
- import logging
- import math
- import os
- import platform
- import random
- import re
- import subprocess
- import time
- from pathlib import Path
-
- import cv2
- import numpy as np
- import pandas as pd
- import torch
- import torchvision
- import yaml
-
- from utils.google_utils import gsutil_getsize
- from utils.metrics import fitness
- from utils.torch_utils import init_torch_seeds
-
- # Settings
- torch.set_printoptions(linewidth=320, precision=5, profile='long')
- np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
- pd.options.display.max_columns = 10
- cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
- os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8))  # NumExpr max threads
-
-
- def set_logging(rank=-1):
-     logging.basicConfig(
-         format="%(message)s",
-         level=logging.INFO if rank in [-1, 0] else logging.WARN)
-
-
- def init_seeds(seed=0):
-     # Initialize random number generator (RNG) seeds
-     random.seed(seed)
-     np.random.seed(seed)
-     init_torch_seeds(seed)
-
-
- def get_latest_run(search_dir='.'):
-     # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
-     last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
-     return max(last_list, key=os.path.getctime) if last_list else ''
-
-
- def isdocker():
-     # Is environment a Docker container
-     return Path('/workspace').exists()  # or Path('/.dockerenv').exists()
-
-
- def emojis(str=''):
-     # Return platform-dependent emoji-safe version of string
-     return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str
-
-
- def check_online():
-     # Check internet connectivity
-     import socket
-     try:
-         socket.create_connection(("1.1.1.1", 443), 5)  # check host accessibility
-         return True
-     except OSError:
-         return False
-
-
- def check_git_status():
-     # Recommend 'git pull' if code is out of date
-     print(colorstr('github: '), end='')
-     try:
-         assert Path('.git').exists(), 'skipping check (not a git repository)'
-         assert not isdocker(), 'skipping check (Docker image)'
-         assert check_online(), 'skipping check (offline)'
-
-         cmd = 'git fetch && git config --get remote.origin.url'
-         url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git')  # github repo url
-         branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip()  # checked out
-         n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True))  # commits behind
-         if n > 0:
-             s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \
-                 f"Use 'git pull' to update or 'git clone {url}' to download latest."
-         else:
-             s = f'up to date with {url} ✅'
-         print(emojis(s))  # emoji-safe
-     except Exception as e:
-         print(e)
-
-
- def check_requirements(requirements='requirements.txt', exclude=()):
-     # Check installed dependencies meet requirements (pass *.txt file or list of packages)
-     import pkg_resources as pkg
-     prefix = colorstr('red', 'bold', 'requirements:')
-     if isinstance(requirements, (str, Path)):  # requirements.txt file
-         file = Path(requirements)
-         if not file.exists():
-             print(f"{prefix} {file.resolve()} not found, check failed.")
-             return
-         requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
-     else:  # list or tuple of packages
-         requirements = [x for x in requirements if x not in exclude]
-
-     n = 0  # number of package updates
-     for r in requirements:
-         try:
-             pkg.require(r)
-         except Exception as e:  # DistributionNotFound or VersionConflict if requirements not met
-             n += 1
-             print(f"{prefix} {e.req} not found and is required by YOLOR, attempting auto-update...")
-             print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode())
-
-     if n:  # if packages updated
-         source = file.resolve() if 'file' in locals() else requirements
-         s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
-             f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
-         print(emojis(s))  # emoji-safe
-
-
- def check_img_size(img_size, s=32):
-     # Verify img_size is a multiple of stride s
-     new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
-     if new_size != img_size:
-         print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
-     return new_size
-
-
- def check_imshow():
-     # Check if environment supports image displays
-     try:
-         assert not isdocker(), 'cv2.imshow() is disabled in Docker environments'
-         cv2.imshow('test', np.zeros((1, 1, 3)))
-         cv2.waitKey(1)
-         cv2.destroyAllWindows()
-         cv2.waitKey(1)
-         return True
-     except Exception as e:
-         print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
-         return False
-
-
- def check_file(file):
-     # Search for file if not found
-     if Path(file).is_file() or file == '':
-         return file
-     else:
-         files = glob.glob('./**/' + file, recursive=True)  # find file
-         assert len(files), f'File Not Found: {file}'  # assert file was found
-         assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}"  # assert unique
-         return files[0]  # return file
-
-
- def check_dataset(dict):
-     # Download dataset if not found locally
-     val, s = dict.get('val'), dict.get('download')
-     if val and len(val):
-         val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path
-         if not all(x.exists() for x in val):
-             print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
-             if s and len(s):  # download script
-                 print('Downloading %s ...' % s)
-                 if s.startswith('http') and s.endswith('.zip'):  # URL
-                     f = Path(s).name  # filename
-                     torch.hub.download_url_to_file(s, f)
-                     r = os.system('unzip -q %s -d ../ && rm %s' % (f, f))  # unzip
-                 else:  # bash script
-                     r = os.system(s)
-                 print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure'))  # analyze return value
-             else:
-                 raise Exception('Dataset not found.')
-
-
- def make_divisible(x, divisor):
-     # Returns x evenly divisible by divisor
-     return math.ceil(x / divisor) * divisor
-
-
- def clean_str(s):
-     # Cleans a string by replacing special characters with underscore _
-     return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)
-
-
- def one_cycle(y1=0.0, y2=1.0, steps=100):
-     # lambda function for sinusoidal ramp from y1 to y2
-     return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
-
-
- def colorstr(*input):
-     # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
-     *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
-     colors = {'black': '\033[30m',  # basic colors
-               'red': '\033[31m',
-               'green': '\033[32m',
-               'yellow': '\033[33m',
-               'blue': '\033[34m',
-               'magenta': '\033[35m',
-               'cyan': '\033[36m',
-               'white': '\033[37m',
-               'bright_black': '\033[90m',  # bright colors
-               'bright_red': '\033[91m',
-               'bright_green': '\033[92m',
-               'bright_yellow': '\033[93m',
-               'bright_blue': '\033[94m',
-               'bright_magenta': '\033[95m',
-               'bright_cyan': '\033[96m',
-               'bright_white': '\033[97m',
-               'end': '\033[0m',  # misc
-               'bold': '\033[1m',
-               'underline': '\033[4m'}
-     return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
-
-
- def labels_to_class_weights(labels, nc=80):
-     # Get class weights (inverse frequency) from training labels
-     if labels[0] is None:  # no labels loaded
-         return torch.Tensor()
-
-     labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
-     classes = labels[:, 0].astype(np.int32)  # labels = [class xywh]
-     weights = np.bincount(classes, minlength=nc)  # occurrences per class
-
-     # Prepend gridpoint count (for uCE training)
-     # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
-     # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start
-
-     weights[weights == 0] = 1  # replace empty bins with 1
-     weights = 1 / weights  # number of targets per class
-     weights /= weights.sum()  # normalize
-     return torch.from_numpy(weights)
-
-
- def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
-     # Produces image weights based on class_weights and image contents
-     class_counts = np.array([np.bincount(x[:, 0].astype(np.int32), minlength=nc) for x in labels])
-     image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
-     # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
-     return image_weights
-
-
- def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
-     # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
-     # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
-     # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
-     # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
-     # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
-     x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
-          35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
-          64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
-     return x
-
-
- def xyxy2xywh(x):
-     # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
-     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
-     y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
-     y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
-     y[:, 2] = x[:, 2] - x[:, 0]  # width
-     y[:, 3] = x[:, 3] - x[:, 1]  # height
-     return y
-
-
- def xywh2xyxy(x):
-     # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
-     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
-     y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
-     y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
-     y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
-     y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
-     return y
-
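
A quick illustrative round-trip through the two converters above (values chosen arbitrarily; the functions accept both `numpy` arrays and `torch` tensors):

```python
# Sketch only: xyxy -> xywh -> xyxy should be the identity.
import numpy as np

boxes_xyxy = np.array([[10., 20., 50., 80.]])  # [x1, y1, x2, y2]
boxes_xywh = xyxy2xywh(boxes_xyxy)             # [[30., 50., 40., 60.]] = (cx, cy, w, h)
assert np.allclose(xywh2xyxy(boxes_xywh), boxes_xyxy)
```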
- def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
-     # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
-     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
-     y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw  # top left x
-     y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh  # top left y
-     y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw  # bottom right x
-     y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh  # bottom right y
-     return y
-
-
- def xyn2xy(x, w=640, h=640, padw=0, padh=0):
-     # Convert normalized segments into pixel segments, shape (n,2)
-     y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
-     y[:, 0] = w * x[:, 0] + padw  # top left x
-     y[:, 1] = h * x[:, 1] + padh  # top left y
-     return y
-
-
- def segment2box(segment, width=640, height=640):
-     # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
-     x, y = segment.T  # segment xy
-     inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
-     x, y = x[inside], y[inside]
-     return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4))  # xyxy
-
-
- def segments2boxes(segments):
-     # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
-     boxes = []
-     for s in segments:
-         x, y = s.T  # segment xy
-         boxes.append([x.min(), y.min(), x.max(), y.max()])  # cls, xyxy
-     return xyxy2xywh(np.array(boxes))  # cls, xywh
-
-
- def resample_segments(segments, n=1000):
-     # Up-sample an (n,2) segment
-     for i, s in enumerate(segments):
-         s = np.concatenate((s, s[0:1, :]), axis=0)
-         x = np.linspace(0, len(s) - 1, n)
-         xp = np.arange(len(s))
-         segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T  # segment xy
-     return segments
-
-
- def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
-     # Rescale coords (xyxy) from img1_shape to img0_shape
-     if ratio_pad is None:  # calculate from img0_shape
-         gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
-         pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
-     else:
-         gain = ratio_pad[0][0]
-         pad = ratio_pad[1]
-
-     coords[:, [0, 2]] -= pad[0]  # x padding
-     coords[:, [1, 3]] -= pad[1]  # y padding
-     coords[:, :4] /= gain
-     clip_coords(coords, img0_shape)
-     return coords
-
-
- def clip_coords(boxes, img_shape):
-     # Clip xyxy bounding boxes to image shape (height, width)
-     boxes[:, 0].clamp_(0, img_shape[1])  # x1
-     boxes[:, 1].clamp_(0, img_shape[0])  # y1
-     boxes[:, 2].clamp_(0, img_shape[1])  # x2
-     boxes[:, 3].clamp_(0, img_shape[0])  # y2
-
-
- def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
-     # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
-     box2 = box2.T
-
-     # Get the coordinates of bounding boxes
-     if x1y1x2y2:  # x1, y1, x2, y2 = box1
-         b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
-         b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
-     else:  # transform from xywh to xyxy
-         b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
-         b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
-         b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
-         b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
-
-     # Intersection area
-     inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
-             (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
-
-     # Union Area
-     w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
-     w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
-     union = w1 * h1 + w2 * h2 - inter + eps
-
-     iou = inter / union
-
-     if GIoU or DIoU or CIoU:
-         cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
-         ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
-         if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
-             c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
-             rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
-                     (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center distance squared
-             if DIoU:
-                 return iou - rho2 / c2  # DIoU
-             elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
-                 v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2)
-                 with torch.no_grad():
-                     alpha = v / (v - iou + (1 + eps))
-                 return iou - (rho2 / c2 + v * alpha)  # CIoU
-         else:  # GIoU https://arxiv.org/pdf/1902.09630.pdf
-             c_area = cw * ch + eps  # convex area
-             return iou - (c_area - union) / c_area  # GIoU
-     else:
-         return iou  # IoU
-
-
- def bbox_alpha_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False, alpha=2, eps=1e-9):
-     # Returns the alpha-IoU of box1 to box2. box1 is 4, box2 is nx4
-     box2 = box2.T
-
-     # Get the coordinates of bounding boxes
-     if x1y1x2y2:  # x1, y1, x2, y2 = box1
-         b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
-         b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
-     else:  # transform from xywh to xyxy
-         b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
-         b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
-         b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
-         b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
-
-     # Intersection area
-     inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
-             (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
-
-     # Union Area
-     w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
-     w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
-     union = w1 * h1 + w2 * h2 - inter + eps
-
-     # change iou into pow(iou+eps)
-     # iou = inter / union
-     iou = torch.pow(inter / union + eps, alpha)
-     # beta = 2 * alpha
-     if GIoU or DIoU or CIoU:
-         cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
-         ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
-         if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
-             c2 = (cw ** 2 + ch ** 2) ** alpha + eps  # convex diagonal
-             rho_x = torch.abs(b2_x1 + b2_x2 - b1_x1 - b1_x2)
-             rho_y = torch.abs(b2_y1 + b2_y2 - b1_y1 - b1_y2)
-             rho2 = ((rho_x ** 2 + rho_y ** 2) / 4) ** alpha  # center distance
-             if DIoU:
-                 return iou - rho2 / c2  # DIoU
-             elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
-                 v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
-                 with torch.no_grad():
-                     alpha_ciou = v / ((1 + eps) - inter / union + v)
-                 # return iou - (rho2 / c2 + v * alpha_ciou)  # CIoU
-                 return iou - (rho2 / c2 + torch.pow(v * alpha_ciou + eps, alpha))  # CIoU
-         else:  # GIoU https://arxiv.org/pdf/1902.09630.pdf
-             # c_area = cw * ch + eps  # convex area
-             # return iou - (c_area - union) / c_area  # GIoU
-             c_area = torch.max(cw * ch + eps, union)  # convex area
-             return iou - torch.pow((c_area - union) / c_area + eps, alpha)  # GIoU
-     else:
-         return iou  # torch.log(iou+eps) or iou
-
-
- def box_iou(box1, box2):
-     # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
-     """
-     Return intersection-over-union (Jaccard index) of boxes.
-     Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
-     Arguments:
-         box1 (Tensor[N, 4])
-         box2 (Tensor[M, 4])
-     Returns:
-         iou (Tensor[N, M]): the NxM matrix containing the pairwise
-             IoU values for every element in boxes1 and boxes2
-     """
-
-     def box_area(box):
-         # box = 4xn
-         return (box[2] - box[0]) * (box[3] - box[1])
-
-     area1 = box_area(box1.T)
-     area2 = box_area(box2.T)
-
-     # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
-     inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
-     return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)
-
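
A small sanity-check sketch for `box_iou` above (values picked by hand; `torch` is already imported at the top of the file). Two equal boxes that overlap on half of each have intersection 2 and union 6, so the pairwise IoU is 1/3:

```python
# Sketch only: verify box_iou on a hand-computed pair.
a = torch.tensor([[0., 0., 2., 2.]])  # 2x2 box
b = torch.tensor([[1., 0., 3., 2.]])  # same box shifted right by 1
print(box_iou(a, b))                  # tensor([[0.3333]]): inter=2, union=6
```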
- def wh_iou(wh1, wh2):
-     # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
-     wh1 = wh1[:, None]  # [N,1,2]
-     wh2 = wh2[None]  # [1,M,2]
-     inter = torch.min(wh1, wh2).prod(2)  # [N,M]
-     return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)
-
-
- def box_giou(box1, box2):
-     """
-     Return generalized intersection-over-union (Jaccard index) between two sets of boxes.
-     Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
-     ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
-     Args:
-         boxes1 (Tensor[N, 4]): first set of boxes
-         boxes2 (Tensor[M, 4]): second set of boxes
-     Returns:
-         Tensor[N, M]: the NxM matrix containing the pairwise generalized IoU values
-         for every element in boxes1 and boxes2
-     """
-
-     def box_area(box):
-         # box = 4xn
-         return (box[2] - box[0]) * (box[3] - box[1])
-
-     area1 = box_area(box1.T)
-     area2 = box_area(box2.T)
-
-     inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
-     union = (area1[:, None] + area2 - inter)
-
-     iou = inter / union
-
-     lti = torch.min(box1[:, None, :2], box2[:, :2])
-     rbi = torch.max(box1[:, None, 2:], box2[:, 2:])
-
-     whi = (rbi - lti).clamp(min=0)  # [N,M,2]
-     areai = whi[:, :, 0] * whi[:, :, 1]
-
-     return iou - (areai - union) / areai
-
-
- def box_ciou(box1, box2, eps: float = 1e-7):
-     """
-     Return complete intersection-over-union (Jaccard index) between two sets of boxes.
-     Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
-     ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
-     Args:
-         boxes1 (Tensor[N, 4]): first set of boxes
-         boxes2 (Tensor[M, 4]): second set of boxes
-         eps (float, optional): small number to prevent division by zero. Default: 1e-7
-     Returns:
-         Tensor[N, M]: the NxM matrix containing the pairwise complete IoU values
-         for every element in boxes1 and boxes2
-     """
-
-     def box_area(box):
-         # box = 4xn
-         return (box[2] - box[0]) * (box[3] - box[1])
-
-     area1 = box_area(box1.T)
-     area2 = box_area(box2.T)
-
-     inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
-     union = (area1[:, None] + area2 - inter)
-
-     iou = inter / union
-
-     lti = torch.min(box1[:, None, :2], box2[:, :2])
-     rbi = torch.max(box1[:, None, 2:], box2[:, 2:])
-
-     whi = (rbi - lti).clamp(min=0)  # [N,M,2]
-     diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps
-
-     # centers of boxes
-     x_p = (box1[:, None, 0] + box1[:, None, 2]) / 2
-     y_p = (box1[:, None, 1] + box1[:, None, 3]) / 2
-     x_g = (box2[:, 0] + box2[:, 2]) / 2
-     y_g = (box2[:, 1] + box2[:, 3]) / 2
-     # The distance between boxes' centers squared.
-     centers_distance_squared = (x_p - x_g) ** 2 + (y_p - y_g) ** 2
-
-     w_pred = box1[:, None, 2] - box1[:, None, 0]
-     h_pred = box1[:, None, 3] - box1[:, None, 1]
-
-     w_gt = box2[:, 2] - box2[:, 0]
-     h_gt = box2[:, 3] - box2[:, 1]
-
-     v = (4 / (torch.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2)
-     with torch.no_grad():
-         alpha = v / (1 - iou + v + eps)
-     return iou - (centers_distance_squared / diagonal_distance_squared) - alpha * v
-
-
- def box_diou(box1, box2, eps: float = 1e-7):
-     """
-     Return distance intersection-over-union (Jaccard index) between two sets of boxes.
-     Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
-     ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
-     Args:
-         boxes1 (Tensor[N, 4]): first set of boxes
-         boxes2 (Tensor[M, 4]): second set of boxes
-         eps (float, optional): small number to prevent division by zero. Default: 1e-7
-     Returns:
-         Tensor[N, M]: the NxM matrix containing the pairwise distance IoU values
-         for every element in boxes1 and boxes2
-     """
-
-     def box_area(box):
-         # box = 4xn
-         return (box[2] - box[0]) * (box[3] - box[1])
-
-     area1 = box_area(box1.T)
-     area2 = box_area(box2.T)
-
-     inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
-     union = (area1[:, None] + area2 - inter)
-
-     iou = inter / union
-
-     lti = torch.min(box1[:, None, :2], box2[:, :2])
-     rbi = torch.max(box1[:, None, 2:], box2[:, 2:])
-
-     whi = (rbi - lti).clamp(min=0)  # [N,M,2]
-     diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps
-
-     # centers of boxes
-     x_p = (box1[:, None, 0] + box1[:, None, 2]) / 2
-     y_p = (box1[:, None, 1] + box1[:, None, 3]) / 2
-     x_g = (box2[:, 0] + box2[:, 2]) / 2
-     y_g = (box2[:, 1] + box2[:, 3]) / 2
-     # The distance between boxes' centers squared.
-     centers_distance_squared = (x_p - x_g) ** 2 + (y_p - y_g) ** 2
-
-     # The distance IoU is the IoU penalized by a normalized
-     # distance between boxes' centers squared.
-     return iou - (centers_distance_squared / diagonal_distance_squared)
-
-
- def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
-                         labels=()):
-     """Runs Non-Maximum Suppression (NMS) on inference results
-
-     Returns:
-          list of detections, one (n,6) tensor per image [xyxy, conf, cls]
-     """
-
-     nc = prediction.shape[2] - 5  # number of classes
-     xc = prediction[..., 4] > conf_thres  # candidates
-
-     # Settings
-     min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
-     max_det = 300  # maximum number of detections per image
-     max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
-     time_limit = 10.0  # seconds to quit after
-     redundant = True  # require redundant detections
-     multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
-     merge = False  # use merge-NMS
-
-     t = time.time()
-     output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
-     for xi, x in enumerate(prediction):  # image index, image inference
-         # Apply constraints
-         # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
-         x = x[xc[xi]]  # confidence
-
-         # Cat apriori labels if autolabelling
-         if labels and len(labels[xi]):
-             l = labels[xi]
-             v = torch.zeros((len(l), nc + 5), device=x.device)
-             v[:, :4] = l[:, 1:5]  # box
-             v[:, 4] = 1.0  # conf
-             v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
-             x = torch.cat((x, v), 0)
-
-         # If none remain process next image
-         if not x.shape[0]:
-             continue
-
-         # Compute conf
-         if nc == 1:
-             x[:, 5:] = x[:, 4:5]  # for models with one class, cls_loss is 0 and cls_conf is always 0.5,
-                                   # so there is no need to multiply.
-         else:
-             x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
-
-         # Box (center x, center y, width, height) to (x1, y1, x2, y2)
-         box = xywh2xyxy(x[:, :4])
-
-         # Detections matrix nx6 (xyxy, conf, cls)
-         if multi_label:
-             i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
-             x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
-         else:  # best class only
-             conf, j = x[:, 5:].max(1, keepdim=True)
-             x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
-
-         # Filter by class
-         if classes is not None:
-             x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
-
-         # Apply finite constraint
-         # if not torch.isfinite(x).all():
-         #     x = x[torch.isfinite(x).all(1)]
-
-         # Check shape
-         n = x.shape[0]  # number of boxes
-         if not n:  # no boxes
-             continue
-         elif n > max_nms:  # excess boxes
-             x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence
-
-         # Batched NMS
-         c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
-         boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
-         i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
-         if i.shape[0] > max_det:  # limit detections
-             i = i[:max_det]
-         if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
-             # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
-             iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
-             weights = iou * scores[None]  # box weights
-             x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
-             if redundant:
-                 i = i[iou.sum(1) > 1]  # require redundancy
-
-         output[xi] = x[i]
-         if (time.time() - t) > time_limit:
-             print(f'WARNING: NMS time limit {time_limit}s exceeded')
-             break  # time limit exceeded
-
-     return output
-
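
A minimal post-processing sketch for `non_max_suppression` above, assuming `pred` is the raw `(batch, n, 5 + nc)` output of a YOLOv7-style forward pass (the name `pred` is illustrative, not defined in this file):

```python
# Sketch only; `pred` is assumed to come from a model forward pass.
detections = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
for det in detections:  # one (n, 6) tensor per image: [x1, y1, x2, y2, conf, cls]
    for *xyxy, conf, cls in det:
        print(int(cls), float(conf), [float(v) for v in xyxy])
```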
- def non_max_suppression_kpt(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
704
- labels=(), kpt_label=False, nc=None, nkpt=None):
705
- """Runs Non-Maximum Suppression (NMS) on inference results
706
-
707
- Returns:
708
- list of detections, on (n,6) tensor per image [xyxy, conf, cls]
709
- """
710
- if nc is None:
711
- nc = prediction.shape[2] - 5 if not kpt_label else prediction.shape[2] - 56 # number of classes
712
- xc = prediction[..., 4] > conf_thres # candidates
713
-
714
- # Settings
715
- min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
716
- max_det = 300 # maximum number of detections per image
717
- max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()
718
- time_limit = 10.0 # seconds to quit after
719
- redundant = True # require redundant detections
720
- multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)
721
- merge = False # use merge-NMS
722
-
723
- t = time.time()
724
- output = [torch.zeros((0,6), device=prediction.device)] * prediction.shape[0]
725
- for xi, x in enumerate(prediction): # image index, image inference
726
- # Apply constraints
727
- # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
728
- x = x[xc[xi]] # confidence
729
-
730
- # Cat apriori labels if autolabelling
731
- if labels and len(labels[xi]):
732
- l = labels[xi]
733
- v = torch.zeros((len(l), nc + 5), device=x.device)
734
- v[:, :4] = l[:, 1:5] # box
735
- v[:, 4] = 1.0 # conf
736
- v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls
737
- x = torch.cat((x, v), 0)
738
-
739
- # If none remain process next image
740
- if not x.shape[0]:
741
- continue
742
-
743
- # Compute conf
744
- x[:, 5:5+nc] *= x[:, 4:5] # conf = obj_conf * cls_conf
745
-
746
- # Box (center x, center y, width, height) to (x1, y1, x2, y2)
747
- box = xywh2xyxy(x[:, :4])
748
-
749
- # Detections matrix nx6 (xyxy, conf, cls)
750
- if multi_label:
751
- i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
752
- x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
753
- else: # best class only
754
- if not kpt_label:
755
- conf, j = x[:, 5:].max(1, keepdim=True)
756
- x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
757
- else:
758
- kpts = x[:, 6:]
759
- conf, j = x[:, 5:6].max(1, keepdim=True)
760
- x = torch.cat((box, conf, j.float(), kpts), 1)[conf.view(-1) > conf_thres]
761
-
762
-
763
- # Filter by class
764
- if classes is not None:
765
- x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
766
-
767
- # Apply finite constraint
768
- # if not torch.isfinite(x).all():
769
- # x = x[torch.isfinite(x).all(1)]
770
-
771
- # Check shape
772
- n = x.shape[0] # number of boxes
773
- if not n: # no boxes
774
- continue
775
- elif n > max_nms: # excess boxes
776
- x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence
777
-
778
- # Batched NMS
779
- c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
780
- boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
781
- i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS
782
- if i.shape[0] > max_det: # limit detections
783
- i = i[:max_det]
784
- if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
785
- # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
786
- iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
787
- weights = iou * scores[None] # box weights
788
- x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
789
- if redundant:
790
- i = i[iou.sum(1) > 1] # require redundancy
791
-
792
- output[xi] = x[i]
793
- if (time.time() - t) > time_limit:
794
- print(f'WARNING: NMS time limit {time_limit}s exceeded')
795
- break # time limit exceeded
796
-
797
- return output
798
-
799
-
800
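The "Batched NMS" block above relies on a class-offset trick: each box is shifted by class_index * max_wh, so boxes of different classes can never overlap and a single torchvision.ops.nms call behaves like per-class NMS. A minimal self-contained sketch of the same trick (toy boxes, illustrative only):

import torch
import torchvision

boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 10., 10.], [0., 0., 10., 10.]])
scores = torch.tensor([0.9, 0.8, 0.7])
cls = torch.tensor([0., 0., 1.])  # third box belongs to another class
offset = boxes + cls[:, None] * 4096  # shift each class into its own coordinate region
keep = torchvision.ops.nms(offset, scores, 0.45)
# keep == tensor([0, 2]): box 1 is suppressed by box 0, while the identical box 2
# survives because its class offset removed any overlap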
- def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_optimizer()
-     # Strip optimizer from 'f' to finalize training, optionally save as 's'
-     x = torch.load(f, map_location=torch.device('cpu'))
-     if x.get('ema'):
-         x['model'] = x['ema']  # replace model with ema
-     for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates':  # keys
-         x[k] = None
-     x['epoch'] = -1
-     x['model'].half()  # to FP16
-     for p in x['model'].parameters():
-         p.requires_grad = False
-     torch.save(x, s or f)
-     mb = os.path.getsize(s or f) / 1E6  # filesize
-     print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")
-
-
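strip_optimizer swaps in the EMA weights when present, nulls training-only keys, and casts the model to FP16, roughly halving checkpoint size. A minimal usage sketch (paths are hypothetical):

strip_optimizer('runs/train/exp/weights/best.pt')               # finalize in place
strip_optimizer('runs/train/exp/weights/best.pt', 'deploy.pt')  # or save a stripped copy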
- def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
-     # Print mutation results to evolve.txt (for use with train.py --evolve)
-     a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
-     b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
-     c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
-     print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
-
-     if bucket:
-         url = 'gs://%s/evolve.txt' % bucket
-         if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
-             os.system('gsutil cp %s .' % url)  # download evolve.txt if larger than local
-
-     with open('evolve.txt', 'a') as f:  # append result
-         f.write(c + b + '\n')
-     x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
-     x = x[np.argsort(-fitness(x))]  # sort
-     np.savetxt('evolve.txt', x, '%10.3g')  # save sorted by fitness
-
-     # Save yaml
-     for i, k in enumerate(hyp.keys()):
-         hyp[k] = float(x[0, i + 7])
-     with open(yaml_file, 'w') as f:
-         results = tuple(x[0, :7])
-         c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
-         f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
-         yaml.dump(hyp, f, sort_keys=False)
-
-     if bucket:
-         os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket))  # upload
-
-
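Each evolve.txt row stores the 7 result metrics first and the hyperparameter values after them, which is why the best row's hyperparameters are read back starting at column 7. A sketch of that layout, assuming x is the sorted array loaded above:

best = x[0]            # top row after sorting by fitness
metrics = best[:7]     # P, R, mAP@0.5, mAP@0.5:0.95, val losses x 3
hyp_values = best[7:]  # one value per key in hyp, in insertion order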
- def apply_classifier(x, model, img, im0):
-     # applies a second stage classifier to yolo outputs
-     im0 = [im0] if isinstance(im0, np.ndarray) else im0
-     for i, d in enumerate(x):  # per image
-         if d is not None and len(d):
-             d = d.clone()
-
-             # Reshape and pad cutouts
-             b = xyxy2xywh(d[:, :4])  # boxes
-             b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
-             b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
-             d[:, :4] = xywh2xyxy(b).long()
-
-             # Rescale boxes from img_size to im0 size
-             scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
-
-             # Classes
-             pred_cls1 = d[:, 5].long()
-             ims = []
-             for j, a in enumerate(d):  # per item
-                 cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
-                 im = cv2.resize(cutout, (224, 224))  # BGR
-                 # cv2.imwrite('test%i.jpg' % j, cutout)
-
-                 im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
-                 im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
-                 im /= 255.0  # 0 - 255 to 0.0 - 1.0
-                 ims.append(im)
-
-             pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
-             x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections
-
-     return x
-
-
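apply_classifier squares each crop to its longer side and pads it by 30% plus 30 pixels before the 224x224 resize, so the second-stage classifier sees some surrounding context. Worked box math for one detection:

w, h = 100.0, 60.0             # detection width/height in pixels
side = max(w, h) * 1.3 + 30.0  # 100 * 1.3 + 30 = 160 -> side of the square cutout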
- def increment_path(path, exist_ok=True, sep=''):
-     # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc.
-     path = Path(path)  # os-agnostic
-     if (path.exists() and exist_ok) or (not path.exists()):
-         return str(path)
-     else:
-         dirs = glob.glob(f"{path}{sep}*")  # similar paths
-         matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs]
-         i = [int(m.groups()[0]) for m in matches if m]  # indices
-         n = max(i) + 1 if i else 2  # increment number
-         return f"{path}{sep}{n}"  # update path
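A quick sketch of increment_path's expected behavior (paths are hypothetical):

increment_path('runs/exp', exist_ok=False)  # 'runs/exp' if it does not exist yet
# once 'runs/exp' exists: 'runs/exp2', then 'runs/exp3', and so on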
spaces/AgentVerse/agentVerse/ui/dist/index.html DELETED
@@ -1,20 +0,0 @@
- <!DOCTYPE html>
- <html lang="en">
-
- <head>
-     <meta charset="UTF-8">
-     <meta name="viewport" content="user-scalable=no, width=device-width, initial-scale=1.0">
-     <style>
-         html,
-         body {
-             margin: 0;
-             padding: 0;
-         }
-     </style>
- </head>
-
- <body>
-     <div id="game"></div>
-     <script src="bundle.js"></script>
- </body>
- </html>
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/pages/methods/Methods.js DELETED
@@ -1,17 +0,0 @@
- import AddChildMethods from './AddChildMethods.js';
- import GetPage from './GetPage.js';
- import SwapPage from './SwapPage.js';
- import HasPage from './HasPage.js';
-
- var methods = {
-     getPage: GetPage,
-     swapPage: SwapPage,
-     hasPage: HasPage,
- }
-
- Object.assign(
-     methods,
-     AddChildMethods,
- );
-
- export default methods;
spaces/AkshayKollimarala/MygenAI/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: MygenAI
- emoji: 🏃
- colorFrom: purple
- colorTo: pink
- sdk: gradio
- sdk_version: 3.39.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Alpaca233/SadTalker/src/face3d/models/arcface_torch/utils/utils_callbacks.py DELETED
@@ -1,117 +0,0 @@
- import logging
- import os
- import time
- from typing import List
-
- import torch
-
- from eval import verification
- from utils.utils_logging import AverageMeter
-
-
- class CallBackVerification(object):
-     def __init__(self, frequent, rank, val_targets, rec_prefix, image_size=(112, 112)):
-         self.frequent: int = frequent
-         self.rank: int = rank
-         self.highest_acc: float = 0.0
-         self.highest_acc_list: List[float] = [0.0] * len(val_targets)
-         self.ver_list: List[object] = []
-         self.ver_name_list: List[str] = []
-         if self.rank == 0:
-             self.init_dataset(val_targets=val_targets, data_dir=rec_prefix, image_size=image_size)
-
-     def ver_test(self, backbone: torch.nn.Module, global_step: int):
-         results = []
-         for i in range(len(self.ver_list)):
-             acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(
-                 self.ver_list[i], backbone, 10, 10)
-             logging.info('[%s][%d]XNorm: %f' % (self.ver_name_list[i], global_step, xnorm))
-             logging.info('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (self.ver_name_list[i], global_step, acc2, std2))
-             if acc2 > self.highest_acc_list[i]:
-                 self.highest_acc_list[i] = acc2
-             logging.info(
-                 '[%s][%d]Accuracy-Highest: %1.5f' % (self.ver_name_list[i], global_step, self.highest_acc_list[i]))
-             results.append(acc2)
-
-     def init_dataset(self, val_targets, data_dir, image_size):
-         for name in val_targets:
-             path = os.path.join(data_dir, name + ".bin")
-             if os.path.exists(path):
-                 data_set = verification.load_bin(path, image_size)
-                 self.ver_list.append(data_set)
-                 self.ver_name_list.append(name)
-
-     def __call__(self, num_update, backbone: torch.nn.Module):
-         if self.rank == 0 and num_update > 0 and num_update % self.frequent == 0:
-             backbone.eval()
-             self.ver_test(backbone, num_update)
-             backbone.train()
-
-
- class CallBackLogging(object):
-     def __init__(self, frequent, rank, total_step, batch_size, world_size, writer=None):
-         self.frequent: int = frequent
-         self.rank: int = rank
-         self.time_start = time.time()
-         self.total_step: int = total_step
-         self.batch_size: int = batch_size
-         self.world_size: int = world_size
-         self.writer = writer
-
-         self.init = False
-         self.tic = 0
-
-     def __call__(self,
-                  global_step: int,
-                  loss: AverageMeter,
-                  epoch: int,
-                  fp16: bool,
-                  learning_rate: float,
-                  grad_scaler: torch.cuda.amp.GradScaler):
-         if self.rank == 0 and global_step > 0 and global_step % self.frequent == 0:
-             if self.init:
-                 try:
-                     speed: float = self.frequent * self.batch_size / (time.time() - self.tic)
-                     speed_total = speed * self.world_size
-                 except ZeroDivisionError:
-                     speed_total = float('inf')
-
-                 time_now = (time.time() - self.time_start) / 3600
-                 time_total = time_now / ((global_step + 1) / self.total_step)
-                 time_for_end = time_total - time_now
-                 if self.writer is not None:
-                     self.writer.add_scalar('time_for_end', time_for_end, global_step)
-                     self.writer.add_scalar('learning_rate', learning_rate, global_step)
-                     self.writer.add_scalar('loss', loss.avg, global_step)
-                 if fp16:
-                     msg = "Speed %.2f samples/sec  Loss %.4f  LearningRate %.4f  Epoch: %d  Global Step: %d  " \
-                           "Fp16 Grad Scale: %.2f  Required: %.1f hours" % (
-                               speed_total, loss.avg, learning_rate, epoch, global_step,
-                               grad_scaler.get_scale(), time_for_end
-                           )
-                 else:
-                     msg = "Speed %.2f samples/sec  Loss %.4f  LearningRate %.4f  Epoch: %d  Global Step: %d  " \
-                           "Required: %.1f hours" % (
-                               speed_total, loss.avg, learning_rate, epoch, global_step, time_for_end
-                           )
-                 logging.info(msg)
-                 loss.reset()
-                 self.tic = time.time()
-             else:
-                 self.init = True
-                 self.tic = time.time()
-
-
- class CallBackModelCheckpoint(object):
-     def __init__(self, rank, output="./"):
-         self.rank: int = rank
-         self.output: str = output
-
-     def __call__(self, global_step, backbone, partial_fc, ):
-         if global_step > 100 and self.rank == 0:
-             path_module = os.path.join(self.output, "backbone.pth")
-             torch.save(backbone.module.state_dict(), path_module)
-             logging.info("Pytorch Model Saved in '{}'".format(path_module))
-
-         if global_step > 100 and partial_fc is not None:
-             partial_fc.save_params()
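These callbacks are built once and then invoked from the training loop. A minimal wiring sketch, assuming single-process rank 0 and hypothetical train_loader, train_one_step, loss_meter, epoch, lr and grad_scaler names:

callback_verification = CallBackVerification(2000, 0, ["lfw"], "/data/val")
callback_logging = CallBackLogging(50, 0, total_step=100000, batch_size=128, world_size=1)
for global_step, (img, label) in enumerate(train_loader):  # hypothetical loader
    train_one_step(backbone, img, label, loss_meter)       # hypothetical step; updates loss_meter
    callback_logging(global_step, loss_meter, epoch, False, lr, grad_scaler)
    callback_verification(global_step, backbone)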
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_ipex.py DELETED
@@ -1,848 +0,0 @@
- # Copyright 2023 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import inspect
- from typing import Any, Callable, Dict, List, Optional, Union
-
- import intel_extension_for_pytorch as ipex
- import torch
- from packaging import version
- from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
-
- from diffusers.configuration_utils import FrozenDict
- from diffusers.models import AutoencoderKL, UNet2DConditionModel
- from diffusers.pipeline_utils import DiffusionPipeline
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
- from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
- from diffusers.schedulers import KarrasDiffusionSchedulers
- from diffusers.utils import (
-     deprecate,
-     is_accelerate_available,
-     is_accelerate_version,
-     logging,
-     randn_tensor,
-     replace_example_docstring,
- )
-
-
- logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
- EXAMPLE_DOC_STRING = """
-     Examples:
-         ```py
-         >>> import torch
-         >>> from diffusers import DiffusionPipeline
-
-         >>> pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", custom_pipeline="stable_diffusion_ipex")
-
-         >>> prompt = "a photo of an astronaut riding a horse on mars"
-         >>> # For Float32
-         >>> pipe.prepare_for_ipex(prompt, dtype=torch.float32, height=512, width=512)  # image height/width should be consistent with the pipeline inference
-         >>> # For BFloat16
-         >>> pipe.prepare_for_ipex(prompt, dtype=torch.bfloat16, height=512, width=512)  # image height/width should be consistent with the pipeline inference
-
-         >>> # For Float32
-         >>> image = pipe(prompt, num_inference_steps=num_inference_steps, height=512, width=512).images[0]  # height/width should be consistent with 'prepare_for_ipex()'
-         >>> # For BFloat16
-         >>> with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
-         >>>     image = pipe(prompt, num_inference_steps=num_inference_steps, height=512, width=512).images[0]  # height/width should be consistent with 'prepare_for_ipex()'
-         ```
- """
-
-
- class StableDiffusionIPEXPipeline(DiffusionPipeline):
-     r"""
-     Pipeline for text-to-image generation using Stable Diffusion on IPEX.
-
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-     Args:
-         vae ([`AutoencoderKL`]):
-             Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
-         text_encoder ([`CLIPTextModel`]):
-             Frozen text-encoder. Stable Diffusion uses the text portion of
-             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
-             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
-         tokenizer (`CLIPTokenizer`):
-             Tokenizer of class
-             [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
-         unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
-         scheduler ([`SchedulerMixin`]):
-             A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
-             [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
-         safety_checker ([`StableDiffusionSafetyChecker`]):
-             Classification module that estimates whether generated images could be considered offensive or harmful.
-             Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
-         feature_extractor ([`CLIPFeatureExtractor`]):
-             Model that extracts features from generated images to be used as inputs for the `safety_checker`.
-     """
-     _optional_components = ["safety_checker", "feature_extractor"]
-
-     def __init__(
-         self,
-         vae: AutoencoderKL,
-         text_encoder: CLIPTextModel,
-         tokenizer: CLIPTokenizer,
-         unet: UNet2DConditionModel,
-         scheduler: KarrasDiffusionSchedulers,
-         safety_checker: StableDiffusionSafetyChecker,
-         feature_extractor: CLIPFeatureExtractor,
-         requires_safety_checker: bool = True,
-     ):
-         super().__init__()
-
-         if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
-             deprecation_message = (
-                 f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
-                 f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
-                 "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
-                 " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
-                 " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
-                 " file"
-             )
-             deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(scheduler.config)
-             new_config["steps_offset"] = 1
-             scheduler._internal_dict = FrozenDict(new_config)
-
-         if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
-             deprecation_message = (
-                 f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
-                 " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
-                 " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
-                 " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
-                 " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
-             )
-             deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(scheduler.config)
-             new_config["clip_sample"] = False
-             scheduler._internal_dict = FrozenDict(new_config)
-
-         if safety_checker is None and requires_safety_checker:
-             logger.warning(
-                 f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
-                 " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
-                 " results in services or applications open to the public. Both the diffusers team and Hugging Face"
-                 " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
-                 " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
-                 " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
-             )
-
-         if safety_checker is not None and feature_extractor is None:
-             raise ValueError(
-                 f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
-                 " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
-             )
-
-         is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
-             version.parse(unet.config._diffusers_version).base_version
-         ) < version.parse("0.9.0.dev0")
-         is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
-         if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
-             deprecation_message = (
-                 "The configuration file of the unet has set the default `sample_size` to smaller than"
-                 " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
-                 " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
-                 " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
-                 " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
-                 " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
-                 " in the config might lead to incorrect results in future versions. If you have downloaded this"
-                 " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
-                 " the `unet/config.json` file"
-             )
-             deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
-             new_config = dict(unet.config)
-             new_config["sample_size"] = 64
-             unet._internal_dict = FrozenDict(new_config)
-
-         self.register_modules(
-             vae=vae,
-             text_encoder=text_encoder,
-             tokenizer=tokenizer,
-             unet=unet,
-             scheduler=scheduler,
-             safety_checker=safety_checker,
-             feature_extractor=feature_extractor,
-         )
-         self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
-         self.register_to_config(requires_safety_checker=requires_safety_checker)
-
-     def get_input_example(self, prompt, height=None, width=None, guidance_scale=7.5, num_images_per_prompt=1):
-         prompt_embeds = None
-         negative_prompt_embeds = None
-         negative_prompt = None
-         callback_steps = 1
-         generator = None
-         latents = None
-
-         # 0. Default height and width to unet
-         height = height or self.unet.config.sample_size * self.vae_scale_factor
-         width = width or self.unet.config.sample_size * self.vae_scale_factor
-
-         # 1. Check inputs. Raise error if not correct
-         self.check_inputs(
-             prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
-         )
-
-         # 2. Define call parameters
-         if prompt is not None and isinstance(prompt, str):
-             batch_size = 1
-         elif prompt is not None and isinstance(prompt, list):
-             batch_size = len(prompt)
-
-         device = "cpu"
-         # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
-         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-         # corresponds to doing no classifier free guidance.
-         do_classifier_free_guidance = guidance_scale > 1.0
-
-         # 3. Encode input prompt
-         prompt_embeds = self._encode_prompt(
-             prompt,
-             device,
-             num_images_per_prompt,
-             do_classifier_free_guidance,
-             negative_prompt,
-             prompt_embeds=prompt_embeds,
-             negative_prompt_embeds=negative_prompt_embeds,
-         )
-
-         # 5. Prepare latent variables
-         latents = self.prepare_latents(
-             batch_size * num_images_per_prompt,
-             self.unet.in_channels,
-             height,
-             width,
-             prompt_embeds.dtype,
-             device,
-             generator,
-             latents,
-         )
-         dummy = torch.ones(1, dtype=torch.int32)
-         latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
-         latent_model_input = self.scheduler.scale_model_input(latent_model_input, dummy)
-
-         unet_input_example = (latent_model_input, dummy, prompt_embeds)
-         vae_decoder_input_example = latents
-
-         return unet_input_example, vae_decoder_input_example
-
-     def prepare_for_ipex(self, prompt, dtype=torch.float32, height=None, width=None, guidance_scale=7.5):
-         self.unet = self.unet.to(memory_format=torch.channels_last)
-         self.vae.decoder = self.vae.decoder.to(memory_format=torch.channels_last)
-         self.text_encoder = self.text_encoder.to(memory_format=torch.channels_last)
-         if self.safety_checker is not None:
-             self.safety_checker = self.safety_checker.to(memory_format=torch.channels_last)
-
-         unet_input_example, vae_decoder_input_example = self.get_input_example(prompt, height, width, guidance_scale)
-
-         # optimize with ipex
-         if dtype == torch.bfloat16:
-             self.unet = ipex.optimize(
-                 self.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=unet_input_example
-             )
-             self.vae.decoder = ipex.optimize(self.vae.decoder.eval(), dtype=torch.bfloat16, inplace=True)
-             self.text_encoder = ipex.optimize(self.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
-             if self.safety_checker is not None:
-                 self.safety_checker = ipex.optimize(self.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
-         elif dtype == torch.float32:
-             self.unet = ipex.optimize(
-                 self.unet.eval(),
-                 dtype=torch.float32,
-                 inplace=True,
-                 sample_input=unet_input_example,
-                 level="O1",
-                 weights_prepack=True,
-                 auto_kernel_selection=False,
-             )
-             self.vae.decoder = ipex.optimize(
-                 self.vae.decoder.eval(),
-                 dtype=torch.float32,
-                 inplace=True,
-                 level="O1",
-                 weights_prepack=True,
-                 auto_kernel_selection=False,
-             )
-             self.text_encoder = ipex.optimize(
-                 self.text_encoder.eval(),
-                 dtype=torch.float32,
-                 inplace=True,
-                 level="O1",
-                 weights_prepack=True,
-                 auto_kernel_selection=False,
-             )
-             if self.safety_checker is not None:
-                 self.safety_checker = ipex.optimize(
-                     self.safety_checker.eval(),
-                     dtype=torch.float32,
-                     inplace=True,
-                     level="O1",
-                     weights_prepack=True,
-                     auto_kernel_selection=False,
-                 )
-         else:
-             raise ValueError("The value of 'dtype' should be 'torch.bfloat16' or 'torch.float32' !")
-
-         # trace unet model to get better performance on IPEX
-         with torch.cpu.amp.autocast(enabled=dtype == torch.bfloat16), torch.no_grad():
-             unet_trace_model = torch.jit.trace(self.unet, unet_input_example, check_trace=False, strict=False)
-             unet_trace_model = torch.jit.freeze(unet_trace_model)
-             self.unet.forward = unet_trace_model.forward
-
-         # trace vae.decoder model to get better performance on IPEX
-         with torch.cpu.amp.autocast(enabled=dtype == torch.bfloat16), torch.no_grad():
-             vae_decoder_trace_model = torch.jit.trace(
-                 self.vae.decoder, vae_decoder_input_example, check_trace=False, strict=False
-             )
-             vae_decoder_trace_model = torch.jit.freeze(vae_decoder_trace_model)
-             self.vae.decoder.forward = vae_decoder_trace_model.forward
-
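The recipe above (ipex.optimize on an eval-mode module, torch.jit.trace with an example input, torch.jit.freeze, then swapping the module's forward) is how this pipeline gets its IPEX speedup. A standalone sketch of the same recipe on a toy module, assuming intel_extension_for_pytorch is installed:

import torch
import intel_extension_for_pytorch as ipex

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU()).eval()
example = torch.randn(1, 8)
model = ipex.optimize(model, dtype=torch.float32, inplace=True)
with torch.no_grad():
    traced = torch.jit.freeze(torch.jit.trace(model, example))
model.forward = traced.forward  # later calls run the frozen graph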
-     def enable_vae_slicing(self):
-         r"""
-         Enable sliced VAE decoding.
-
-         When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
-         steps. This is useful to save some memory and allow larger batch sizes.
-         """
-         self.vae.enable_slicing()
-
-     def disable_vae_slicing(self):
-         r"""
-         Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
-         computing decoding in one step.
-         """
-         self.vae.disable_slicing()
-
-     def enable_vae_tiling(self):
-         r"""
-         Enable tiled VAE decoding.
-
-         When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in
-         several steps. This is useful to save a large amount of memory and to allow the processing of larger images.
-         """
-         self.vae.enable_tiling()
-
-     def disable_vae_tiling(self):
-         r"""
-         Disable tiled VAE decoding. If `enable_vae_tiling` was previously invoked, this method will go back to
-         computing decoding in one step.
-         """
-         self.vae.disable_tiling()
-
-     def enable_sequential_cpu_offload(self, gpu_id=0):
-         r"""
-         Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
-         text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
-         `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
-         Note that offloading happens on a submodule basis. Memory savings are higher than with
-         `enable_model_cpu_offload`, but performance is lower.
-         """
-         if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
-             from accelerate import cpu_offload
-         else:
-             raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")
-
-         device = torch.device(f"cuda:{gpu_id}")
-
-         if self.device.type != "cpu":
-             self.to("cpu", silence_dtype_warnings=True)
-             torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
-
-         for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
-             cpu_offload(cpu_offloaded_model, device)
-
-         if self.safety_checker is not None:
-             cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
-
-     def enable_model_cpu_offload(self, gpu_id=0):
-         r"""
-         Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
-         to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
-         method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
-         `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
-         """
-         if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
-             from accelerate import cpu_offload_with_hook
-         else:
-             raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
-
-         device = torch.device(f"cuda:{gpu_id}")
-
-         if self.device.type != "cpu":
-             self.to("cpu", silence_dtype_warnings=True)
-             torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
-
-         hook = None
-         for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
-             _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
-
-         if self.safety_checker is not None:
-             _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
-
-         # We'll offload the last model manually.
-         self.final_offload_hook = hook
-
-     @property
-     def _execution_device(self):
-         r"""
-         Returns the device on which the pipeline's models will be executed. After calling
-         `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
-         hooks.
-         """
-         if not hasattr(self.unet, "_hf_hook"):
-             return self.device
-         for module in self.unet.modules():
-             if (
-                 hasattr(module, "_hf_hook")
-                 and hasattr(module._hf_hook, "execution_device")
-                 and module._hf_hook.execution_device is not None
-             ):
-                 return torch.device(module._hf_hook.execution_device)
-         return self.device
-
-     def _encode_prompt(
-         self,
-         prompt,
-         device,
-         num_images_per_prompt,
-         do_classifier_free_guidance,
-         negative_prompt=None,
-         prompt_embeds: Optional[torch.FloatTensor] = None,
-         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-     ):
-         r"""
-         Encodes the prompt into text encoder hidden states.
-
-         Args:
-             prompt (`str` or `List[str]`, *optional*):
-                 prompt to be encoded
-             device: (`torch.device`):
-                 torch device
-             num_images_per_prompt (`int`):
-                 number of images that should be generated per prompt
-             do_classifier_free_guidance (`bool`):
-                 whether to use classifier free guidance or not
-             negative_prompt (`str` or `List[str]`, *optional*):
-                 The prompt or prompts not to guide the image generation. If not defined, one has to pass
-                 `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
-                 is less than `1`).
-             prompt_embeds (`torch.FloatTensor`, *optional*):
-                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
-                 provided, text embeddings will be generated from `prompt` input argument.
-             negative_prompt_embeds (`torch.FloatTensor`, *optional*):
-                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
-                 weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
-                 argument.
-         """
-         if prompt is not None and isinstance(prompt, str):
-             batch_size = 1
-         elif prompt is not None and isinstance(prompt, list):
-             batch_size = len(prompt)
-         else:
-             batch_size = prompt_embeds.shape[0]
-
-         if prompt_embeds is None:
-             text_inputs = self.tokenizer(
-                 prompt,
-                 padding="max_length",
-                 max_length=self.tokenizer.model_max_length,
-                 truncation=True,
-                 return_tensors="pt",
-             )
-             text_input_ids = text_inputs.input_ids
-             untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
-
-             if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
-                 text_input_ids, untruncated_ids
-             ):
-                 removed_text = self.tokenizer.batch_decode(
-                     untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
-                 )
-                 logger.warning(
-                     "The following part of your input was truncated because CLIP can only handle sequences up to"
-                     f" {self.tokenizer.model_max_length} tokens: {removed_text}"
-                 )
-
-             if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
-                 attention_mask = text_inputs.attention_mask.to(device)
-             else:
-                 attention_mask = None
-
-             prompt_embeds = self.text_encoder(
-                 text_input_ids.to(device),
-                 attention_mask=attention_mask,
-             )
-             prompt_embeds = prompt_embeds[0]
-
-         prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
-
-         bs_embed, seq_len, _ = prompt_embeds.shape
-         # duplicate text embeddings for each generation per prompt, using mps friendly method
-         prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
-         prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
-
-         # get unconditional embeddings for classifier free guidance
-         if do_classifier_free_guidance and negative_prompt_embeds is None:
-             uncond_tokens: List[str]
-             if negative_prompt is None:
-                 uncond_tokens = [""] * batch_size
-             elif type(prompt) is not type(negative_prompt):
-                 raise TypeError(
-                     f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
-                     f" {type(prompt)}."
-                 )
-             elif isinstance(negative_prompt, str):
-                 uncond_tokens = [negative_prompt]
-             elif batch_size != len(negative_prompt):
-                 raise ValueError(
-                     f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
-                     f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
-                     " the batch size of `prompt`."
-                 )
-             else:
-                 uncond_tokens = negative_prompt
-
-             max_length = prompt_embeds.shape[1]
-             uncond_input = self.tokenizer(
-                 uncond_tokens,
-                 padding="max_length",
-                 max_length=max_length,
-                 truncation=True,
-                 return_tensors="pt",
-             )
-
-             if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
-                 attention_mask = uncond_input.attention_mask.to(device)
-             else:
-                 attention_mask = None
-
-             negative_prompt_embeds = self.text_encoder(
-                 uncond_input.input_ids.to(device),
-                 attention_mask=attention_mask,
-             )
-             negative_prompt_embeds = negative_prompt_embeds[0]
-
-         if do_classifier_free_guidance:
-             # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
-             seq_len = negative_prompt_embeds.shape[1]
-
-             negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
-
-             negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
-             negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
-
-             # For classifier free guidance, we need to do two forward passes.
-             # Here we concatenate the unconditional and text embeddings into a single batch
-             # to avoid doing two forward passes
-             prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
-
-         return prompt_embeds
-
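_encode_prompt ends by stacking the negative and positive embeddings into one batch, so each denoising step can serve both classifier-free-guidance branches with a single UNet forward. A shape-level sketch (the sizes are the usual CLIP ones, shown for illustration):

import torch

negative_prompt_embeds = torch.randn(1, 77, 768)  # [batch, seq_len, hidden]
prompt_embeds = torch.randn(1, 77, 768)
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])  # -> (2, 77, 768)
# latents are duplicated the same way, and noise_pred.chunk(2) later undoes the stacking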
-     def run_safety_checker(self, image, device, dtype):
-         if self.safety_checker is not None:
-             safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
-             image, has_nsfw_concept = self.safety_checker(
-                 images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
-             )
-         else:
-             has_nsfw_concept = None
-         return image, has_nsfw_concept
-
-     def decode_latents(self, latents):
-         latents = 1 / self.vae.config.scaling_factor * latents
-         image = self.vae.decode(latents).sample
-         image = (image / 2 + 0.5).clamp(0, 1)
-         # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
-         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
-         return image
-
-     def prepare_extra_step_kwargs(self, generator, eta):
-         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
-         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
-         # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
-         # and should be between [0, 1]
-
-         accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
-         extra_step_kwargs = {}
-         if accepts_eta:
-             extra_step_kwargs["eta"] = eta
-
-         # check if the scheduler accepts generator
-         accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
-         if accepts_generator:
-             extra_step_kwargs["generator"] = generator
-         return extra_step_kwargs
-
-     def check_inputs(
-         self,
-         prompt,
-         height,
-         width,
-         callback_steps,
-         negative_prompt=None,
-         prompt_embeds=None,
-         negative_prompt_embeds=None,
-     ):
-         if height % 8 != 0 or width % 8 != 0:
-             raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
-         if (callback_steps is None) or (
-             callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
-         ):
-             raise ValueError(
-                 f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
-                 f" {type(callback_steps)}."
-             )
-
-         if prompt is not None and prompt_embeds is not None:
-             raise ValueError(
-                 f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
-                 " only forward one of the two."
-             )
-         elif prompt is None and prompt_embeds is None:
-             raise ValueError(
-                 "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
-             )
-         elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
-             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-         if negative_prompt is not None and negative_prompt_embeds is not None:
-             raise ValueError(
-                 f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
-                 f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
-             )
-
-         if prompt_embeds is not None and negative_prompt_embeds is not None:
-             if prompt_embeds.shape != negative_prompt_embeds.shape:
-                 raise ValueError(
-                     "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
-                     f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
-                     f" {negative_prompt_embeds.shape}."
-                 )
-
-     def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
-         shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
-         if isinstance(generator, list) and len(generator) != batch_size:
-             raise ValueError(
-                 f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
-                 f" size of {batch_size}. Make sure the batch size matches the length of the generators."
-             )
-
-         if latents is None:
-             latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-         else:
-             latents = latents.to(device)
-
-         # scale the initial noise by the standard deviation required by the scheduler
-         latents = latents * self.scheduler.init_noise_sigma
-         return latents
-
-     @torch.no_grad()
-     @replace_example_docstring(EXAMPLE_DOC_STRING)
-     def __call__(
-         self,
-         prompt: Union[str, List[str]] = None,
-         height: Optional[int] = None,
-         width: Optional[int] = None,
-         num_inference_steps: int = 50,
-         guidance_scale: float = 7.5,
-         negative_prompt: Optional[Union[str, List[str]]] = None,
-         num_images_per_prompt: Optional[int] = 1,
-         eta: float = 0.0,
-         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-         latents: Optional[torch.FloatTensor] = None,
-         prompt_embeds: Optional[torch.FloatTensor] = None,
-         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-         output_type: Optional[str] = "pil",
-         return_dict: bool = True,
-         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-         callback_steps: int = 1,
-         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
-     ):
-         r"""
-         Function invoked when calling the pipeline for generation.
-
-         Args:
-             prompt (`str` or `List[str]`, *optional*):
-                 The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
-                 instead.
-             height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
-                 The height in pixels of the generated image.
-             width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
-                 The width in pixels of the generated image.
-             num_inference_steps (`int`, *optional*, defaults to 50):
-                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
-                 expense of slower inference.
-             guidance_scale (`float`, *optional*, defaults to 7.5):
-                 Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
-                 `guidance_scale` is defined as `w` of equation 2. of [Imagen
-                 Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
-                 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
-                 usually at the expense of lower image quality.
-             negative_prompt (`str` or `List[str]`, *optional*):
-                 The prompt or prompts not to guide the image generation. If not defined, one has to pass
-                 `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
-                 is less than `1`).
-             num_images_per_prompt (`int`, *optional*, defaults to 1):
-                 The number of images to generate per prompt.
-             eta (`float`, *optional*, defaults to 0.0):
-                 Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
-                 [`schedulers.DDIMScheduler`], will be ignored for others.
-             generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
-                 One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
-                 to make generation deterministic.
-             latents (`torch.FloatTensor`, *optional*):
-                 Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
-                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
-                 tensor will be generated by sampling using the supplied random `generator`.
-             prompt_embeds (`torch.FloatTensor`, *optional*):
-                 Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
-                 provided, text embeddings will be generated from `prompt` input argument.
-             negative_prompt_embeds (`torch.FloatTensor`, *optional*):
-                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
-                 weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
-                 argument.
-             output_type (`str`, *optional*, defaults to `"pil"`):
-                 The output format of the generated image. Choose between
-                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-             return_dict (`bool`, *optional*, defaults to `True`):
-                 Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
-                 plain tuple.
-             callback (`Callable`, *optional*):
-                 A function that will be called every `callback_steps` steps during inference. The function will be
-                 called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
-             callback_steps (`int`, *optional*, defaults to 1):
-                 The frequency at which the `callback` function will be called. If not specified, the callback will be
-                 called at every step.
-             cross_attention_kwargs (`dict`, *optional*):
-                 A kwargs dictionary that if specified is passed along to the `AttnProcessor` as defined under
-                 `self.processor` in
-                 [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
-
-         Examples:
-
-         Returns:
-             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
-             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
-             `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
-             element is a list of `bool`s denoting whether the corresponding generated image likely represents
-             "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
-         """
-         # 0. Default height and width to unet
-         height = height or self.unet.config.sample_size * self.vae_scale_factor
-         width = width or self.unet.config.sample_size * self.vae_scale_factor
-
-         # 1. Check inputs. Raise error if not correct
-         self.check_inputs(
-             prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds
-         )
-
-         # 2. Define call parameters
-         if prompt is not None and isinstance(prompt, str):
-             batch_size = 1
-         elif prompt is not None and isinstance(prompt, list):
-             batch_size = len(prompt)
-         else:
-             batch_size = prompt_embeds.shape[0]
-
-         device = self._execution_device
-         # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
-         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-         # corresponds to doing no classifier free guidance.
-         do_classifier_free_guidance = guidance_scale > 1.0
-
-         # 3. Encode input prompt
-         prompt_embeds = self._encode_prompt(
-             prompt,
-             device,
-             num_images_per_prompt,
-             do_classifier_free_guidance,
-             negative_prompt,
-             prompt_embeds=prompt_embeds,
-             negative_prompt_embeds=negative_prompt_embeds,
-         )
-
-         # 4. Prepare timesteps
-         self.scheduler.set_timesteps(num_inference_steps, device=device)
-         timesteps = self.scheduler.timesteps
-
-         # 5. Prepare latent variables
-         num_channels_latents = self.unet.in_channels
-         latents = self.prepare_latents(
-             batch_size * num_images_per_prompt,
-             num_channels_latents,
-             height,
-             width,
-             prompt_embeds.dtype,
-             device,
-             generator,
-             latents,
-         )
-
-         # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
-         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
-         # 7. Denoising loop
-         num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
-         with self.progress_bar(total=num_inference_steps) as progress_bar:
-             for i, t in enumerate(timesteps):
-                 # expand the latents if we are doing classifier free guidance
-                 latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
-                 latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
-
-                 # predict the noise residual
-                 noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds)["sample"]
-
-                 # perform guidance
-                 if do_classifier_free_guidance:
-                     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                     noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-                 # compute the previous noisy sample x_t -> x_t-1
-                 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
-
-                 # call the callback, if provided
-                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
-                     progress_bar.update()
-                     if callback is not None and i % callback_steps == 0:
-                         callback(i, t, latents)
-
-         if output_type == "latent":
-             image = latents
-             has_nsfw_concept = None
-         elif output_type == "pil":
-             # 8. Post-processing
-             image = self.decode_latents(latents)
-
-             # 9. Run safety checker
-             image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
-
-             # 10. Convert to PIL
-             image = self.numpy_to_pil(image)
-         else:
-             # 8. Post-processing
-             image = self.decode_latents(latents)
-
-             # 9. Run safety checker
-             image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
-
-         # Offload last model to CPU
-         if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
-             self.final_offload_hook.offload()
-
-         if not return_dict:
-             return (image, has_nsfw_concept)
-
-         return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
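The guidance step in the denoising loop computes noise_pred = uncond + scale * (text - uncond). A scalar worked example: with noise_pred_uncond = 0.2, noise_pred_text = 0.5 and guidance_scale = 7.5,

guided = 0.2 + 7.5 * (0.5 - 0.2)  # = 2.45, pushed well past the text-conditioned value

so higher scales extrapolate further away from the unconditional prediction.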
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/scripts/__init__.py DELETED
File without changes
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/kandinsky2_2/pipeline_kandinsky2_2.py DELETED
@@ -1,294 +0,0 @@
- # Copyright 2023 The HuggingFace Team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from typing import Callable, List, Optional, Union
-
- import torch
-
- from ...models import UNet2DConditionModel, VQModel
- from ...schedulers import DDPMScheduler
- from ...utils import (
-     is_accelerate_available,
-     is_accelerate_version,
-     logging,
-     randn_tensor,
-     replace_example_docstring,
- )
- from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
-
-
- logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
- EXAMPLE_DOC_STRING = """
-     Examples:
-         ```py
-         >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
-         >>> import torch
-
-         >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
-         >>> pipe_prior.to("cuda")
-         >>> prompt = "red cat, 4k photo"
-         >>> out = pipe_prior(prompt)
-         >>> image_emb = out.image_embeds
-         >>> zero_image_emb = out.negative_image_embeds
-         >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
-         >>> pipe.to("cuda")
-         >>> image = pipe(
-         ...     image_embeds=image_emb,
-         ...     negative_image_embeds=zero_image_emb,
-         ...     height=768,
-         ...     width=768,
-         ...     num_inference_steps=50,
-         ... ).images
-         >>> image[0].save("cat.png")
-         ```
- """
-
-
- def downscale_height_and_width(height, width, scale_factor=8):
-     new_height = height // scale_factor**2
-     if height % scale_factor**2 != 0:
-         new_height += 1
-     new_width = width // scale_factor**2
-     if width % scale_factor**2 != 0:
-         new_width += 1
-     return new_height * scale_factor, new_width * scale_factor
-
-
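downscale_height_and_width snaps a requested size to the movq grid: divide by scale_factor**2 (64 by default), round up, then multiply back by scale_factor to get the latent-space size. Worked examples:

downscale_height_and_width(768, 768)  # 768 // 64 = 12 exactly        -> (96, 96)
downscale_height_and_width(500, 500)  # 500 // 64 = 7, remainder -> 8 -> (64, 64)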
- class KandinskyV22Pipeline(DiffusionPipeline):
-     """
-     Pipeline for text-to-image generation using Kandinsky
-
-     This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
-     library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
-
-     Args:
-         scheduler (Union[`DDIMScheduler`,`DDPMScheduler`]):
-             A scheduler to be used in combination with `unet` to generate image latents.
-         unet ([`UNet2DConditionModel`]):
-             Conditional U-Net architecture to denoise the image embedding.
-         movq ([`VQModel`]):
-             MoVQ Decoder to generate the image from the latents.
-     """
-
-     def __init__(
-         self,
-         unet: UNet2DConditionModel,
-         scheduler: DDPMScheduler,
-         movq: VQModel,
-     ):
-         super().__init__()
-
-         self.register_modules(
-             unet=unet,
-             scheduler=scheduler,
-             movq=movq,
-         )
-         self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
-
-     # Copied from diffusers.pipelines.unclip.pipeline_unclip.UnCLIPPipeline.prepare_latents
-     def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
-         if latents is None:
-             latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
-         else:
-             if latents.shape != shape:
-                 raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
-             latents = latents.to(device)
-
-         latents = latents * scheduler.init_noise_sigma
-         return latents
-
-     def enable_model_cpu_offload(self, gpu_id=0):
-         r"""
-         Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
-         to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
-         method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
-         `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
-         """
-         if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
-             from accelerate import cpu_offload_with_hook
-         else:
-             raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
-
-         device = torch.device(f"cuda:{gpu_id}")
-
-         if self.device.type != "cpu":
-             self.to("cpu", silence_dtype_warnings=True)
-             torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
-
-         hook = None
-         for cpu_offloaded_model in [self.unet, self.movq]:
-             _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
-
-         # We'll offload the last model manually.
-         self.final_offload_hook = hook
-
-     @torch.no_grad()
-     @replace_example_docstring(EXAMPLE_DOC_STRING)
-     def __call__(
-         self,
-         image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
-         negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
-         height: int = 512,
-         width: int = 512,
-         num_inference_steps: int = 100,
-         guidance_scale: float = 4.0,
-         num_images_per_prompt: int = 1,
-         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-         latents: Optional[torch.FloatTensor] = None,
-         output_type: Optional[str] = "pil",
-         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
-         callback_steps: int = 1,
-         return_dict: bool = True,
-     ):
-         """
-         Function invoked when calling the pipeline for generation.
-
-         Args:
-             image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
-                 The CLIP image embeddings for the text prompt, which will be used to condition the image generation.
-             negative_image_embeds (`torch.FloatTensor` or `List[torch.FloatTensor]`):
162
- The clip image embeddings for negative text prompt, will be used to condition the image generation.
163
- height (`int`, *optional*, defaults to 512):
164
- The height in pixels of the generated image.
165
- width (`int`, *optional*, defaults to 512):
166
- The width in pixels of the generated image.
167
- num_inference_steps (`int`, *optional*, defaults to 100):
168
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
169
- expense of slower inference.
170
- guidance_scale (`float`, *optional*, defaults to 4.0):
171
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
172
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
173
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
174
- 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
175
- usually at the expense of lower image quality.
176
- num_images_per_prompt (`int`, *optional*, defaults to 1):
177
- The number of images to generate per prompt.
178
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
179
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
180
- to make generation deterministic.
181
- latents (`torch.FloatTensor`, *optional*):
182
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
183
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
184
- tensor will ge generated by sampling using the supplied random `generator`.
185
- output_type (`str`, *optional*, defaults to `"pil"`):
186
- The output format of the generate image. Choose between: `"pil"` (`PIL.Image.Image`), `"np"`
187
- (`np.array`) or `"pt"` (`torch.Tensor`).
188
- callback (`Callable`, *optional*):
189
- A function that calls every `callback_steps` steps during inference. The function is called with the
190
- following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
191
- callback_steps (`int`, *optional*, defaults to 1):
192
- The frequency at which the `callback` function is called. If not specified, the callback is called at
193
- every step.
194
- return_dict (`bool`, *optional*, defaults to `True`):
195
- Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
196
-
197
- Examples:
198
-
199
- Returns:
200
- [`~pipelines.ImagePipelineOutput`] or `tuple`
201
- """
202
- device = self._execution_device
203
-
204
- do_classifier_free_guidance = guidance_scale > 1.0
205
-
206
- if isinstance(image_embeds, list):
207
- image_embeds = torch.cat(image_embeds, dim=0)
208
- batch_size = image_embeds.shape[0] * num_images_per_prompt
209
- if isinstance(negative_image_embeds, list):
210
- negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
211
-
212
- if do_classifier_free_guidance:
213
- image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
214
- negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
215
-
216
- image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
217
- dtype=self.unet.dtype, device=device
218
- )
219
-
220
- self.scheduler.set_timesteps(num_inference_steps, device=device)
221
- timesteps_tensor = self.scheduler.timesteps
222
-
223
- num_channels_latents = self.unet.config.in_channels
224
-
225
- height, width = downscale_height_and_width(height, width, self.movq_scale_factor)
226
-
227
- # create initial latent
228
- latents = self.prepare_latents(
229
- (batch_size, num_channels_latents, height, width),
230
- image_embeds.dtype,
231
- device,
232
- generator,
233
- latents,
234
- self.scheduler,
235
- )
236
-
237
- for i, t in enumerate(self.progress_bar(timesteps_tensor)):
238
- # expand the latents if we are doing classifier free guidance
239
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
240
-
241
- added_cond_kwargs = {"image_embeds": image_embeds}
242
- noise_pred = self.unet(
243
- sample=latent_model_input,
244
- timestep=t,
245
- encoder_hidden_states=None,
246
- added_cond_kwargs=added_cond_kwargs,
247
- return_dict=False,
248
- )[0]
249
-
250
- if do_classifier_free_guidance:
251
- noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
252
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
253
- _, variance_pred_text = variance_pred.chunk(2)
254
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
255
- noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
256
-
257
- if not (
258
- hasattr(self.scheduler.config, "variance_type")
259
- and self.scheduler.config.variance_type in ["learned", "learned_range"]
260
- ):
261
- noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
262
-
263
- # compute the previous noisy sample x_t -> x_t-1
264
- latents = self.scheduler.step(
265
- noise_pred,
266
- t,
267
- latents,
268
- generator=generator,
269
- )[0]
270
-
271
- if callback is not None and i % callback_steps == 0:
272
- callback(i, t, latents)
273
- # post-processing
274
- image = self.movq.decode(latents, force_not_quantize=True)["sample"]
275
-
276
- # Offload last model to CPU
277
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
278
- self.final_offload_hook.offload()
279
-
280
- if output_type not in ["pt", "np", "pil"]:
281
- raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
282
-
283
- if output_type in ["np", "pil"]:
284
- image = image * 0.5 + 0.5
285
- image = image.clamp(0, 1)
286
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
287
-
288
- if output_type == "pil":
289
- image = self.numpy_to_pil(image)
290
-
291
- if not return_dict:
292
- return (image,)
293
-
294
- return ImagePipelineOutput(images=image)
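
For reference, `downscale_height_and_width` above maps requested pixel dimensions to latent dimensions: it divides by `scale_factor**2` with rounding up, then scales back by `scale_factor`, so the MoVQ decoder (an 8x upsampler here) always produces dimensions that are multiples of `scale_factor**2` pixels. A minimal standalone sketch of the same arithmetic, assuming the default `scale_factor=8`:

def latent_dims(height, width, scale_factor=8):
    # Same arithmetic as downscale_height_and_width above (ceil division).
    new_h = -(-height // scale_factor**2) * scale_factor
    new_w = -(-width // scale_factor**2) * scale_factor
    return new_h, new_w

assert latent_dims(768, 768) == (96, 96)   # decodes back to 768x768
assert latent_dims(100, 100) == (16, 16)   # decodes back to 128x128 (rounded up)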
spaces/Andy1621/uniformer_image_detection/mmdet/core/bbox/assigners/max_iou_assigner.py DELETED
@@ -1,212 +0,0 @@
- import torch
-
- from ..builder import BBOX_ASSIGNERS
- from ..iou_calculators import build_iou_calculator
- from .assign_result import AssignResult
- from .base_assigner import BaseAssigner
-
-
- @BBOX_ASSIGNERS.register_module()
- class MaxIoUAssigner(BaseAssigner):
-     """Assign a corresponding gt bbox or background to each bbox.
-
-     Each proposal will be assigned `-1`, or a semi-positive integer
-     indicating the ground truth index.
-
-     - -1: negative sample, no assigned gt
-     - semi-positive integer: positive sample, index (0-based) of assigned gt
-
-     Args:
-         pos_iou_thr (float): IoU threshold for positive bboxes.
-         neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
-         min_pos_iou (float): Minimum IoU for a bbox to be considered as a
-             positive bbox. Positive samples can have smaller IoU than
-             pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
-         gt_max_assign_all (bool): Whether to assign all bboxes with the same
-             highest overlap with some gt to that gt.
-         ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
-             `gt_bboxes_ignore` is specified). Negative values mean not
-             ignoring any bboxes.
-         ignore_wrt_candidates (bool): Whether to compute the iof between
-             `bboxes` and `gt_bboxes_ignore`, or the contrary.
-         match_low_quality (bool): Whether to allow low quality matches. This is
-             usually allowed for RPN and single stage detectors, but not allowed
-             in the second stage. Details are demonstrated in Step 4.
-         gpu_assign_thr (int): The upper bound of the number of GT for GPU
-             assign. When the number of gt is above this threshold, assignment
-             will be done on the CPU. Negative values mean never assigning on
-             the CPU.
-     """
-
-     def __init__(self,
-                  pos_iou_thr,
-                  neg_iou_thr,
-                  min_pos_iou=.0,
-                  gt_max_assign_all=True,
-                  ignore_iof_thr=-1,
-                  ignore_wrt_candidates=True,
-                  match_low_quality=True,
-                  gpu_assign_thr=-1,
-                  iou_calculator=dict(type='BboxOverlaps2D')):
-         self.pos_iou_thr = pos_iou_thr
-         self.neg_iou_thr = neg_iou_thr
-         self.min_pos_iou = min_pos_iou
-         self.gt_max_assign_all = gt_max_assign_all
-         self.ignore_iof_thr = ignore_iof_thr
-         self.ignore_wrt_candidates = ignore_wrt_candidates
-         self.gpu_assign_thr = gpu_assign_thr
-         self.match_low_quality = match_low_quality
-         self.iou_calculator = build_iou_calculator(iou_calculator)
-
-     def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
-         """Assign gt to bboxes.
-
-         This method assigns a gt bbox to every bbox (proposal/anchor); each bbox
-         will be assigned -1 or a semi-positive number. -1 means negative
-         sample, a semi-positive number is the index (0-based) of the assigned gt.
-         The assignment is done in the following steps, and the order matters.
-
-         1. assign every bbox to the background
-         2. assign proposals whose iou with all gts < neg_iou_thr to 0
-         3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
-            assign it to that gt
-         4. for each gt bbox, assign its nearest proposals (may be more than
-            one) to itself
-
-         Args:
-             bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
-             gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
-             gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
-                 labelled as `ignored`, e.g., crowd boxes in COCO.
-             gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
-
-         Returns:
-             :obj:`AssignResult`: The assign result.
-
-         Example:
-             >>> self = MaxIoUAssigner(0.5, 0.5)
-             >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
-             >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]])
-             >>> assign_result = self.assign(bboxes, gt_bboxes)
-             >>> expected_gt_inds = torch.LongTensor([1, 0])
-             >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
-         """
-         assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
-             gt_bboxes.shape[0] > self.gpu_assign_thr) else False
-         # compute overlap and assign gt on CPU when number of GT is large
-         if assign_on_cpu:
-             device = bboxes.device
-             bboxes = bboxes.cpu()
-             gt_bboxes = gt_bboxes.cpu()
-             if gt_bboxes_ignore is not None:
-                 gt_bboxes_ignore = gt_bboxes_ignore.cpu()
-             if gt_labels is not None:
-                 gt_labels = gt_labels.cpu()
-
-         overlaps = self.iou_calculator(gt_bboxes, bboxes)
-
-         if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
-                 and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
-             if self.ignore_wrt_candidates:
-                 ignore_overlaps = self.iou_calculator(
-                     bboxes, gt_bboxes_ignore, mode='iof')
-                 ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
-             else:
-                 ignore_overlaps = self.iou_calculator(
-                     gt_bboxes_ignore, bboxes, mode='iof')
-                 ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
-             overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1
-
-         assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
-         if assign_on_cpu:
-             assign_result.gt_inds = assign_result.gt_inds.to(device)
-             assign_result.max_overlaps = assign_result.max_overlaps.to(device)
-             if assign_result.labels is not None:
-                 assign_result.labels = assign_result.labels.to(device)
-         return assign_result
-
-     def assign_wrt_overlaps(self, overlaps, gt_labels=None):
-         """Assign w.r.t. the overlaps of bboxes with gts.
-
-         Args:
-             overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
-                 shape(k, n).
-             gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ).
-
-         Returns:
-             :obj:`AssignResult`: The assign result.
-         """
-         num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)
-
-         # 1. assign -1 by default
-         assigned_gt_inds = overlaps.new_full((num_bboxes, ),
-                                              -1,
-                                              dtype=torch.long)
-
-         if num_gts == 0 or num_bboxes == 0:
-             # No ground truth or boxes, return empty assignment
-             max_overlaps = overlaps.new_zeros((num_bboxes, ))
-             if num_gts == 0:
-                 # No truth, assign everything to background
-                 assigned_gt_inds[:] = 0
-             if gt_labels is None:
-                 assigned_labels = None
-             else:
-                 assigned_labels = overlaps.new_full((num_bboxes, ),
-                                                     -1,
-                                                     dtype=torch.long)
-             return AssignResult(
-                 num_gts,
-                 assigned_gt_inds,
-                 max_overlaps,
-                 labels=assigned_labels)
-
-         # for each anchor, which gt best overlaps with it
-         # for each anchor, the max iou of all gts
-         max_overlaps, argmax_overlaps = overlaps.max(dim=0)
-         # for each gt, which anchor best overlaps with it
-         # for each gt, the max iou of all proposals
-         gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
-
-         # 2. assign negative: below
-         # the negative inds are set to be 0
-         if isinstance(self.neg_iou_thr, float):
-             assigned_gt_inds[(max_overlaps >= 0)
-                              & (max_overlaps < self.neg_iou_thr)] = 0
-         elif isinstance(self.neg_iou_thr, tuple):
-             assert len(self.neg_iou_thr) == 2
-             assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
-                              & (max_overlaps < self.neg_iou_thr[1])] = 0
-
-         # 3. assign positive: above positive IoU threshold
-         pos_inds = max_overlaps >= self.pos_iou_thr
-         assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
-
-         if self.match_low_quality:
-             # Low-quality matching will overwrite the assigned_gt_inds assigned
-             # in Step 3. Thus, the assigned gt might not be the best one for
-             # prediction.
-             # For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,
-             # gt bbox 1 will be assigned as the best target for bbox A in step 3.
-             # However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's
-             # assigned_gt_inds will be overwritten to be gt bbox 2.
-             # This might be the reason that it is not used in ROI Heads.
-             for i in range(num_gts):
-                 if gt_max_overlaps[i] >= self.min_pos_iou:
-                     if self.gt_max_assign_all:
-                         max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]
-                         assigned_gt_inds[max_iou_inds] = i + 1
-                     else:
-                         assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
-
-         if gt_labels is not None:
-             assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
-             pos_inds = torch.nonzero(
-                 assigned_gt_inds > 0, as_tuple=False).squeeze()
-             if pos_inds.numel() > 0:
-                 assigned_labels[pos_inds] = gt_labels[
-                     assigned_gt_inds[pos_inds] - 1]
-         else:
-             assigned_labels = None
-
-         return AssignResult(
-             num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
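
To make the four assignment steps concrete, here is a hedged toy walk-through of `assign_wrt_overlaps` with hypothetical IoU values (the thresholds are illustrative only; requires the mmdet registry to be importable):

import torch

assigner = MaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.4)
overlaps = torch.tensor([[0.90, 0.30],   # gt 0 vs bbox 0 / bbox 1
                         [0.20, 0.45]])  # gt 1 vs bbox 0 / bbox 1
result = assigner.assign_wrt_overlaps(overlaps)
# bbox 0: max IoU 0.90 >= pos_iou_thr, so step 3 assigns gt 0 (stored as 1).
# bbox 1: max IoU 0.45 is neither negative (< 0.4) nor positive (>= 0.5),
# but step 4 (match_low_quality with min_pos_iou=0.0) hands gt 1 its best
# proposal, so bbox 1 ends up assigned to gt 1 (stored as 2).
assert result.gt_inds.tolist() == [1, 2]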
spaces/AnimalEquality/chatbot/_proc/_docs/site_libs/quarto-html/quarto-syntax-highlighting.css DELETED
@@ -1,203 +0,0 @@
- /* quarto syntax highlight colors */
- :root {
-   --quarto-hl-ot-color: #003B4F;
-   --quarto-hl-at-color: #657422;
-   --quarto-hl-ss-color: #20794D;
-   --quarto-hl-an-color: #5E5E5E;
-   --quarto-hl-fu-color: #4758AB;
-   --quarto-hl-st-color: #20794D;
-   --quarto-hl-cf-color: #003B4F;
-   --quarto-hl-op-color: #5E5E5E;
-   --quarto-hl-er-color: #AD0000;
-   --quarto-hl-bn-color: #AD0000;
-   --quarto-hl-al-color: #AD0000;
-   --quarto-hl-va-color: #111111;
-   --quarto-hl-bu-color: inherit;
-   --quarto-hl-ex-color: inherit;
-   --quarto-hl-pp-color: #AD0000;
-   --quarto-hl-in-color: #5E5E5E;
-   --quarto-hl-vs-color: #20794D;
-   --quarto-hl-wa-color: #5E5E5E;
-   --quarto-hl-do-color: #5E5E5E;
-   --quarto-hl-im-color: #00769E;
-   --quarto-hl-ch-color: #20794D;
-   --quarto-hl-dt-color: #AD0000;
-   --quarto-hl-fl-color: #AD0000;
-   --quarto-hl-co-color: #5E5E5E;
-   --quarto-hl-cv-color: #5E5E5E;
-   --quarto-hl-cn-color: #8f5902;
-   --quarto-hl-sc-color: #5E5E5E;
-   --quarto-hl-dv-color: #AD0000;
-   --quarto-hl-kw-color: #003B4F;
- }
-
- /* other quarto variables */
- :root {
-   --quarto-font-monospace: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
- }
-
- pre > code.sourceCode > span {
-   color: #003B4F;
- }
-
- code span {
-   color: #003B4F;
- }
-
- code.sourceCode > span {
-   color: #003B4F;
- }
-
- div.sourceCode,
- div.sourceCode pre.sourceCode {
-   color: #003B4F;
- }
-
- code span.ot {
-   color: #003B4F;
-   font-style: inherit;
- }
-
- code span.at {
-   color: #657422;
-   font-style: inherit;
- }
-
- code span.ss {
-   color: #20794D;
-   font-style: inherit;
- }
-
- code span.an {
-   color: #5E5E5E;
-   font-style: inherit;
- }
-
- code span.fu {
-   color: #4758AB;
-   font-style: inherit;
- }
-
- code span.st {
-   color: #20794D;
-   font-style: inherit;
- }
-
- code span.cf {
-   color: #003B4F;
-   font-style: inherit;
- }
-
- code span.op {
-   color: #5E5E5E;
-   font-style: inherit;
- }
-
- code span.er {
-   color: #AD0000;
-   font-style: inherit;
- }
-
- code span.bn {
-   color: #AD0000;
-   font-style: inherit;
- }
-
- code span.al {
-   color: #AD0000;
-   font-style: inherit;
- }
-
- code span.va {
-   color: #111111;
-   font-style: inherit;
- }
-
- code span.bu {
-   font-style: inherit;
- }
-
- code span.ex {
-   font-style: inherit;
- }
-
- code span.pp {
-   color: #AD0000;
-   font-style: inherit;
- }
-
- code span.in {
-   color: #5E5E5E;
-   font-style: inherit;
- }
-
- code span.vs {
-   color: #20794D;
-   font-style: inherit;
- }
-
- code span.wa {
-   color: #5E5E5E;
-   font-style: italic;
- }
-
- code span.do {
-   color: #5E5E5E;
-   font-style: italic;
- }
-
- code span.im {
-   color: #00769E;
-   font-style: inherit;
- }
-
- code span.ch {
-   color: #20794D;
-   font-style: inherit;
- }
-
- code span.dt {
-   color: #AD0000;
-   font-style: inherit;
- }
-
- code span.fl {
-   color: #AD0000;
-   font-style: inherit;
- }
-
- code span.co {
-   color: #5E5E5E;
-   font-style: inherit;
- }
-
- code span.cv {
-   color: #5E5E5E;
-   font-style: italic;
- }
-
- code span.cn {
-   color: #8f5902;
-   font-style: inherit;
- }
-
- code span.sc {
-   color: #5E5E5E;
-   font-style: inherit;
- }
-
- code span.dv {
-   color: #AD0000;
-   font-style: inherit;
- }
-
- code span.kw {
-   color: #003B4F;
-   font-style: inherit;
- }
-
- .prevent-inlining {
-   content: "</";
- }
-
- /*# sourceMappingURL=debc5d5d77c3f9108843748ff7464032.css.map */
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/ui_session.py DELETED
@@ -1,68 +0,0 @@
- import gradio as gr
-
- from modules import shared, ui, utils
- from modules.github import clone_or_pull_repository
- from modules.utils import gradio
-
-
- def create_ui():
-     mu = shared.args.multi_user
-     with gr.Tab("Session", elem_id="session-tab"):
-         with gr.Row():
-             with gr.Column():
-                 shared.gradio['reset_interface'] = gr.Button("Apply flags/extensions and restart", interactive=not mu)
-                 with gr.Row():
-                     shared.gradio['toggle_dark_mode'] = gr.Button('Toggle 💡')
-                     shared.gradio['save_settings'] = gr.Button('Save UI defaults to settings.yaml', interactive=not mu)
-
-                 with gr.Row():
-                     with gr.Column():
-                         shared.gradio['extensions_menu'] = gr.CheckboxGroup(choices=utils.get_available_extensions(), value=shared.args.extensions, label="Available extensions", info='Note that some of these extensions may require manually installing Python requirements through the command: pip install -r extensions/extension_name/requirements.txt', elem_classes='checkboxgroup-table')
-
-                     with gr.Column():
-                         shared.gradio['bool_menu'] = gr.CheckboxGroup(choices=get_boolean_arguments(), value=get_boolean_arguments(active=True), label="Boolean command-line flags", elem_classes='checkboxgroup-table')
-
-             with gr.Column():
-                 extension_name = gr.Textbox(lines=1, label='Install or update an extension', info='Enter the GitHub URL below and press Enter. For a list of extensions, see: https://github.com/oobabooga/text-generation-webui-extensions ⚠️ WARNING ⚠️ : extensions can execute arbitrary code. Make sure to inspect their source code before activating them.', interactive=not mu)
-                 extension_status = gr.Markdown()
-
-     extension_name.submit(clone_or_pull_repository, extension_name, extension_status, show_progress=False)
-
-     # Reset interface event
-     shared.gradio['reset_interface'].click(
-         set_interface_arguments, gradio('extensions_menu', 'bool_menu'), None).then(
-         lambda: None, None, None, _js='() => {document.body.innerHTML=\'<h1 style="font-family:monospace;padding-top:20%;margin:0;height:100vh;color:lightgray;text-align:center;background:var(--body-background-fill)">Reloading...</h1>\'; setTimeout(function(){location.reload()},2500); return []}')
-
-     shared.gradio['toggle_dark_mode'].click(lambda: None, None, None, _js='() => {document.getElementsByTagName("body")[0].classList.toggle("dark")}')
-     shared.gradio['save_settings'].click(
-         ui.gather_interface_values, gradio(shared.input_elements), gradio('interface_state')).then(
-         ui.save_settings, gradio('interface_state', 'preset_menu', 'instruction_template', 'extensions_menu', 'show_controls'), gradio('save_contents')).then(
-         lambda: './', None, gradio('save_root')).then(
-         lambda: 'settings.yaml', None, gradio('save_filename')).then(
-         lambda: gr.update(visible=True), None, gradio('file_saver'))
-
-
- def set_interface_arguments(extensions, bool_active):
-     shared.args.extensions = extensions
-
-     bool_list = get_boolean_arguments()
-
-     for k in bool_list:
-         setattr(shared.args, k, False)
-     for k in bool_active:
-         setattr(shared.args, k, True)
-
-     shared.need_restart = True
-
-
- def get_boolean_arguments(active=False):
-     exclude = ["default", "notebook", "chat"]
-
-     cmd_list = vars(shared.args)
-     bool_list = sorted([k for k in cmd_list if type(cmd_list[k]) is bool and k not in exclude + ui.list_model_elements()])
-     bool_active = [k for k in bool_list if vars(shared.args)[k]]
-
-     if active:
-         return bool_active
-     else:
-         return bool_list
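
The flag handling in `set_interface_arguments` above is reset-then-apply: every boolean flag is first cleared, then only the checked boxes are re-enabled, so unchecking a box in the UI also clears its flag. A self-contained illustration with hypothetical flag names:

import argparse

args = argparse.Namespace(api=True, listen=False)  # stand-in for shared.args
for k in ["api", "listen"]:   # reset all boolean flags
    setattr(args, k, False)
for k in ["listen"]:          # boxes the user left checked
    setattr(args, k, True)
assert vars(args) == {"api": False, "listen": True}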
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/configs/_base_/datasets/stare.py DELETED
@@ -1,59 +0,0 @@
- # dataset settings
- dataset_type = 'STAREDataset'
- data_root = 'data/STARE'
- img_norm_cfg = dict(
-     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
- img_scale = (605, 700)
- crop_size = (128, 128)
- train_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(type='LoadAnnotations'),
-     dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
-     dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
-     dict(type='RandomFlip', prob=0.5),
-     dict(type='PhotoMetricDistortion'),
-     dict(type='Normalize', **img_norm_cfg),
-     dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
-     dict(type='DefaultFormatBundle'),
-     dict(type='Collect', keys=['img', 'gt_semantic_seg'])
- ]
- test_pipeline = [
-     dict(type='LoadImageFromFile'),
-     dict(
-         type='MultiScaleFlipAug',
-         img_scale=img_scale,
-         # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
-         flip=False,
-         transforms=[
-             dict(type='Resize', keep_ratio=True),
-             dict(type='RandomFlip'),
-             dict(type='Normalize', **img_norm_cfg),
-             dict(type='ImageToTensor', keys=['img']),
-             dict(type='Collect', keys=['img'])
-         ])
- ]
-
- data = dict(
-     samples_per_gpu=4,
-     workers_per_gpu=4,
-     train=dict(
-         type='RepeatDataset',
-         times=40000,
-         dataset=dict(
-             type=dataset_type,
-             data_root=data_root,
-             img_dir='images/training',
-             ann_dir='annotations/training',
-             pipeline=train_pipeline)),
-     val=dict(
-         type=dataset_type,
-         data_root=data_root,
-         img_dir='images/validation',
-         ann_dir='annotations/validation',
-         pipeline=test_pipeline),
-     test=dict(
-         type=dataset_type,
-         data_root=data_root,
-         img_dir='images/validation',
-         ann_dir='annotations/validation',
-         pipeline=test_pipeline))
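
For context, a hedged sketch of how a dataset config like this is consumed on the mmseg side (`build_dataset` is mmseg's standard builder; the config path here is hypothetical):

from mmcv import Config
from mmseg.datasets import build_dataset

cfg = Config.fromfile('configs/_base_/datasets/stare.py')  # hypothetical path
train_set = build_dataset(cfg.data.train)
# RepeatDataset(times=40000) wraps the tiny STARE training split so that
# iteration-based training schedules never exhaust the dataloader.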
spaces/Aphrodite/stable-diffusion-2/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: Stable Diffusion 2
- emoji: 🐠
- colorFrom: purple
- colorTo: green
- sdk: gradio
- sdk_version: 3.11.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/data/datasets/coco.py DELETED
@@ -1,539 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- import contextlib
- import datetime
- import io
- import json
- import logging
- import numpy as np
- import os
- import shutil
- import pycocotools.mask as mask_util
- from fvcore.common.timer import Timer
- from iopath.common.file_io import file_lock
- from PIL import Image
-
- from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes
- from detectron2.utils.file_io import PathManager
-
- from .. import DatasetCatalog, MetadataCatalog
-
- """
- This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format".
- """
-
-
- logger = logging.getLogger(__name__)
-
- __all__ = ["load_coco_json", "load_sem_seg", "convert_to_coco_json", "register_coco_instances"]
-
-
- def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
-     """
-     Load a json file with COCO's instances annotation format.
-     Currently supports instance detection, instance segmentation,
-     and person keypoints annotations.
-
-     Args:
-         json_file (str): full path to the json file in COCO instances annotation format.
-         image_root (str or path-like): the directory where the images in this json file exists.
-         dataset_name (str or None): the name of the dataset (e.g., coco_2017_train).
-             When provided, this function will also do the following:
-
-             * Put "thing_classes" into the metadata associated with this dataset.
-             * Map the category ids into a contiguous range (needed by standard dataset format),
-               and add "thing_dataset_id_to_contiguous_id" to the metadata associated
-               with this dataset.
-
-             This option should usually be provided, unless users need to load
-             the original json content and apply more processing manually.
-         extra_annotation_keys (list[str]): list of per-annotation keys that should also be
-             loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
-             "category_id", "segmentation"). The values for these keys will be returned as-is.
-             For example, the densepose annotations are loaded in this way.
-
-     Returns:
-         list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See
-         `Using Custom Datasets </tutorials/datasets.html>`_ ) when `dataset_name` is not None.
-         If `dataset_name` is None, the returned `category_ids` may be
-         incontiguous and may not conform to the Detectron2 standard format.
-
-     Notes:
-         1. This function does not read the image files.
-            The results do not have the "image" field.
-     """
-     from pycocotools.coco import COCO
-
-     timer = Timer()
-     json_file = PathManager.get_local_path(json_file)
-     with contextlib.redirect_stdout(io.StringIO()):
-         coco_api = COCO(json_file)
-     if timer.seconds() > 1:
-         logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
-
-     id_map = None
-     if dataset_name is not None:
-         meta = MetadataCatalog.get(dataset_name)
-         cat_ids = sorted(coco_api.getCatIds())
-         cats = coco_api.loadCats(cat_ids)
-         # The categories in a custom json file may not be sorted.
-         thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
-         meta.thing_classes = thing_classes
-
-         # In COCO, certain category ids are artificially removed,
-         # and by convention they are always ignored.
-         # We deal with COCO's id issue and translate
-         # the category ids to contiguous ids in [0, 80).
-
-         # It works by looking at the "categories" field in the json, therefore
-         # if users' own json also have incontiguous ids, we'll
-         # apply this mapping as well but print a warning.
-         if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
-             if "coco" not in dataset_name:
-                 logger.warning(
-                     """
- Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
- """
-                 )
-         id_map = {v: i for i, v in enumerate(cat_ids)}
-         meta.thing_dataset_id_to_contiguous_id = id_map
-
-     # sort indices for reproducible results
-     img_ids = sorted(coco_api.imgs.keys())
-     # imgs is a list of dicts, each looks something like:
-     # {'license': 4,
-     #  'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
-     #  'file_name': 'COCO_val2014_000000001268.jpg',
-     #  'height': 427,
-     #  'width': 640,
-     #  'date_captured': '2013-11-17 05:57:24',
-     #  'id': 1268}
-     imgs = coco_api.loadImgs(img_ids)
-     # anns is a list[list[dict]], where each dict is an annotation
-     # record for an object. The inner list enumerates the objects in an image
-     # and the outer list enumerates over images. Example of anns[0]:
-     # [{'segmentation': [[192.81,
-     #     247.09,
-     #     ...
-     #     219.03,
-     #     249.06]],
-     #   'area': 1035.749,
-     #   'iscrowd': 0,
-     #   'image_id': 1268,
-     #   'bbox': [192.81, 224.8, 74.73, 33.43],
-     #   'category_id': 16,
-     #   'id': 42986},
-     #  ...]
-     anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
-     total_num_valid_anns = sum([len(x) for x in anns])
-     total_num_anns = len(coco_api.anns)
-     if total_num_valid_anns < total_num_anns:
-         logger.warning(
-             f"{json_file} contains {total_num_anns} annotations, but only "
-             f"{total_num_valid_anns} of them match to images in the file."
-         )
-
-     if "minival" not in json_file:
-         # The popular valminusminival & minival annotations for COCO2014 contain this bug.
-         # However the ratio of buggy annotations there is tiny and does not affect accuracy.
-         # Therefore we explicitly white-list them.
-         ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
-         assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
-             json_file
-         )
-
-     imgs_anns = list(zip(imgs, anns))
-     logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file))
-
-     dataset_dicts = []
-
-     ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or [])
-
-     num_instances_without_valid_segmentation = 0
-
-     for (img_dict, anno_dict_list) in imgs_anns:
-         record = {}
-         record["file_name"] = os.path.join(image_root, img_dict["file_name"])
-         record["height"] = img_dict["height"]
-         record["width"] = img_dict["width"]
-         image_id = record["image_id"] = img_dict["id"]
-
-         objs = []
-         for anno in anno_dict_list:
-             # Check that the image_id in this annotation is the same as
-             # the image_id we're looking at.
-             # This fails only when the data parsing logic or the annotation file is buggy.
-
-             # The original COCO valminusminival2014 & minival2014 annotation files
-             # actually contains bugs that, together with certain ways of using COCO API,
-             # can trigger this assertion.
-             assert anno["image_id"] == image_id
-
-             assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.'
-
-             obj = {key: anno[key] for key in ann_keys if key in anno}
-             if "bbox" in obj and len(obj["bbox"]) == 0:
-                 raise ValueError(
-                     f"One annotation of image {image_id} contains empty 'bbox' value! "
-                     "This json does not have valid COCO format."
-                 )
-
-             segm = anno.get("segmentation", None)
-             if segm:  # either list[list[float]] or dict(RLE)
-                 if isinstance(segm, dict):
-                     if isinstance(segm["counts"], list):
-                         # convert to compressed RLE
-                         segm = mask_util.frPyObjects(segm, *segm["size"])
-                 else:
-                     # filter out invalid polygons (< 3 points)
-                     segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
-                     if len(segm) == 0:
-                         num_instances_without_valid_segmentation += 1
-                         continue  # ignore this instance
-                 obj["segmentation"] = segm
-
-             keypts = anno.get("keypoints", None)
-             if keypts:  # list[int]
-                 for idx, v in enumerate(keypts):
-                     if idx % 3 != 2:
-                         # COCO's segmentation coordinates are floating points in [0, H or W],
-                         # but keypoint coordinates are integers in [0, H-1 or W-1]
-                         # Therefore we assume the coordinates are "pixel indices" and
-                         # add 0.5 to convert to floating point coordinates.
-                         keypts[idx] = v + 0.5
-                 obj["keypoints"] = keypts
-
-             obj["bbox_mode"] = BoxMode.XYWH_ABS
-             if id_map:
-                 annotation_category_id = obj["category_id"]
-                 try:
-                     obj["category_id"] = id_map[annotation_category_id]
-                 except KeyError as e:
-                     raise KeyError(
-                         f"Encountered category_id={annotation_category_id} "
-                         "but this id does not exist in 'categories' of the json file."
-                     ) from e
-             objs.append(obj)
-         record["annotations"] = objs
-         dataset_dicts.append(record)
-
-     if num_instances_without_valid_segmentation > 0:
-         logger.warning(
-             "Filtered out {} instances without valid segmentation. ".format(
-                 num_instances_without_valid_segmentation
-             )
-             + "There might be issues in your dataset generation process. Please "
-             "check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully"
-         )
-     return dataset_dicts
-
-
- def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"):
-     """
-     Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are
-     treated as ground truth annotations and all files under "image_root" with "image_ext" extension
-     as input images. Ground truth and input images are matched using file paths relative to
-     "gt_root" and "image_root" respectively without taking into account file extensions.
-     This works for COCO as well as some other datasets.
-
-     Args:
-         gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation
-             annotations are stored as images with integer values in pixels that represent
-             corresponding semantic labels.
-         image_root (str): the directory where the input images are.
-         gt_ext (str): file extension for ground truth annotations.
-         image_ext (str): file extension for input images.
-
-     Returns:
-         list[dict]:
-             a list of dicts in detectron2 standard format without instance-level
-             annotation.
-
-     Notes:
-         1. This function does not read the image and ground truth files.
-            The results do not have the "image" and "sem_seg" fields.
-     """
-
-     # We match input images with ground truth based on their relative filepaths (without file
-     # extensions) starting from 'image_root' and 'gt_root' respectively.
-     def file2id(folder_path, file_path):
-         # extract relative path starting from `folder_path`
-         image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))
-         # remove file extension
-         image_id = os.path.splitext(image_id)[0]
-         return image_id
-
-     input_files = sorted(
-         (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)),
-         key=lambda file_path: file2id(image_root, file_path),
-     )
-     gt_files = sorted(
-         (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)),
-         key=lambda file_path: file2id(gt_root, file_path),
-     )
-
-     assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root)
-
-     # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images
-     if len(input_files) != len(gt_files):
-         logger.warn(
-             "Directory {} and {} has {} and {} files, respectively.".format(
-                 image_root, gt_root, len(input_files), len(gt_files)
-             )
-         )
-         input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files]
-         gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files]
-         intersect = list(set(input_basenames) & set(gt_basenames))
-         # sort, otherwise each worker may obtain a list[dict] in different order
-         intersect = sorted(intersect)
-         logger.warn("Will use their intersection of {} files.".format(len(intersect)))
-         input_files = [os.path.join(image_root, f + image_ext) for f in intersect]
-         gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect]
-
-     logger.info(
-         "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root)
-     )
-
-     dataset_dicts = []
-     for (img_path, gt_path) in zip(input_files, gt_files):
-         record = {}
-         record["file_name"] = img_path
-         record["sem_seg_file_name"] = gt_path
-         dataset_dicts.append(record)
-
-     return dataset_dicts
-
-
- def convert_to_coco_dict(dataset_name):
-     """
-     Convert an instance detection/segmentation or keypoint detection dataset
-     in detectron2's standard format into COCO json format.
-
-     Generic dataset description can be found here:
-     https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset
-
-     COCO data format description can be found here:
-     http://cocodataset.org/#format-data
-
-     Args:
-         dataset_name (str):
-             name of the source dataset
-             Must be registered in DatasetCatalog and in detectron2's standard format.
-             Must have corresponding metadata "thing_classes"
-     Returns:
-         coco_dict: serializable dict in COCO json format
-     """
-
-     dataset_dicts = DatasetCatalog.get(dataset_name)
-     metadata = MetadataCatalog.get(dataset_name)
-
-     # unmap the category mapping ids for COCO
-     if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
-         reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()}
-         reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id]  # noqa
-     else:
-         reverse_id_mapper = lambda contiguous_id: contiguous_id  # noqa
-
-     categories = [
-         {"id": reverse_id_mapper(id), "name": name}
-         for id, name in enumerate(metadata.thing_classes)
-     ]
-
-     logger.info("Converting dataset dicts into COCO format")
-     coco_images = []
-     coco_annotations = []
-
-     for image_id, image_dict in enumerate(dataset_dicts):
-         coco_image = {
-             "id": image_dict.get("image_id", image_id),
-             "width": int(image_dict["width"]),
-             "height": int(image_dict["height"]),
-             "file_name": str(image_dict["file_name"]),
-         }
-         coco_images.append(coco_image)
-
-         anns_per_image = image_dict.get("annotations", [])
-         for annotation in anns_per_image:
-             # create a new dict with only COCO fields
-             coco_annotation = {}
-
-             # COCO requirement: XYWH box format for axis-align and XYWHA for rotated
-             bbox = annotation["bbox"]
-             if isinstance(bbox, np.ndarray):
-                 if bbox.ndim != 1:
-                     raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.")
-                 bbox = bbox.tolist()
-             if len(bbox) not in [4, 5]:
-                 raise ValueError(f"bbox has to have length 4 or 5. Got {bbox}.")
-             from_bbox_mode = annotation["bbox_mode"]
-             to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS
-             bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode)
-
-             # COCO requirement: instance area
-             if "segmentation" in annotation:
-                 # Computing areas for instances by counting the pixels
-                 segmentation = annotation["segmentation"]
-                 # TODO: check segmentation type: RLE, BinaryMask or Polygon
-                 if isinstance(segmentation, list):
-                     polygons = PolygonMasks([segmentation])
-                     area = polygons.area()[0].item()
-                 elif isinstance(segmentation, dict):  # RLE
-                     area = mask_util.area(segmentation).item()
-                 else:
-                     raise TypeError(f"Unknown segmentation type {type(segmentation)}!")
-             else:
-                 # Computing areas using bounding boxes
-                 if to_bbox_mode == BoxMode.XYWH_ABS:
-                     bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS)
-                     area = Boxes([bbox_xy]).area()[0].item()
-                 else:
-                     area = RotatedBoxes([bbox]).area()[0].item()
-
-             if "keypoints" in annotation:
-                 keypoints = annotation["keypoints"]  # list[int]
-                 for idx, v in enumerate(keypoints):
-                     if idx % 3 != 2:
-                         # COCO's segmentation coordinates are floating points in [0, H or W],
-                         # but keypoint coordinates are integers in [0, H-1 or W-1]
-                         # For COCO format consistency we subtract 0.5
-                         # https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163
-                         keypoints[idx] = v - 0.5
-                 if "num_keypoints" in annotation:
-                     num_keypoints = annotation["num_keypoints"]
-                 else:
-                     num_keypoints = sum(kp > 0 for kp in keypoints[2::3])
-
-             # COCO requirement:
-             #   linking annotations to images
-             #   "id" field must start with 1
-             coco_annotation["id"] = len(coco_annotations) + 1
-             coco_annotation["image_id"] = coco_image["id"]
-             coco_annotation["bbox"] = [round(float(x), 3) for x in bbox]
-             coco_annotation["area"] = float(area)
-             coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0))
-             coco_annotation["category_id"] = int(reverse_id_mapper(annotation["category_id"]))
-
-             # Add optional fields
-             if "keypoints" in annotation:
-                 coco_annotation["keypoints"] = keypoints
-                 coco_annotation["num_keypoints"] = num_keypoints
-
-             if "segmentation" in annotation:
-                 seg = coco_annotation["segmentation"] = annotation["segmentation"]
-                 if isinstance(seg, dict):  # RLE
-                     counts = seg["counts"]
-                     if not isinstance(counts, str):
-                         # make it json-serializable
-                         seg["counts"] = counts.decode("ascii")
-
-             coco_annotations.append(coco_annotation)
-
-     logger.info(
-         "Conversion finished, "
-         f"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}"
-     )
-
-     info = {
-         "date_created": str(datetime.datetime.now()),
-         "description": "Automatically generated COCO json file for Detectron2.",
-     }
-     coco_dict = {"info": info, "images": coco_images, "categories": categories, "licenses": None}
-     if len(coco_annotations) > 0:
-         coco_dict["annotations"] = coco_annotations
-     return coco_dict
-
-
- def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
-     """
-     Converts dataset into COCO format and saves it to a json file.
-     dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
-
-     Args:
-         dataset_name:
-             reference from the config file to the catalogs
-             must be registered in DatasetCatalog and in detectron2's standard format
-         output_file: path of json file that will be saved to
-         allow_cached: if json file is already present then skip conversion
-     """
-
-     # TODO: The dataset or the conversion script *may* change,
-     # a checksum would be useful for validating the cached data
-
-     PathManager.mkdirs(os.path.dirname(output_file))
-     with file_lock(output_file):
-         if PathManager.exists(output_file) and allow_cached:
-             logger.warning(
-                 f"Using previously cached COCO format annotations at '{output_file}'. "
-                 "You need to clear the cache file if your dataset has been modified."
-             )
-         else:
-             logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...")
-             coco_dict = convert_to_coco_dict(dataset_name)
-
-             logger.info(f"Caching COCO format annotations at '{output_file}' ...")
-             tmp_file = output_file + ".tmp"
-             with PathManager.open(tmp_file, "w") as f:
-                 json.dump(coco_dict, f)
-             shutil.move(tmp_file, output_file)
-
-
- def register_coco_instances(name, metadata, json_file, image_root):
-     """
-     Register a dataset in COCO's json annotation format for
-     instance detection, instance segmentation and keypoint detection.
-     (i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
-     `instances*.json` and `person_keypoints*.json` in the dataset).
-
-     This is an example of how to register a new dataset.
-     You can do something similar to this function, to register new datasets.
-
-     Args:
-         name (str): the name that identifies a dataset, e.g. "coco_2014_train".
-         metadata (dict): extra metadata associated with this dataset. You can
-             leave it as an empty dict.
-         json_file (str): path to the json instance annotation file.
-         image_root (str or path-like): directory which contains all the images.
-     """
-     assert isinstance(name, str), name
-     assert isinstance(json_file, (str, os.PathLike)), json_file
-     assert isinstance(image_root, (str, os.PathLike)), image_root
-     # 1. register a function which returns dicts
-     DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
-
-     # 2. Optionally, add metadata about this dataset,
-     # since they might be useful in evaluation, visualization or logging
-     MetadataCatalog.get(name).set(
-         json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
-     )
-
-
- if __name__ == "__main__":
-     """
-     Test the COCO json dataset loader.
-
-     Usage:
-         python -m detectron2.data.datasets.coco \
-             path/to/json path/to/image_root dataset_name
-
-     "dataset_name" can be "coco_2014_minival_100", or other
-     pre-registered ones
-     """
-     from detectron2.utils.logger import setup_logger
-     from detectron2.utils.visualizer import Visualizer
-     import detectron2.data.datasets  # noqa # add pre-defined metadata
-     import sys
-
-     logger = setup_logger(name=__name__)
-     assert sys.argv[3] in DatasetCatalog.list()
-     meta = MetadataCatalog.get(sys.argv[3])
-
-     dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3])
-     logger.info("Done loading {} samples.".format(len(dicts)))
-
-     dirname = "coco-data-vis"
-     os.makedirs(dirname, exist_ok=True)
-     for d in dicts:
-         img = np.array(Image.open(d["file_name"]))
-         visualizer = Visualizer(img, metadata=meta)
-         vis = visualizer.draw_dataset_dict(d)
-         fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
-         vis.save(fpath)
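
A minimal usage sketch of `register_coco_instances` as defined above (the dataset name and paths here are hypothetical):

from detectron2.data import DatasetCatalog
from detectron2.data.datasets import register_coco_instances

register_coco_instances("my_coco_train", {}, "annotations/train.json", "images/train")
dicts = DatasetCatalog.get("my_coco_train")  # lazily invokes load_coco_json
print(f"loaded {len(dicts)} images in Detectron2 format")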
spaces/Bart92/RVC_HF/demucs/separate.py DELETED
@@ -1,185 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import sys
from pathlib import Path
import subprocess

import julius
import torch as th
import torchaudio as ta

from .audio import AudioFile, convert_audio_channels
from .pretrained import is_pretrained, load_pretrained
from .utils import apply_model, load_model


def load_track(track, device, audio_channels, samplerate):
    errors = {}
    wav = None

    try:
        wav = AudioFile(track).read(
            streams=0,
            samplerate=samplerate,
            channels=audio_channels).to(device)
    except FileNotFoundError:
        errors['ffmpeg'] = 'FFmpeg is not installed.'
    except subprocess.CalledProcessError:
        errors['ffmpeg'] = 'FFmpeg could not read the file.'

    if wav is None:
        try:
            wav, sr = ta.load(str(track))
        except RuntimeError as err:
            errors['torchaudio'] = err.args[0]
        else:
            wav = convert_audio_channels(wav, audio_channels)
            wav = wav.to(device)
            wav = julius.resample_frac(wav, sr, samplerate)

    if wav is None:
        print(f"Could not load file {track}. "
              "Maybe it is not a supported file format? ")
        for backend, error in errors.items():
            print(f"When trying to load using {backend}, got the following error: {error}")
        sys.exit(1)
    return wav


def encode_mp3(wav, path, bitrate=320, samplerate=44100, channels=2, verbose=False):
    try:
        import lameenc
    except ImportError:
        print("Failed to call lame encoder. Maybe it is not installed? "
              "On windows, run `python.exe -m pip install -U lameenc`, "
              "on OSX/Linux, run `python3 -m pip install -U lameenc`, "
              "then try again.", file=sys.stderr)
        sys.exit(1)
    encoder = lameenc.Encoder()
    encoder.set_bit_rate(bitrate)
    encoder.set_in_sample_rate(samplerate)
    encoder.set_channels(channels)
    encoder.set_quality(2)  # 2-highest, 7-fastest
    if not verbose:
        encoder.silence()
    wav = wav.transpose(0, 1).numpy()
    mp3_data = encoder.encode(wav.tobytes())
    mp3_data += encoder.flush()
    with open(path, "wb") as f:
        f.write(mp3_data)


def main():
    parser = argparse.ArgumentParser("demucs.separate",
                                     description="Separate the sources for the given tracks")
    parser.add_argument("tracks", nargs='+', type=Path, default=[], help='Path to tracks')
    parser.add_argument("-n",
                        "--name",
                        default="demucs_quantized",
                        help="Model name. See README.md for the list of pretrained models. "
                             "Default is demucs_quantized.")
    parser.add_argument("-v", "--verbose", action="store_true")
    parser.add_argument("-o",
                        "--out",
                        type=Path,
                        default=Path("separated"),
                        help="Folder where to put extracted tracks. A subfolder "
                             "with the model name will be created.")
    parser.add_argument("--models",
                        type=Path,
                        default=Path("models"),
                        help="Path to trained models. "
                             "Also used to store downloaded pretrained models")
    parser.add_argument("-d",
                        "--device",
                        default="cuda" if th.cuda.is_available() else "cpu",
                        help="Device to use, default is cuda if available else cpu")
    parser.add_argument("--shifts",
                        default=0,
                        type=int,
                        help="Number of random shifts for equivariant stabilization. "
                             "Increases separation time but improves quality for Demucs. "
                             "10 was used in the original paper.")
    parser.add_argument("--overlap",
                        default=0.25,
                        type=float,
                        help="Overlap between the splits.")
    parser.add_argument("--no-split",
                        action="store_false",
                        dest="split",
                        default=True,
                        help="Doesn't split audio in chunks. This can use large amounts of memory.")
    parser.add_argument("--float32",
                        action="store_true",
                        help="Convert the output wavefile to use pcm f32 format instead of s16. "
                             "This should not make a difference if you just plan on listening to the "
                             "audio but might be needed to compute exactly metrics like SDR etc.")
    parser.add_argument("--int16",
                        action="store_false",
                        dest="float32",
                        help="Opposite of --float32, here for compatibility.")
    parser.add_argument("--mp3", action="store_true",
                        help="Convert the output wavs to mp3.")
    parser.add_argument("--mp3-bitrate",
                        default=320,
                        type=int,
                        help="Bitrate of converted mp3.")

    args = parser.parse_args()
    name = args.name + ".th"
    model_path = args.models / name
    if model_path.is_file():
        model = load_model(model_path)
    else:
        if is_pretrained(args.name):
            model = load_pretrained(args.name)
        else:
            print(f"No pre-trained model {args.name}", file=sys.stderr)
            sys.exit(1)
    model.to(args.device)

    out = args.out / args.name
    out.mkdir(parents=True, exist_ok=True)
    print(f"Separated tracks will be stored in {out.resolve()}")
    for track in args.tracks:
        if not track.exists():
            print(
                f"File {track} does not exist. If the path contains spaces, "
                "please try again after surrounding the entire path with quotes \"\".",
                file=sys.stderr)
            continue
        print(f"Separating track {track}")
        wav = load_track(track, args.device, model.audio_channels, model.samplerate)

        ref = wav.mean(0)
        wav = (wav - ref.mean()) / ref.std()
        sources = apply_model(model, wav, shifts=args.shifts, split=args.split,
                              overlap=args.overlap, progress=True)
        sources = sources * ref.std() + ref.mean()

        track_folder = out / track.name.rsplit(".", 1)[0]
        track_folder.mkdir(exist_ok=True)
        for source, name in zip(sources, model.sources):
            source = source / max(1.01 * source.abs().max(), 1)
            if args.mp3 or not args.float32:
                source = (source * 2**15).clamp_(-2**15, 2**15 - 1).short()
            source = source.cpu()
            stem = str(track_folder / name)
            if args.mp3:
                encode_mp3(source, stem + ".mp3",
                           bitrate=args.mp3_bitrate,
                           samplerate=model.samplerate,
                           channels=model.audio_channels,
                           verbose=args.verbose)
            else:
                wavname = str(track_folder / f"{name}.wav")
                ta.save(wavname, source, sample_rate=model.samplerate)


if __name__ == "__main__":
    main()
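
A minimal programmatic sketch of the same pipeline, for readers who want to call the module directly rather than through the CLI above. It assumes the demucs package and its pretrained "demucs_quantized" weights are available; "song.mp3" is a placeholder path. The normalize / apply_model / de-normalize steps mirror main():

    import torch as th

    from demucs.pretrained import load_pretrained
    from demucs.separate import load_track
    from demucs.utils import apply_model

    device = "cuda" if th.cuda.is_available() else "cpu"
    model = load_pretrained("demucs_quantized").to(device)

    # load_track resamples and remixes the file to what the model expects.
    wav = load_track("song.mp3", device, model.audio_channels, model.samplerate)

    # Normalize, separate, then undo the normalization, as in main().
    ref = wav.mean(0)
    sources = apply_model(model, (wav - ref.mean()) / ref.std(),
                          shifts=0, split=True, overlap=0.25)
    sources = sources * ref.std() + ref.mean()

    for source, name in zip(sources, model.sources):
        print(name, tuple(source.shape))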
 
spaces/Benson/text-generation/Examples/Camin Simulador ltimo Mod Apk Android Oyun Club.md DELETED
@@ -1,124 +0,0 @@
<h1>Truck Simulator Ultimate Winter Season Mod Apk: A Must-Have for Truck Simulation Fans</h1>
<p>If you are a fan of truck simulation games, you may have heard of Truck Simulator Ultimate, a game developed by Zuuks Games that offers a realistic and immersive trucking experience. But did you know there is a mod apk that adds a winter season to the game, making it even more challenging and fun? In this article, we will tell you everything you need to know about the Truck Simulator Ultimate winter season mod apk, including its features, gameplay, review, and rating.</p>
<h2>camión simulador último mod apk android oyun club</h2><p><b>DOWNLOAD >>> <a href="https://bltlly.com/2v6KxL">https://bltlly.com/2v6KxL</a></b></p>
<h2>What is Truck Simulator Ultimate Winter Season Mod Apk?</h2>
<p>Truck Simulator Ultimate Winter Season Mod Apk is a modified version of the original Truck Simulator Ultimate game that adds a winter season. This means you will have to deal with snow, ice, fog, rain, and other harsh weather conditions while driving your truck across Europe, and adapt to slippery roads, reduced visibility, and more frequent traffic accidents. The mod apk also adds new features and improvements, such as new trucks, new jobs, new maps, and more.</p>
<h2>Why is it popular among truck simulation fans?</h2>
<p>The mod apk is popular among truck simulation fans because it adds a new level of realism and challenge. The winter season makes the game more dynamic and unpredictable, since you never know what kind of weather or road condition you will encounter. The mod apk also improves the game's graphics and sound effects, making it more visually appealing and immersive, and it offers more variety and content: you can choose between different trucks, cargoes, routes, and missions.</p>
<h2>Features of Truck Simulator Ultimate Winter Season Mod Apk</h2>
<p>The mod apk adds the following features to the game:</p>
<ul>
<li><b>Realistic winter weather and road conditions</b>: The mod apk simulates the winter season in Europe, with snow, ice, fog, rain, and other weather phenomena. You will have to adjust your driving style and speed to the weather and road conditions, and watch out for snowplows, salt trucks, traffic jams, accidents, and other obstacles on the road.</li>
<li><b>Licensed trucks from Mercedes-Benz, Setra, and other brands</b>: The mod apk features trucks officially licensed from Mercedes-Benz Group AG. You can drive trucks from the Mercedes-Benz Actros series, as well as Setra buses, and customize them with lamps, bumpers, horns, cabin lights, and more modification options.</li>
<li><b>Multiplayer mode and online auctions</b>: The mod apk lets you play online with other players in multiplayer mode. You can haul cargo together or take part in races. You can also bid on jobs in online auctions and earn higher profits.</li>
<li><b>Tycoon and simulation gameplay</b>: The mod apk combines tycoon and simulation gameplay in one game. You can establish your own transport company, hire employees, expand your fleet, design your offices, manage your finances, and become the world's largest logistics company, transporting a wide variety of cargo across more than 100 cities in Europe.</li>
</ul>
<h2>How to download and install the mod apk</h2>
<p>To download and install the Truck Simulator Ultimate winter season mod apk, follow these steps:</p>
<ol>
<li>Download the mod apk file from a trusted source. You can find the link to the latest version of the mod apk at the end of this article.</li>
<li>Enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown sources and toggling it on.</li>
<li>Locate the downloaded mod apk file on your device and tap it to start the installation process.</li>
<li>Follow the on-screen instructions and wait for the installation to finish.</li>
<li>Launch the game and enjoy the winter season.</li>
</ol>
<h2>How to start and run a trucking company</h2>
<p>Once you have installed the mod apk, you can start your own transport company in the game. Here are some tips on how to run a successful trucking business:</p>
<ul>
<li><b>Choose the location of your headquarters</b>: You can choose among 12 different countries in Europe to set up your headquarters. Each country has its own advantages and disadvantages, such as taxes, fuel prices, road conditions, and market demand. Choose wisely according to your preferences and strategy.</li>
<li><b>Hire drivers and buy trucks</b>: You will need drivers and trucks to haul cargo and earn money. You can hire drivers from the recruitment agency or from online auctions, and buy trucks from dealerships or online auctions. You can also upgrade and customize your trucks with various parts and accessories.</li>
<li><b>Find and accept jobs</b>: You can find jobs from various sources, such as the cargo market, online auctions, or contracts. Accept jobs that suit your schedule, budget, and reputation, and negotiate the price and terms with the clients.</li>
<li><b>Deliver cargo and earn money</b>: You will have to deliver cargo from one city to another within a given deadline, while following the traffic rules, paying tolls, refueling, resting, and maintaining your truck. You will also have to cope with the winter weather and road conditions. You earn money for every successful delivery, which you can use to pay your expenses, expand your business, or save for future investments.</li>
</ul>
<h2>How to drive and deliver cargo across Europe</h2>
<p>Driving and delivering cargo across Europe is no easy task, especially in the winter season. You will face various challenges and risks on the road. Here are some tips and tricks on how to drive and deliver cargo safely and efficiently:</p>
<ul>
<li><b>Drive carefully</b>: When driving your truck, drive carefully and responsibly. Follow the speed limit, obey traffic signs and signals, and use your indicators, headlights, wipers, and horn when necessary. Keep a safe distance from other vehicles, avoid sudden braking or turning, and watch out for pedestrians, cyclists, animals, and other hazards on the road.</li>
<li><b>Use winter equipment</b>: In the winter season, you will need special equipment to drive safely on snowy and icy roads. Equip your truck with winter tires, snow chains, antifreeze fluid, a heater, a defroster, and a scraper. Also carry some emergency items such as a flashlight, a blanket, food, water, a first-aid kit, and a phone charger.</li>
<li><b>Monitor your status</b>: While driving your truck, monitor your status regularly. Check the fuel level, engine temperature, tire pressure, brake condition, cargo weight, and damage level. Also check your driver's fatigue level, health level, and mood. Avoid driving when tired, hungry, thirsty, sick, or angry, and take breaks, eat, drink, rest, and relax when necessary.</li>
</ul>
<h2>Use tips and tricks to earn more money and avoid traffic fines</h2>
<p>Driving and delivering cargo is not only about safety and efficiency, but also about profitability and reputation. Use these tips and tricks to earn more money and avoid traffic fines in the game:</p>
<ul>
<li><b>Negotiate the price and terms</b>: Negotiate the price and terms of jobs with the clients. Try to get a higher price, a longer deadline, a lower penalty, or a bonus for the jobs, and try to reduce costs such as tolls, fuel, maintenance, or taxes.</li>
<li><b>Drive economically</b>: Drive economically to save fuel and money. Drive at a moderate speed, avoid unnecessary acceleration or braking, use cruise control or eco mode when possible, and plan your route to avoid traffic jams or roadworks.</li>
<li><b>Avoid traffic fines</b>: Avoid traffic fines by following the traffic rules and regulations. Pay attention to the speed limit, traffic signs and signals, road markings, and police patrols. Avoid speeding, running red lights, overtaking illegally, parking incorrectly, or damaging other vehicles or property.</li>
</ul>
<h2>Review and rating of Truck Simulator Ultimate Winter Season Mod Apk</h2>
<p>The Truck Simulator Ultimate winter season mod apk is one of the best truck simulation games on the market. It has received positive reviews and ratings from both critics and users. Here are some of the pros and cons of the mod apk:</p>
<table>
<tr>
<th>Pros</th>
<th>Cons</th>
</tr>
<tr>
<td>- Realistic winter weather and road conditions<br>- Licensed trucks from Mercedes-Benz Group AG<br>- Multiplayer mode and online auctions<br>- Tycoon and simulation gameplay<br>- Improved graphics and sound effects<br>- More variety and content</td>
<td>- High system requirements<br>- Large file size<br>- Occasional bugs and technical issues<br>- Requires an internet connection<br>- Some features require in-app purchases<br>- May not be compatible with some devices</td>
</tr>
</table>
<p>As the table shows, the mod apk has more strengths than weaknesses, which is reflected in its positive reviews and ratings.</p>
<h2>Comparison with other truck simulation games</h2>
<p>The Truck Simulator Ultimate winter season mod apk is not the only truck simulation game on the market. There are other popular truck simulation games, such as Euro Truck Simulator 2, American Truck Simulator, Truck Simulator 2018: Europe, and World Truck Driving Simulator. How does the mod apk compare with these games? Here is a brief comparison:</p>
<table>
<tr>
<th>Game</th>
<th>Platform</th>
<th>Features</th>
<th>Rating</th>
</tr>
<tr>
<td>Truck Simulator Ultimate Winter Season Mod Apk</td>
<td>Android, iOS</td>
<td>- Winter season<br>- Licensed trucks<br>- Multiplayer mode<br>- Tycoon and simulation gameplay<br>- Map of Europe</td>
<td>4.5/5 (Google Play)<br>4.7/5 (App Store)</td>
</tr>
<tr>
<td>Euro Truck Simulator 2</td>
<td>Windows, Mac, Linux</td>
<td>- Realistic physics<br>- Customizable trucks<br>- Online multiplayer<br>- Career mode<br>- Map of Europe</td>
<td>10/10 (Steam)</td>
</tr>
<tr>
<td>American Truck Simulator</td>
<td>Windows, Mac, Linux</td>
<td>- Realistic physics<br>- Customizable trucks<br>- Online multiplayer<br>- Career mode<br>- Map of the United States</td>
<td>9/10 (Steam)</td>
</tr>
<tr>
<td>Truck Simulator 2018: Europe</td>
<td>Android, iOS</td>
<td>- Realistic physics<br>- Customizable trucks<br>- Online multiplayer<br>- Career mode<br>- Map of Europe</td>
<td>4.1/5 (Google Play)<br>4.3/5 (App Store)</td>
</tr>
<tr>
<td>World Truck Driving Simulator</td>
<td>Android, iOS</td>
<td>- Realistic physics<br>- Customizable trucks<br>- Online multiplayer<br>- Career mode<br>- World map</td>
<td>4.3/5 (Google Play)<br>N/A (App Store)</td>
</tr>
</table>
<p>As the comparison shows, the mod apk holds its own against other popular truck simulation games, although each title has its own strengths and weaknesses.</p>
<h2>Conclusion</h2>
<p>In conclusion, the Truck Simulator Ultimate winter season mod apk is a must-have for truck simulation fans who want a realistic and immersive trucking experience in the winter season. The mod apk adds a new level of challenge and fun by simulating winter weather and road conditions in Europe, and it offers many features and improvements, such as licensed trucks from Mercedes-Benz Group AG, multiplayer mode and online auctions, tycoon and simulation gameplay, improved graphics and sound effects, and more variety and content. It has received positive reviews and ratings from critics and users alike, who have praised its realism, challenge, variety, and fun factor, and it compares well with other truck simulation games on the market, although it has some advantages and disadvantages. If you are a truck simulation enthusiast who wants to try something new and exciting, you should definitely download the Truck Simulator Ultimate winter season mod apk today.</p>
<h2>Frequently Asked Questions (FAQs)</h2>
<h3>Q: How do I download the Truck Simulator Ultimate winter season mod apk?</h3>
<p>A: You can download the mod apk file from a trusted source. You can find the link to the latest version of the mod apk at the end of this article. You will also need to enable the installation of apps from unknown sources on your device before installing the mod apk.</p>
<h3>Q: Is the Truck Simulator Ultimate winter season mod apk safe to use?</h3>
<p>A: The mod apk is safe to use as long as you download it from a trusted source. You should also scan the mod apk file with antivirus software before installing it, and back up your data before installing the mod apk in case something goes wrong.</p>
<h3>Q: What are the system requirements for the Truck Simulator Ultimate winter season mod apk?</h3>
<h3>Q: How do I update the Truck Simulator Ultimate winter season mod apk?</h3>
<p>A: You can update the mod apk by downloading and installing the latest version from the same source. You should also check for updates regularly to enjoy the latest features and improvements of the mod apk.</p>
<h3>Q: How do I contact the support team of the Truck Simulator Ultimate winter season mod apk?</h3>
<p>A: You can contact the support team of the mod apk by sending an email to [email protected] or by visiting their website at https://www.zuuks.com/. You can also follow them on social media platforms such as Facebook, Twitter, Instagram, and YouTube for more information and updates.</p>
<h3>Q: Where can I find more tips and tricks for the Truck Simulator Ultimate winter season mod apk?</h3>
<p>A: You can find more tips and tricks for the mod apk by visiting online forums, blogs, websites, or YouTube channels dedicated to truck simulation games. You can also join online communities, groups, or clubs of truck simulation fans and share your experiences and knowledge with them.</p>
<h2>Download link</h2>
<p>If you are ready to download the Truck Simulator Ultimate winter season mod apk, you can click the link below to get the latest version. The link will direct you to the download page. Enjoy!</p>
<p><a href="">Truck Simulator Ultimate Winter Season Mod Apk Download</a></p>
 
spaces/Benson/text-generation/Examples/Descargar Apk Mod pica Conquista 2.md DELETED
@@ -1,68 +0,0 @@
<h1>Download APK Mod Epic Conquest 2: A Guide for Android Players</h1>
<p>If you are a fan of action RPGs and anime, you may have heard of Epic Conquest 2, a game created by a small indie team of 4 with a burning passion and love for the genre. But did you know you can download a modified version of the game, called an APK Mod, that gives you access to premium features or cheats? In this article, we will explain what Epic Conquest 2 is, what an APK Mod is, and how to download APK Mod Epic Conquest 2 for your Android device. But be warned: downloading APK Mod files can be risky and illegal, so proceed at your own risk.</p>
<h2>descargar apk mod épica conquista 2</h2><p><b>DOWNLOAD &#9881; <a href="https://bltlly.com/2v6LJe">https://bltlly.com/2v6LJe</a></b></p>
<h2>What is Epic Conquest 2?</h2>
<p>Epic Conquest 2 is an action RPG inspired by the classics, with a special touch in combat and story that gives you an experience hard to find in similar games. It is also the sequel to the earlier game Epic Conquest, which captivated millions of players on mobile platforms.</p>
<h3>A classic action RPG with an anime-style story</h3>
<p>In Epic Conquest 2, you can explore an open world with all kinds of treasures and resources to strengthen your character, choose among various skills and masteries to suit your playstyle, craft, upgrade, and enhance your equipment to face tougher challenges, and collect costumes for your character to change his or her appearance and power. The game also features simple but beautiful old-school graphics, full controller support, and an offline mode that lets you play anywhere without an internet connection.</p>
<p>The game also features an anime-style story with rich lore and memorable characters. You can follow the main story or take side quests to learn more about the world and its inhabitants. The game has plenty of humor and romance too, which makes it more enjoyable and immersive.</p>
<h3>A sequel to the popular mobile game Epic Conquest</h3>
<p>Epic Conquest 2 is the sequel to Epic Conquest, an earlier game by the same team that won millions of fans on mobile platforms.</p>
<p>If you have played Epic Conquest before, you will notice how far this game has evolved. You will also meet some familiar faces from the previous game, as well as new ones. You can even import your save data from Epic Conquest into Epic Conquest 2, so you can continue your adventure with your favorite character.</p>
<h3>An early access game on Steam and Google Play</h3>
<p>Epic Conquest 2 is currently in the early access stage, which means it is not complete and may change further in development. The developers aim to finish the game within a year, so you can expect the full version sometime in 2021. However, you can still play the game now and take part in its development process.</p>
<p>The game is available on Steam and Google Play. You can download it for free and play it without paying or watching ads. However, if you want to support the developers, you can buy some in-game items or donate through Patreon. You can also give feedback or suggestions to the developers via email or the community hub.</p>
<h2>What is an APK Mod?</h2>
<p>APK Mod is a term that refers to a modified version of an original app or game, usually created by third-party developers or hackers. APK Mod files are not available in the official app stores, but they can be downloaded from various websites or forums. APK Mod files can offer benefits or features not present in the original app or game, such as unlocking premium content, removing ads, adding cheats, or improving performance.</p>
<h3>A modified version of an original app or game</h3>
<p>An APK Mod file is an APK (Android application package) that has been altered from the original app or game to change its behavior, content, or features.</p>
<p>An APK Mod file can be created by anyone with the skills and tools to do so. Usually, an APK Mod file is made by decompiling the original APK file, editing the source code or resources, and then recompiling it into a new APK file. Sometimes, an APK Mod file can also be made using a patching tool that applies changes to the original APK file without decompiling it.</p>
<h3>A way to unlock premium features or cheats</h3>
<p>One of the main reasons people download APK Mod files is to unlock premium features or cheats that are not available in the original app or game. For example, some apps or games may require you to pay real money to access certain content or functions, such as extra levels, characters, skins, weapons, etc. By downloading an APK Mod file, you can bypass this requirement and enjoy the full content for free.</p>
<p>Another reason people download APK Mod files is to use cheats that make the app or game easier or more fun. For example, some games may have challenging difficulty levels, limited lives, timers, enemies, etc. By downloading an APK Mod file, you can modify these aspects and get unlimited lives, coins, gems, health, ammo, etc. You can also unlock hidden features or modes that are normally inaccessible.</p>
<h3>A potential risk to safety and legality</h3>
<p>While downloading APK Mod files may seem tempting and harmless, it also comes with risks you should be aware of. First, downloading APK Mod files can be illegal and violate the terms of service of the original app or game developer. You may be infringing their intellectual property rights and breaking their rules, which can result in legal action against you or your account being banned from their services.</p>
<p>Second, APK Mod files can be dangerous for your device and your privacy, since they may contain malware or viruses.</p>
<h2>How to download APK Mod Epic Conquest 2?</h2>
<p>If you are still interested in downloading APK Mod Epic Conquest 2 despite the risks involved, you will need to follow a few steps. Here is a general guide on how to download APK Mod files for Android devices, but keep in mind that the exact process may vary depending on the source and the file.</p>
<h3>Find a reliable source of APK Mod files</h3>
<p>The first step is to find a website or forum that provides APK Mod files for Epic Conquest 2. You can use a search engine or a social media platform to look for them, but be careful and do some research before trusting any source. Some sources may be fake, outdated, or malicious, so you should check the reviews, ratings, comments, and feedback from other users. Also avoid clicking any suspicious links or ads that might redirect you to unwanted sites or downloads.</p>
<p>Once you find a reliable source of APK Mod files for Epic Conquest 2, you will need to choose the version and features you want. There may be different versions of APK Mod files for the same game, with different modifications or additions. For example, some APK Mod files may give you unlimited gold, while others may give you unlimited gems. Some APK Mod files may also require you to root your device or install additional apps or tools. Read the description and instructions carefully before downloading any file.</p>
<h3>Enable unknown sources in your device settings</h3>
<p>The next step is to enable unknown sources in your device settings, which will allow you to install apps or games from sources other than the official app stores. To do this, go to your device settings, then Security or Privacy, then turn on the option that says Unknown sources or Allow installation from unknown sources. You may also need to confirm this action by tapping OK or Yes.</p>
<p>After enabling unknown sources, you can download the APK Mod file from your chosen source and locate it in your downloads folder.</p>
<h3>Install the APK Mod file and enjoy the game</h3>
<p>The final step is to install the APK Mod file and enjoy the game. To do this, locate the APK Mod file you downloaded from your source, either in your device storage or in the download folder. Then tap the file and follow the installation instructions. You may need to grant some permissions or accept some terms and conditions before installing the file.</p>
<p>Once the installation is complete, you can launch the game and see whether the APK Mod file works as expected. You should be able to access the premium features or cheats you wanted, such as unlimited resources, items, or skills. However, you should also be aware of the possible consequences of using an APK Mod file, such as losing your progress, getting banned from online services, or facing legal issues.</p>
<h2>Conclusion</h2>
<p>In this article, we have explained what Epic Conquest 2 is, what an APK Mod is, and how to download APK Mod Epic Conquest 2 for your Android device. We have also warned you about the risks and downsides of downloading APK Mod files, which can be illegal and dangerous.</p>
<h3>Summary of the main points</h3>
<ul>
<li>Epic Conquest 2 is an action RPG with an anime-style story and classic gameplay.</li>
<li>An APK Mod is a modified version of an original app or game that offers premium features or cheats.</li>
<li>To download APK Mod Epic Conquest 2, you need to find a reliable source of APK Mod files, enable unknown sources in your device settings, and install the APK Mod file.</li>
</ul>
<h3>Pros and cons of downloading APK Mod Epic Conquest 2</h3>
<table>
<tr><th>Pros</th><th>Cons</th></tr>
<tr><td>You can enjoy the full content of Epic Conquest 2 for free.</td><td>You may violate the terms of service of the Epic Conquest 2 developer and face legal action.</td></tr>
<tr><td>You can use cheats that make the game easier or more fun.</td><td>You may get banned from the game's online services.</td></tr>
<tr><td>You can unlock hidden features or modes that are normally inaccessible.</td><td>You may run into bugs, errors, glitches, or compatibility issues that affect your gaming experience.</td></tr>
<tr><td>You can customize your character's appearance and power with costumes.</td><td>You may infect your device with malware, viruses, spyware, adware, etc. that can steal your personal information or damage your device.</td></tr>
</table>
<h3>Final recommendation</h3>
<p>Ultimately, the decision to download APK Mod Epic Conquest 2 is up to you. If you are willing to take the risks and accept the consequences, you may enjoy the game more with the added features or cheats. However, if you want to play the game safely and legally, you may want to stick with the original version and support the developers by buying some in-game items or donating through Patreon. Either way, we hope you have fun playing Epic Conquest 2 and appreciate the hard work and passion of the indie team behind it.</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions about APK Mod Epic Conquest 2:</p>
<h3>Q: Can I play Epic Conquest 2 online with an APK Mod?</h3>
<p>A: No, you cannot play Epic Conquest 2 online with an APK Mod. The game has an online mode that requires a valid account and a connection to the official server. If you use an APK Mod, you will not be able to access the online mode and you may be banned from the game's services.</p>
<h3>Q: Can I update Epic Conquest 2 with an APK Mod?</h3>
<p>A: No, you cannot update Epic Conquest 2 with an APK Mod. The game is still in the early access stage and receives frequent updates from the developers. If you use an APK Mod, you will not be able to update the game and you may miss new content or features. You would also have to download a new APK Mod file for every update, which can be tedious and risky.</p>
<h3>Q: Can I transfer my save data from Epic Conquest 2 to APK Mod Epic Conquest 2?</h3>
<p>A: No, you cannot transfer your save data from Epic Conquest 2 to APK Mod Epic Conquest 2. Since the modified version uses different files and data, your save data from the original game is not compatible with it.</p>
<h3>Q: Can I transfer my save data from APK Mod Epic Conquest 2 to Epic Conquest 2?</h3>
<p>A: No, you cannot transfer your save data from APK Mod Epic Conquest 2 to Epic Conquest 2. The game does not let you import your save data from the modified version into the original version, since they are not compatible. You may also corrupt your save data or cause errors if you try to do so.</p>
<h3>Q: Can I play Epic Conquest 2 on PC with an APK Mod?</h3>
<p>A: Yes, you can play Epic Conquest 2 on PC with an APK Mod. The game is available on Steam for PC users, but you can also use an Android emulator to run the game on your PC with an APK Mod. However, you may encounter performance or compatibility issues depending on your PC specs and emulator settings.</p>
 
spaces/Benson/text-generation/Examples/Descargar Estn Llegando Desde El Este Mp3.md DELETED
@@ -1,55 +0,0 @@
<h1>Download They Are Coming From The East Mp3: A Powerful Gospel Song by GUC</h1>
<p>If you are looking for a gospel song that will stir your faith and fill your heart with joy, you should download They Are Coming From The East mp3. This is a song by GUC, a Nigerian gospel singer and songwriter known for his anointed voice and Spirit-filled lyrics. In this article, we will tell you what the song is about, who GUC is, how to download the song, and why you should download it.</p>
<h2>Introduction</h2>
<h3>What is the song about?</h3>
<p>The song, also known as "The Call", is a chant declaring the fulfillment of God's promises and the manifestation of His glory in the lives of His people. The song is based on Isaiah 60:1-5, which says:</p>
<h2>descargar están llegando desde el este mp3</h2><p><b>Download Zip ->>> <a href="https://bltlly.com/2v6IHr">https://bltlly.com/2v6IHr</a></b></p>
<blockquote><p>"Arise, shine; for thy light is come, and the glory of the Lord is risen upon thee. For, behold, the darkness shall cover the earth, and gross darkness the people: but the Lord shall arise upon thee, and his glory shall be seen upon thee. And the Gentiles shall come to thy light, and kings to the brightness of thy rising. Lift up thine eyes round about, and see: all they gather themselves together, they come to thee: thy sons shall come from far, and thy daughters shall be nursed at thy side. Then thou shalt see, and flow together, and thine heart shall fear, and be enlarged; because the abundance of the sea shall be converted unto thee, the forces of the Gentiles shall come unto thee."</p></blockquote>
<p>The song proclaims that God's children are coming from everywhere to worship Him and receive His blessings. They come from the east, west, north, and south. They come from different nations, tribes, tongues, and cultures. They come as one body, one bride, one church. They come to lift His name on high.</p>
<h3>Who is GUC?</h3>
<p>GUC is a Nigerian gospel singer and songwriter, known for his anointed voice and Spirit-filled lyrics.</p>
<h2>How to download the song?</h2>
<h3>Option 1: Visit GUC's official website</h3>
<p>One way to download They Are Coming From The East mp3 is to visit GUC's official website. Here are the steps to follow:</p>
<h4>Step 1: Go to [GUC's website]</h4>
<p>The first step is to go to GUC's official website, which is [www.gucmusic.com]. Here you can find all the information about GUC, his music, his events, his videos, and his contact details.</p>
<h4>Step 2: Click on the "Music" tab</h4>
<p>The next step is to click on the "Music" tab in the top menu of the website. This will take you to a page where you can see all the songs GUC has released so far. You can also listen to them online or watch their videos.</p>
<h4>Step 3: Find the song "The Call" and click the download icon</h4>
<p>The final step is to find the song "The Call", or "They Are Coming From The East Mp3", in the list of songs. You can use the search bar or scroll down until you see it. Once you find it, you will see a download icon next to it. Click it and you will be able to download the song to your device. You may need to enter your email address or phone number to get the download link.</p>
<h3>Option 2: Use a third-party mp3 downloader</h3>
<p>Another way to download They Are Coming From The East mp3 is to use a third-party mp3 download site. These are websites that let you download mp3 files from various sources, such as YouTube, SoundCloud, Spotify, etc. However, you should be careful when using these sites, as some of them may contain viruses, malware, or ads that can harm your device or compromise your privacy. Here are the steps to follow:</p>
<h4>Step 1: Go to a trusted mp3 download site, such as [Music Lyrics] or [PraiseZion]</h4>
<p>These are sites that offer gospel music downloads; make sure the site you choose is reliable before downloading anything from it.</p>
<h4>Step 2: Search for the song "They Are Coming From The East Mp3" or "The Call by GUC"</h4>
<p>The next step is to search for the song you want to download. You can use the search bar or browse the site's categories or tags. You can search for "They Are Coming From The East Mp3" or "The Call by GUC". You will see a list of results matching your query.</p>
<h4>Step 3: Click the download link or button and save the file to your device</h4>
<p>The final step is to click the download link or button for the song you want. You will be redirected to another page where you can choose the quality and format of the file. You can also preview the song before downloading it. Once you are satisfied, click the download button and save the file to your device.</p>
<h2>Why download the song?</h2>
<h3>The song is inspiring and uplifting</h3>
<p>One reason you should download They Are Coming From The East mp3 is that it is an inspiring and uplifting song. It will boost your faith and confidence in God's promises and power. It will remind you that God is working in your favor and that He will bring His glory into your life. It will also encourage you to arise and shine because His light has come upon you.</p>
<h3>The song is based on biblical truths and prophecies</h3>
<p>Another reason you should download They Are Coming From The East mp3 is that it is based on biblical truths and prophecies. It is not just a catchy tune or a catchy phrase, but a declaration of God's word and will for His people. It is a song that aligns with God's agenda and purpose for this generation, and one that reveals God's plan and vision for His church.</p>
<h3>The song is catchy and easy to sing along to</h3>
<p>A third reason you should download They Are Coming From The East mp3 is that it is catchy and easy to sing along to.</p>
<h2>Conclusion</h2>
<h3>Summary of the main points</h3>
<p>In this article, we have discussed how to download They Are Coming From The East mp3, a powerful gospel song by GUC. We explained what the song is about, who GUC is, how to download the song from his official website or from a third-party mp3 download site, and why you should download it. We have also provided some references and links where you can find more information about the song and the artist.</p>
<h3>Call to action</h3>
<p>Now that you know how to download They Are Coming From The East mp3, we encourage you to do so and enjoy this amazing song. We also invite you to share this article with your friends and family who may be interested in this song. You can leave a comment below and tell us what you think of the song and how it has blessed you. Thanks for reading, and God bless you.</p>
<h2>FAQs</h2>
<p>Here are some frequently asked questions and answers about They Are Coming From The East mp3:</p>
<ul>
<li><strong>Q: What is the name of the album that contains the song?</strong></li>
<li>A: The song is part of the album "The Message", which GUC released in December 2020. The album contains 12 tracks, including "The Call".</li>
<li><strong>Q: What does GUC stand for?</strong></li>
<li>A: GUC stands for Gift Ugochi Christopher, the gospel singer and songwriter's full name.</li>
<li><strong>Q: How can I contact GUC or book him for an event?</strong></li>
<li>A: You can contact GUC or book him for an event through his official website, [www.gucmusic.com], or through his social media accounts on platforms such as Instagram, Facebook, Twitter, and YouTube. You can also email him at [[email protected]] or call him at [+234 813 378 2310].</li>
<li><strong>Q: Where can I find the lyrics and the video of the song?</strong></li>
<li>A: You can find them on his official website, [www.gucmusic.com], or on other sites that offer gospel music and lyrics.</li>
<li><strong>Q: Are there other songs by GUC I should listen to?</strong></li>
<li>A: Yes, there are many other songs by GUC you should listen to. Some of them are "All That Matters", "Yours", "Ike Nile", "God of Vengeance", "Desperate", "Knowing You", "I Praise", and "Nkem". You can find them on his website, [www.gucmusic.com], or on other sites that offer gospel music downloads.</li>
</ul>
 
spaces/Benson/text-generation/Examples/Descargar Gratis Nba 2k20 V97.md DELETED
@@ -1,67 +0,0 @@
<h1>Download NBA 2K20 V97 for Free: Everything You Need to Know</h1>
<p>If you are a fan of basketball games, you may have heard of NBA 2K20, the latest installment in the popular NBA 2K series. But did you know there is a new version of the game that you can download for free? It is called NBA 2K20 V97, a modified version of the original game that offers many improvements and features. In this article, we will tell you everything you need to know about NBA 2K20 V97, including what it is, how to download it, why you should play it, and some tips and tricks to help you enjoy it more. Let's get started!</p>
<h2>What is NBA 2K20 V97?</h2>
<p>NBA 2K20 V97 is a modified version of NBA 2K20, the official basketball simulation game developed by Visual Concepts and published by 2K Sports. NBA 2K20 V97 was created by fans who wanted to improve the original game and make it more realistic, up to date, and fun. NBA 2K20 V97 is not an official update or patch from the developers, but rather a fan-made project that you can download and install on your Android device.</p>
<h2>descargar gratis nba 2k20 v97</h2><p><b>Download Zip &#10084; <a href="https://bltlly.com/2v6J1F">https://bltlly.com/2v6J1F</a></b></p>
<h3>Features of NBA 2K20 V97</h3>
<p>What makes NBA 2K20 V97 different from the original game? Here are some of the features you can expect from this modified version:</p>
<ul>
<li><b>New graphics and animations.</b> NBA 2K20 V97 has improved the game's graphics and animations, making it more realistic and smooth. The players, courts, jerseys, shoes, and other elements have been redesigned and upgraded to match their real-world counterparts. You can also enjoy new camera angles and effects that make the game more immersive.</li>
<li><b>Updated roster and modes.</b> NBA 2K20 V97 has updated the game's roster, teams, and ratings, and added new modes and features.</li>
<li><b>New customizable options and rewards.</b> NBA 2K20 V97 has added more options for you to customize your gaming experience. You can create your own player, team, logo, jersey, court, and more. You can also earn more rewards such as virtual currency, skill points, cards, badges, clothing items, shoes, and more, and use these rewards to upgrade your player or team, or to buy new items in the store.</li>
</ul>
<h3>System requirements for NBA 2K20 V97</h3>
<p>Before downloading NBA 2K20 V97, you should make sure your device meets the minimum system requirements for the game. Here are the system requirements for NBA 2K20 V97:</p>
<ul>
<li><b>OS:</b> Android 7 or higher</li>
<li><b>Processor:</b> Intel® Core™ i3-530 @ 2.93 GHz / AMD FX-4100 @ 3.60 GHz or better</li>
<li><b>Memory:</b> 4 GB RAM</li>
<li><b>Graphics:</b> NVIDIA® GeForce® GT 450 1GB / AMD® Radeon™ HD 7770 1GB or better</li>
<li><b>Storage:</b> 16 GB available space</li>
<li><b>Internet:</b> Broadband connection required</li>
</ul>
<h3>How to download NBA 2K20 V97</h3>
<p>Now that you know the features and requirements of NBA 2K20 V97, you may be wondering how to download it. Here are the steps you need to follow (an adb-based alternative is sketched after the list):</p>
<ol>
<li><b>Download the APK and OBB files.</b> You can download the APK and OBB files of NBA 2K20 V97 from this link: . The APK file is about 16 MB, while the OBB file is about 3.1 GB. Make sure you have enough space on your device before downloading.</li>
<li><b>Install the APK file.</b> After downloading the APK file, you need to install it on your device. You may need to enable installation from unknown sources in your settings. To do this, go to Settings > Security > Unknown sources and toggle it on. Then locate the APK file and tap it to install it.</li>
<li><b>Extract the OBB file.</b> After downloading the OBB file, extract it with a file manager or archive tool.</li>
<li><b>Move the OBB folder.</b> After extracting the OBB file, you need to move the OBB folder to the correct location. The OBB folder should be named com.t2ksports.nba2k20and and should contain a file named main.97.com.t2ksports.nba2k20and.obb. You need to move this folder to Android > OBB in your device's internal storage.</li>
<li><b>Launch the game.</b> After moving the OBB folder, you are ready to launch the game. You can find the game icon on your home screen or in the app drawer. Tap it and enjoy NBA 2K20 V97!</li>
</ol>
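<p>If you prefer to sideload from a computer, the same steps can be driven with adb over USB debugging. This is only a sketch: the APK file name below is a placeholder, and the folder name is the one quoted in the steps above, so adjust both to your actual downloads.</p>
<pre><code># Sketch of the manual install steps above, driven from a computer with adb.
# "nba2k20_v97.apk" is a placeholder name, not a verified file.
import subprocess

APK = "nba2k20_v97.apk"
OBB_DIR = "com.t2ksports.nba2k20and"  # contains main.97.com.t2ksports.nba2k20and.obb

subprocess.run(["adb", "install", APK], check=True)
subprocess.run(["adb", "push", OBB_DIR, f"/sdcard/Android/obb/{OBB_DIR}"], check=True)
</code></pre>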
<h2>Why you should play NBA 2K20 V97</h2>
<p>NBA 2K20 V97 is not just a simple mod of NBA 2K20, but a whole new experience that will make you love basketball games even more. Here are some of the reasons you should play NBA 2K20 V97:</p>
<h3>Improved gameplay and graphics</h3>
<p>NBA 2K20 V97 has made significant improvements to the gameplay and graphics of NBA 2K20, making it more realistic and enjoyable. The game has improved the players' physics, animations, controls, and AI, making them move and react more naturally and intelligently. It has also improved the graphics and sounds, making it look and feel more lifelike and immersive. You can see the players' sweat, emotions, expressions, and details, as well as the lighting, shadows, reflections, and textures of the courts and arenas.</p>
<h3>Updated roster and modes</h3>
<p>NBA 2K20 V97 has updated the roster and modes of NBA 2K20, making it more current and diverse. The game has added new players, teams, coaches, and ratings, based on the latest NBA season and events. You can play with your favorite players and teams, or discover new ones from different leagues and countries. The game has also added new modes and features, such as MyCareer, MyTeam, MyGM, MyLeague, Play Now, Blacktop, and more. You can create your own story, team, league, or challenge in these modes, or play online with other players from around the world.</p>
<h3>More customizable options and rewards</h3>
<p>NBA 2K20 V97 has added more options and rewards to customize your gaming experience. You can create your own player, team, logo, jersey, court, and more, using various tools and options. You can also earn more rewards such as virtual currency, skill points, cards, badges, clothing items, shoes, and more. You can use these rewards to upgrade your player or team, or to buy new items in the store. You can also unlock more achievements and trophies as you play.</p>
<h2>Tips and tricks for NBA 2K20 V97</h2>
<p>NBA 2K20 V97 is a fun and exciting game, but it can also be challenging and competitive. If you want to improve your skills and performance in NBA 2K20 V97, here are some tips and tricks you can use:</p>
<h3>Master your playbook</h3>
<p>One of the most important aspects of NBA 2K20 V97 is knowing your playbook. Your playbook is a set of plays and strategies you can use to score, defend, or create opportunities for your team. You can access your playbook by pressing the left directional button on your controller, or by tapping the playbook icon on your screen. You can then choose from different plays, such as pick and roll, isolation, post up, cut, screen, and more. You can also customize your playbook by adding or removing plays to suit your style and preference. Mastering your playbook will help you gain an edge over your opponents and execute your game plan effectively.</p>
<h3>Upgrade your MyPlayer</h3>
<p>Your MyPlayer is your custom character in the game's career mode. Upgrade them regularly, spending the virtual currency, skill points, and badges you earn to improve their ratings and abilities.</p>
<h3>Use evolution cards in MyTeam</h3>
<p>One of the most exciting features of NBA 2K20 V97 is the evolution cards in MyTeam. MyTeam is a mode where you can create your own dream team of players from different eras and leagues. You can collect cards of different players, such as current stars, legends, rookies, and more. You can also collect evolution cards, which are special cards that can evolve and improve as you use them. Evolution cards have different levels and tiers, and they can change their appearance, ratings, badges, and abilities as they evolve. You can evolve your cards by completing certain tasks or challenges with them, such as scoring a certain number of points, making a certain number of assists, or winning a certain number of games. Using evolution cards in MyTeam will help you build a stronger and more versatile team in NBA 2K20 V97.</p>
<h3>Score in the post and defend smartly</h3>
<p>One of the most effective ways to score in NBA 2K20 V97 is to score in the post. The post is the area near the basket where you can use your size, strength, and skills to overpower or outsmart your defender. To score in the post, you need to position yourself well, back down your defender, use different moves and fakes, and finish with a shot or a pass. Some of the moves you can use in the post are the drop step, spin move, hook shot, fadeaway shot, up-and-under move, pump fake, and more. To defend in the post, you need to anticipate your opponent's moves, stay in front of them, contest their shots or passes, and avoid fouling them. Scoring in the post and defending smartly will help you dominate the paint in NBA 2K20 V97.</p>
<h3>Manage your stamina and fouls</h3>
<p>Stamina and fouls can decide close games. Sprinting and heavy dribbling drain your players' stamina, which lowers their speed and shot accuracy, so pace yourself and rest tired players. On defense, avoid reckless reach-ins and jumping at shot fakes, since accumulated fouls send your opponents to the free-throw line and can foul out your key players.</p>
<h2>Conclusion</h2>
<p>NBA 2K20 V97 is a modified version of NBA 2K20 that offers many improvements and features that make the game more realistic, up to date, and fun. You can download NBA 2K20 V97 for free and install it on your Android device, as long as it meets the system requirements. You can enjoy the new graphics and animations, the updated roster and modes, the customizable options and rewards, and more. You can also use some tips and tricks to improve your skills and performance in NBA 2K20 V97. NBA 2K20 V97 is a great game for basketball fans who want to experience the thrill and excitement of the NBA on their mobile devices.</p>
<h3>Call to action</h3>
<p>If you are interested in NBA 2K20 V97, don't hesitate to download it now and try it for yourself. You can also share this article with your friends who might be interested in NBA 2K20 V97. Let us know what you think of NBA 2K20 V97 in the comments below. Thanks for reading!</p>
<h3>FAQs</h3>
<p>Here are some of the most frequently asked questions about NBA 2K20 V97:</p>
<ul>
<li><b>Is NBA 2K20 V97 safe to download?</b> Yes, NBA 2K20 V97 is safe to download as long as you download it from a trusted source. However, since it is a modified version of NBA 2K20, it is not endorsed or supported by the official developers or publishers of NBA 2K20. Therefore, you download and install NBA 2K20 V97 at your own risk.</li>
<li><b>Is NBA 2K20 V97 compatible with other devices?</b> NBA 2K20 V97 is designed for Android devices only. It is not compatible with iOS devices or PC. If you want to play NBA 2K20 on other devices, you need to buy the official version of NBA 2K20 from the respective platforms.</li>
<li><b>Can I transfer my progress from NBA 2K20 to NBA 2K20 V97?</b> No, you cannot transfer your progress from NBA 2K20 to NBA 2K20 V97. Since NBA 2K20 V97 is a modified version of NBA 2K20, it has different files and data that are not compatible with the original game. Therefore, you need to start from scratch when playing NBA 2K20 V97.</li>
<li><b>Can I update NBA 2K20 V97?</b> Yes, you can update NBA 2K20 V97 if new versions become available from the modders. However, updating NBA 2K20 V97 may require you to download and install new APK and OBB files for the game. Therefore, you should always back up your data before updating NBA 2K20 V97.</li>
</ul>
 
spaces/BetterAPI/BetterChat/src/lib/utils/trimSuffix.ts DELETED
@@ -1,6 +0,0 @@
export function trimSuffix(input: string, end: string): string {
	if (input.endsWith(end)) {
		return input.slice(0, input.length - end.length);
	}
	return input;
}
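
As a point of reference, this mirrors the contract of Python's built-in str.removesuffix (Python 3.9+): the suffix is dropped only when present. A minimal sketch with hypothetical sample values:

    # Same contract as trimSuffix above: remove the suffix only if present.
    print("answer.md".removesuffix(".md"))  # -> "answer"
    print("answer".removesuffix(".md"))     # -> "answer" (unchanged)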
 
spaces/Big-Web/MMSD/env/Lib/site-packages/boto3/resources/response.py DELETED
@@ -1,318 +0,0 @@
1
- # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # https://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
-
14
- import jmespath
15
- from botocore import xform_name
16
-
17
- from .params import get_data_member
18
-
19
-
20
- def all_not_none(iterable):
21
- """
22
- Return True if all elements of the iterable are not None (or if the
23
- iterable is empty). This is like the built-in ``all``, except checks
24
- against None, so 0 and False are allowable values.
25
- """
26
- for element in iterable:
27
- if element is None:
28
- return False
29
- return True
30
-
31
-
32
- def build_identifiers(identifiers, parent, params=None, raw_response=None):
33
- """
34
- Builds a mapping of identifier names to values based on the
35
- identifier source location, type, and target. Identifier
36
- values may be scalars or lists depending on the source type
37
- and location.
38
-
39
- :type identifiers: list
40
- :param identifiers: List of :py:class:`~boto3.resources.model.Parameter`
41
- definitions
42
- :type parent: ServiceResource
43
- :param parent: The resource instance to which this action is attached.
44
- :type params: dict
45
- :param params: Request parameters sent to the service.
46
- :type raw_response: dict
47
- :param raw_response: Low-level operation response.
48
- :rtype: list
49
- :return: An ordered list of ``(name, value)`` identifier tuples.
50
- """
51
- results = []
52
-
53
- for identifier in identifiers:
54
- source = identifier.source
55
- target = identifier.target
56
-
57
- if source == 'response':
58
- value = jmespath.search(identifier.path, raw_response)
59
- elif source == 'requestParameter':
60
- value = jmespath.search(identifier.path, params)
61
- elif source == 'identifier':
62
- value = getattr(parent, xform_name(identifier.name))
63
- elif source == 'data':
64
- # If this is a data member then it may incur a load
65
- # action before returning the value.
66
- value = get_data_member(parent, identifier.path)
67
- elif source == 'input':
68
- # This value is set by the user, so ignore it here
69
- continue
70
- else:
71
- raise NotImplementedError(f'Unsupported source type: {source}')
72
-
73
- results.append((xform_name(target), value))
74
-
75
- return results
76
-
77
-
78
- def build_empty_response(search_path, operation_name, service_model):
79
- """
80
- Creates an appropriate empty response for the type that is expected,
81
- based on the service model's shape type. For example, a value that
82
- is normally a list would then return an empty list. A structure would
83
- return an empty dict, and a number would return None.
84
-
85
- :type search_path: string
86
- :param search_path: JMESPath expression to search in the response
87
- :type operation_name: string
88
- :param operation_name: Name of the underlying service operation.
89
- :type service_model: :ref:`botocore.model.ServiceModel`
90
- :param service_model: The Botocore service model
91
- :rtype: dict, list, or None
92
- :return: An appropriate empty value
93
- """
94
- response = None
95
-
96
- operation_model = service_model.operation_model(operation_name)
97
- shape = operation_model.output_shape
98
-
99
- if search_path:
100
- # Walk the search path and find the final shape. For example, given
101
- # a path of ``foo.bar[0].baz``, we first find the shape for ``foo``,
102
- # then the shape for ``bar`` (ignoring the indexing), and finally
103
- # the shape for ``baz``.
104
- for item in search_path.split('.'):
105
- item = item.strip('[0123456789]$')
106
-
107
- if shape.type_name == 'structure':
108
- shape = shape.members[item]
109
- elif shape.type_name == 'list':
110
- shape = shape.member
111
- else:
112
- raise NotImplementedError(
113
- 'Search path hits shape type {} from {}'.format(
114
- shape.type_name, item
115
- )
116
- )
117
-
118
- # Anything not handled here is set to None
119
- if shape.type_name == 'structure':
120
- response = {}
121
- elif shape.type_name == 'list':
122
- response = []
123
- elif shape.type_name == 'map':
124
- response = {}
125
-
126
- return response
127
-
128
-
129
- class RawHandler:
130
- """
131
- A raw action response handler. This passes the response
- dictionary through, optionally after performing a JMESPath search
- if one has been defined for the action.
134
-
135
- :type search_path: string
136
- :param search_path: JMESPath expression to search in the response
137
- :rtype: dict
138
- :return: Service response
139
- """
140
-
141
- def __init__(self, search_path):
142
- self.search_path = search_path
143
-
144
- def __call__(self, parent, params, response):
145
- """
146
- :type parent: ServiceResource
147
- :param parent: The resource instance to which this action is attached.
148
- :type params: dict
149
- :param params: Request parameters sent to the service.
150
- :type response: dict
151
- :param response: Low-level operation response.
152
- """
153
- # TODO: Remove the '$' check after JMESPath supports it
154
- if self.search_path and self.search_path != '$':
155
- response = jmespath.search(self.search_path, response)
156
-
157
- return response
158
-
159
-
160
- class ResourceHandler:
161
- """
162
- Creates a new resource or list of new resources from the low-level
163
- response based on the given response resource definition.
164
-
165
- :type search_path: string
166
- :param search_path: JMESPath expression to search in the response
167
-
168
- :type factory: ResourceFactory
169
- :param factory: The factory that created the resource class to which
170
- this action is attached.
171
-
172
- :type resource_model: :py:class:`~boto3.resources.model.ResponseResource`
173
- :param resource_model: Response resource model.
174
-
175
- :type service_context: :py:class:`~boto3.utils.ServiceContext`
176
- :param service_context: Context about the AWS service
177
-
178
- :type operation_name: string
179
- :param operation_name: Name of the underlying service operation, if it
180
- exists.
181
-
182
- :rtype: ServiceResource or list
183
- :return: New resource instance(s).
184
- """
185
-
186
- def __init__(
187
- self,
188
- search_path,
189
- factory,
190
- resource_model,
191
- service_context,
192
- operation_name=None,
193
- ):
194
- self.search_path = search_path
195
- self.factory = factory
196
- self.resource_model = resource_model
197
- self.operation_name = operation_name
198
- self.service_context = service_context
199
-
200
- def __call__(self, parent, params, response):
201
- """
202
- :type parent: ServiceResource
203
- :param parent: The resource instance to which this action is attached.
204
- :type params: dict
205
- :param params: Request parameters sent to the service.
206
- :type response: dict
207
- :param response: Low-level operation response.
208
- """
209
- resource_name = self.resource_model.type
210
- json_definition = self.service_context.resource_json_definitions.get(
211
- resource_name
212
- )
213
-
214
- # Load the new resource class that will result from this action.
215
- resource_cls = self.factory.load_from_definition(
216
- resource_name=resource_name,
217
- single_resource_json_definition=json_definition,
218
- service_context=self.service_context,
219
- )
220
- raw_response = response
221
- search_response = None
222
-
223
- # Anytime a path is defined, it means the response contains the
224
- # resource's attributes, so resource_data gets set here. It
225
- # eventually ends up in resource.meta.data, which is where
226
- # the attribute properties look for data.
227
- if self.search_path:
228
- search_response = jmespath.search(self.search_path, raw_response)
229
-
230
- # First, we parse all the identifiers, then create the individual
231
- # response resources using them. Any identifiers that are lists
232
- # will have one item consumed from the front of the list for each
233
- # resource that is instantiated. Items which are not a list will
234
- # be set as the same value on each new resource instance.
235
- identifiers = dict(
236
- build_identifiers(
237
- self.resource_model.identifiers, parent, params, raw_response
238
- )
239
- )
240
-
241
- # If any of the identifiers is a list, then the response is plural
242
- plural = [v for v in identifiers.values() if isinstance(v, list)]
243
-
244
- if plural:
245
- response = []
246
-
247
- # The number of items in an identifier that is a list will
248
- # determine how many resource instances to create.
249
- for i in range(len(plural[0])):
250
- # Response item data is *only* available if a search path
251
- # was given. This prevents accidentally loading unrelated
252
- # data that may be in the response.
253
- response_item = None
254
- if search_response:
255
- response_item = search_response[i]
256
- response.append(
257
- self.handle_response_item(
258
- resource_cls, parent, identifiers, response_item
259
- )
260
- )
261
- elif all_not_none(identifiers.values()):
262
- # All identifiers must always exist, otherwise the resource
263
- # cannot be instantiated.
264
- response = self.handle_response_item(
265
- resource_cls, parent, identifiers, search_response
266
- )
267
- else:
268
- # The response should be empty, but that may mean an
269
- # empty dict, list, or None based on whether we make
270
- # a remote service call and what shape it is expected
271
- # to return.
272
- response = None
273
- if self.operation_name is not None:
274
- # A remote service call was made, so try and determine
275
- # its shape.
276
- response = build_empty_response(
277
- self.search_path,
278
- self.operation_name,
279
- self.service_context.service_model,
280
- )
281
-
282
- return response
283
-
284
- def handle_response_item(
285
- self, resource_cls, parent, identifiers, resource_data
286
- ):
287
- """
288
- Handles the creation of a single response item by setting
289
- parameters and creating the appropriate resource instance.
290
-
291
- :type resource_cls: ServiceResource subclass
292
- :param resource_cls: The resource class to instantiate.
293
- :type parent: ServiceResource
294
- :param parent: The resource instance to which this action is attached.
295
- :type identifiers: dict
296
- :param identifiers: Map of identifier names to value or values.
297
- :type resource_data: dict or None
298
- :param resource_data: Data for resource attributes.
299
- :rtype: ServiceResource
300
- :return: New resource instance.
301
- """
302
- kwargs = {
303
- 'client': parent.meta.client,
304
- }
305
-
306
- for name, value in identifiers.items():
307
- # If value is a list, then consume the next item
308
- if isinstance(value, list):
309
- value = value.pop(0)
310
-
311
- kwargs[name] = value
312
-
313
- resource = resource_cls(**kwargs)
314
-
315
- if resource_data is not None:
316
- resource.meta.data = resource_data
317
-
318
- return resource
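To make the identifier plumbing above concrete, here is a standalone sketch of the idea (not boto3's public API; the response shape and expression are invented for illustration): JMESPath pulls scalar or list values out of a low-level response, and a list-valued identifier makes the action plural, yielding one resource per element.

    import jmespath  # the same dependency build_identifiers() uses

    raw_response = {"Instances": [{"InstanceId": "i-1"}, {"InstanceId": "i-2"}]}

    # source == 'response': search the low-level response for identifier values.
    ids = jmespath.search("Instances[].InstanceId", raw_response)
    print(ids)  # ['i-1', 'i-2']

    # A list-valued identifier means one resource per element, mirroring how
    # ResourceHandler pops a value off the front of the list per instance.
    for instance_id in ids:
        print("would construct a resource with id:", instance_id)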
 
spaces/Big-Web/MMSD/env/Lib/site-packages/pip/_vendor/chardet/eucjpprober.py DELETED
@@ -1,102 +0,0 @@
- ######################## BEGIN LICENSE BLOCK ########################
- # The Original Code is mozilla.org code.
- #
- # The Initial Developer of the Original Code is
- # Netscape Communications Corporation.
- # Portions created by the Initial Developer are Copyright (C) 1998
- # the Initial Developer. All Rights Reserved.
- #
- # Contributor(s):
- #   Mark Pilgrim - port to Python
- #
- # This library is free software; you can redistribute it and/or
- # modify it under the terms of the GNU Lesser General Public
- # License as published by the Free Software Foundation; either
- # version 2.1 of the License, or (at your option) any later version.
- #
- # This library is distributed in the hope that it will be useful,
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- # Lesser General Public License for more details.
- #
- # You should have received a copy of the GNU Lesser General Public
- # License along with this library; if not, write to the Free Software
- # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- # 02110-1301  USA
- ######################### END LICENSE BLOCK #########################
-
- from typing import Union
-
- from .chardistribution import EUCJPDistributionAnalysis
- from .codingstatemachine import CodingStateMachine
- from .enums import MachineState, ProbingState
- from .jpcntx import EUCJPContextAnalysis
- from .mbcharsetprober import MultiByteCharSetProber
- from .mbcssm import EUCJP_SM_MODEL
-
-
- class EUCJPProber(MultiByteCharSetProber):
-     def __init__(self) -> None:
-         super().__init__()
-         self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
-         self.distribution_analyzer = EUCJPDistributionAnalysis()
-         self.context_analyzer = EUCJPContextAnalysis()
-         self.reset()
-
-     def reset(self) -> None:
-         super().reset()
-         self.context_analyzer.reset()
-
-     @property
-     def charset_name(self) -> str:
-         return "EUC-JP"
-
-     @property
-     def language(self) -> str:
-         return "Japanese"
-
-     def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
-         assert self.coding_sm is not None
-         assert self.distribution_analyzer is not None
-
-         for i, byte in enumerate(byte_str):
-             # PY3K: byte_str is a byte array, so byte is an int, not a byte
-             coding_state = self.coding_sm.next_state(byte)
-             if coding_state == MachineState.ERROR:
-                 self.logger.debug(
-                     "%s %s prober hit error at byte %s",
-                     self.charset_name,
-                     self.language,
-                     i,
-                 )
-                 self._state = ProbingState.NOT_ME
-                 break
-             if coding_state == MachineState.ITS_ME:
-                 self._state = ProbingState.FOUND_IT
-                 break
-             if coding_state == MachineState.START:
-                 char_len = self.coding_sm.get_current_charlen()
-                 if i == 0:
-                     self._last_char[1] = byte
-                     self.context_analyzer.feed(self._last_char, char_len)
-                     self.distribution_analyzer.feed(self._last_char, char_len)
-                 else:
-                     self.context_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
-                     self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
-
-         self._last_char[0] = byte_str[-1]
-
-         if self.state == ProbingState.DETECTING:
-             if self.context_analyzer.got_enough_data() and (
-                 self.get_confidence() > self.SHORTCUT_THRESHOLD
-             ):
-                 self._state = ProbingState.FOUND_IT
-
-         return self.state
-
-     def get_confidence(self) -> float:
-         assert self.distribution_analyzer is not None
-
-         context_conf = self.context_analyzer.get_confidence()
-         distrib_conf = self.distribution_analyzer.get_confidence()
-         return max(context_conf, distrib_conf)
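Probers like EUCJPProber are normally driven through chardet's top-level detect() helper rather than instantiated directly; a minimal sketch (the sample bytes are invented for illustration):

    import chardet

    # detect() runs the registered probers (including EUCJPProber) over the
    # bytes and reports the most confident guess.
    sample = "こんにちは、世界".encode("euc-jp")
    result = chardet.detect(sample)
    print(result["encoding"], result["confidence"])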
 
spaces/Big-Web/MMSD/env/Lib/site-packages/s3transfer/__init__.py DELETED
@@ -1,875 +0,0 @@
1
- # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # http://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
- """Abstractions over S3's upload/download operations.
14
-
15
- This module provides high level abstractions for efficient
16
- uploads/downloads. It handles several things for the user:
17
-
18
- * Automatically switching to multipart transfers when
19
- a file is over a specific size threshold
20
- * Uploading/downloading a file in parallel
21
- * Throttling based on max bandwidth
22
- * Progress callbacks to monitor transfers
23
- * Retries. While botocore handles retries for streaming uploads,
24
- it is not possible for it to handle retries for streaming
25
- downloads. This module handles retries for both cases so
26
- you don't need to implement any retry logic yourself.
27
-
28
- This module has a reasonable set of defaults. It also allows you
29
- to configure many aspects of the transfer process including:
30
-
31
- * Multipart threshold size
32
- * Max parallel downloads
33
- * Max bandwidth
34
- * Socket timeouts
35
- * Retry amounts
36
-
37
- There is no support for s3->s3 multipart copies at this
38
- time.
39
-
40
-
41
- .. _ref_s3transfer_usage:
42
-
43
- Usage
44
- =====
45
-
46
- The simplest way to use this module is:
47
-
48
- .. code-block:: python
49
-
50
- client = boto3.client('s3', 'us-west-2')
51
- transfer = S3Transfer(client)
52
- # Upload /tmp/myfile to s3://bucket/key
53
- transfer.upload_file('/tmp/myfile', 'bucket', 'key')
54
-
55
- # Download s3://bucket/key to /tmp/myfile
56
- transfer.download_file('bucket', 'key', '/tmp/myfile')
57
-
58
- The ``upload_file`` and ``download_file`` methods also accept
59
- ``**kwargs``, which will be forwarded through to the corresponding
60
- client operation. Here are a few examples using ``upload_file``::
61
-
62
- # Making the object public
63
- transfer.upload_file('/tmp/myfile', 'bucket', 'key',
64
- extra_args={'ACL': 'public-read'})
65
-
66
- # Setting metadata
67
- transfer.upload_file('/tmp/myfile', 'bucket', 'key',
68
- extra_args={'Metadata': {'a': 'b', 'c': 'd'}})
69
-
70
- # Setting content type
71
- transfer.upload_file('/tmp/myfile.json', 'bucket', 'key',
72
- extra_args={'ContentType': "application/json"})
73
-
74
-
75
- The ``S3Transfer`` class also supports progress callbacks so you can
76
- provide transfer progress to users. Both the ``upload_file`` and
77
- ``download_file`` methods take an optional ``callback`` parameter.
78
- Here's an example of how to print a simple progress percentage
79
- to the user:
80
-
81
- .. code-block:: python
82
-
83
- class ProgressPercentage(object):
84
- def __init__(self, filename):
85
- self._filename = filename
86
- self._size = float(os.path.getsize(filename))
87
- self._seen_so_far = 0
88
- self._lock = threading.Lock()
89
-
90
- def __call__(self, bytes_amount):
91
- # To simplify we'll assume this is hooked up
92
- # to a single filename.
93
- with self._lock:
94
- self._seen_so_far += bytes_amount
95
- percentage = (self._seen_so_far / self._size) * 100
96
- sys.stdout.write(
97
- "\r%s %s / %s (%.2f%%)" % (self._filename, self._seen_so_far,
98
- self._size, percentage))
99
- sys.stdout.flush()
100
-
101
-
102
- transfer = S3Transfer(boto3.client('s3', 'us-west-2'))
103
- # Upload /tmp/myfile to s3://bucket/key and print upload progress.
104
- transfer.upload_file('/tmp/myfile', 'bucket', 'key',
105
- callback=ProgressPercentage('/tmp/myfile'))
106
-
107
-
108
-
109
- You can also provide a TransferConfig object to the S3Transfer
110
- object that gives you more fine grained control over the
111
- transfer. For example:
112
-
113
- .. code-block:: python
114
-
115
- client = boto3.client('s3', 'us-west-2')
116
- config = TransferConfig(
117
- multipart_threshold=8 * 1024 * 1024,
118
- max_concurrency=10,
119
- num_download_attempts=10,
120
- )
121
- transfer = S3Transfer(client, config)
122
- transfer.upload_file('/tmp/foo', 'bucket', 'key')
123
-
124
-
125
- """
126
- import concurrent.futures
127
- import functools
128
- import logging
129
- import math
130
- import os
131
- import queue
132
- import random
133
- import socket
134
- import string
135
- import threading
136
-
137
- from botocore.compat import six # noqa: F401
138
- from botocore.exceptions import IncompleteReadError
139
- from botocore.vendored.requests.packages.urllib3.exceptions import (
140
- ReadTimeoutError,
141
- )
142
-
143
- import s3transfer.compat
144
- from s3transfer.exceptions import RetriesExceededError, S3UploadFailedError
145
-
146
- __author__ = 'Amazon Web Services'
147
- __version__ = '0.6.1'
148
-
149
-
150
- class NullHandler(logging.Handler):
151
- def emit(self, record):
152
- pass
153
-
154
-
155
- logger = logging.getLogger(__name__)
156
- logger.addHandler(NullHandler())
157
-
158
- MB = 1024 * 1024
159
- SHUTDOWN_SENTINEL = object()
160
-
161
-
162
- def random_file_extension(num_digits=8):
163
- return ''.join(random.choice(string.hexdigits) for _ in range(num_digits))
164
-
165
-
166
- def disable_upload_callbacks(request, operation_name, **kwargs):
167
- if operation_name in ['PutObject', 'UploadPart'] and hasattr(
168
- request.body, 'disable_callback'
169
- ):
170
- request.body.disable_callback()
171
-
172
-
173
- def enable_upload_callbacks(request, operation_name, **kwargs):
174
- if operation_name in ['PutObject', 'UploadPart'] and hasattr(
175
- request.body, 'enable_callback'
176
- ):
177
- request.body.enable_callback()
178
-
179
-
180
- class QueueShutdownError(Exception):
181
- pass
182
-
183
-
184
- class ReadFileChunk:
185
- def __init__(
186
- self,
187
- fileobj,
188
- start_byte,
189
- chunk_size,
190
- full_file_size,
191
- callback=None,
192
- enable_callback=True,
193
- ):
194
- """
195
-
196
- Given a file object shown below:
197
-
198
- |___________________________________________________|
199
- 0 | | full_file_size
200
- |----chunk_size---|
201
- start_byte
202
-
203
- :type fileobj: file
204
- :param fileobj: File like object
205
-
206
- :type start_byte: int
207
- :param start_byte: The first byte from which to start reading.
208
-
209
- :type chunk_size: int
210
- :param chunk_size: The max chunk size to read. Trying to read
- past the end of the chunk size will behave as if you've
- reached the end of the file.
213
-
214
- :type full_file_size: int
215
- :param full_file_size: The entire content length associated
216
- with ``fileobj``.
217
-
218
- :type callback: function(amount_read)
219
- :param callback: Called whenever data is read from this object.
220
-
221
- """
222
- self._fileobj = fileobj
223
- self._start_byte = start_byte
224
- self._size = self._calculate_file_size(
225
- self._fileobj,
226
- requested_size=chunk_size,
227
- start_byte=start_byte,
228
- actual_file_size=full_file_size,
229
- )
230
- self._fileobj.seek(self._start_byte)
231
- self._amount_read = 0
232
- self._callback = callback
233
- self._callback_enabled = enable_callback
234
-
235
- @classmethod
236
- def from_filename(
237
- cls,
238
- filename,
239
- start_byte,
240
- chunk_size,
241
- callback=None,
242
- enable_callback=True,
243
- ):
244
- """Convenience factory function to create from a filename.
245
-
246
- :type start_byte: int
247
- :param start_byte: The first byte from which to start reading.
248
-
249
- :type chunk_size: int
250
- :param chunk_size: The max chunk size to read. Trying to read
- past the end of the chunk size will behave as if you've
- reached the end of the file.
253
-
258
- :type callback: function(amount_read)
259
- :param callback: Called whenever data is read from this object.
260
-
261
- :type enable_callback: bool
262
- :param enable_callback: Indicate whether to invoke callback
263
- during read() calls.
264
-
265
- :rtype: ``ReadFileChunk``
266
- :return: A new instance of ``ReadFileChunk``
267
-
268
- """
269
- f = open(filename, 'rb')
270
- file_size = os.fstat(f.fileno()).st_size
271
- return cls(
272
- f, start_byte, chunk_size, file_size, callback, enable_callback
273
- )
274
-
275
- def _calculate_file_size(
276
- self, fileobj, requested_size, start_byte, actual_file_size
277
- ):
278
- max_chunk_size = actual_file_size - start_byte
279
- return min(max_chunk_size, requested_size)
280
-
281
- def read(self, amount=None):
282
- if amount is None:
283
- amount_to_read = self._size - self._amount_read
284
- else:
285
- amount_to_read = min(self._size - self._amount_read, amount)
286
- data = self._fileobj.read(amount_to_read)
287
- self._amount_read += len(data)
288
- if self._callback is not None and self._callback_enabled:
289
- self._callback(len(data))
290
- return data
291
-
292
- def enable_callback(self):
293
- self._callback_enabled = True
294
-
295
- def disable_callback(self):
296
- self._callback_enabled = False
297
-
298
- def seek(self, where):
299
- self._fileobj.seek(self._start_byte + where)
300
- if self._callback is not None and self._callback_enabled:
301
- # To also rewind the callback() for an accurate progress report
302
- self._callback(where - self._amount_read)
303
- self._amount_read = where
304
-
305
- def close(self):
306
- self._fileobj.close()
307
-
308
- def tell(self):
309
- return self._amount_read
310
-
311
- def __len__(self):
312
- # __len__ is defined because requests will try to determine the length
313
- # of the stream to set a content length. In the normal case
314
- # of the file it will just stat the file, but we need to change that
315
- # behavior. By providing a __len__, requests will use that instead
316
- # of stat'ing the file.
317
- return self._size
318
-
319
- def __enter__(self):
320
- return self
321
-
322
- def __exit__(self, *args, **kwargs):
323
- self.close()
324
-
325
- def __iter__(self):
326
- # This is a workaround for http://bugs.python.org/issue17575
327
- # Basically httplib will try to iterate over the contents, even
328
- # if its a file like object. This wasn't noticed because we've
329
- # already exhausted the stream so iterating over the file immediately
330
- # stops, which is what we're simulating here.
331
- return iter([])
332
-
333
-
334
- class StreamReaderProgress:
335
- """Wrapper for a read only stream that adds progress callbacks."""
336
-
337
- def __init__(self, stream, callback=None):
338
- self._stream = stream
339
- self._callback = callback
340
-
341
- def read(self, *args, **kwargs):
342
- value = self._stream.read(*args, **kwargs)
343
- if self._callback is not None:
344
- self._callback(len(value))
345
- return value
346
-
347
-
348
- class OSUtils:
349
- def get_file_size(self, filename):
350
- return os.path.getsize(filename)
351
-
352
- def open_file_chunk_reader(self, filename, start_byte, size, callback):
353
- return ReadFileChunk.from_filename(
354
- filename, start_byte, size, callback, enable_callback=False
355
- )
356
-
357
- def open(self, filename, mode):
358
- return open(filename, mode)
359
-
360
- def remove_file(self, filename):
361
- """Remove a file, noop if file does not exist."""
362
- # Unlike os.remove, if the file does not exist,
363
- # then this method does nothing.
364
- try:
365
- os.remove(filename)
366
- except OSError:
367
- pass
368
-
369
- def rename_file(self, current_filename, new_filename):
370
- s3transfer.compat.rename_file(current_filename, new_filename)
371
-
372
-
373
- class MultipartUploader:
374
- # These are the extra_args that need to be forwarded onto
375
- # subsequent upload_parts.
376
- UPLOAD_PART_ARGS = [
377
- 'SSECustomerKey',
378
- 'SSECustomerAlgorithm',
379
- 'SSECustomerKeyMD5',
380
- 'RequestPayer',
381
- ]
382
-
383
- def __init__(
384
- self,
385
- client,
386
- config,
387
- osutil,
388
- executor_cls=concurrent.futures.ThreadPoolExecutor,
389
- ):
390
- self._client = client
391
- self._config = config
392
- self._os = osutil
393
- self._executor_cls = executor_cls
394
-
395
- def _extra_upload_part_args(self, extra_args):
396
- # Only the args in UPLOAD_PART_ARGS actually need to be passed
397
- # onto the upload_part calls.
398
- upload_parts_args = {}
399
- for key, value in extra_args.items():
400
- if key in self.UPLOAD_PART_ARGS:
401
- upload_parts_args[key] = value
402
- return upload_parts_args
403
-
404
- def upload_file(self, filename, bucket, key, callback, extra_args):
405
- response = self._client.create_multipart_upload(
406
- Bucket=bucket, Key=key, **extra_args
407
- )
408
- upload_id = response['UploadId']
409
- try:
410
- parts = self._upload_parts(
411
- upload_id, filename, bucket, key, callback, extra_args
412
- )
413
- except Exception as e:
414
- logger.debug(
415
- "Exception raised while uploading parts, "
416
- "aborting multipart upload.",
417
- exc_info=True,
418
- )
419
- self._client.abort_multipart_upload(
420
- Bucket=bucket, Key=key, UploadId=upload_id
421
- )
422
- raise S3UploadFailedError(
423
- "Failed to upload {} to {}: {}".format(
424
- filename, '/'.join([bucket, key]), e
425
- )
426
- )
427
- self._client.complete_multipart_upload(
428
- Bucket=bucket,
429
- Key=key,
430
- UploadId=upload_id,
431
- MultipartUpload={'Parts': parts},
432
- )
433
-
434
- def _upload_parts(
435
- self, upload_id, filename, bucket, key, callback, extra_args
436
- ):
437
- upload_parts_extra_args = self._extra_upload_part_args(extra_args)
438
- parts = []
439
- part_size = self._config.multipart_chunksize
440
- num_parts = int(
441
- math.ceil(self._os.get_file_size(filename) / float(part_size))
442
- )
443
- max_workers = self._config.max_concurrency
444
- with self._executor_cls(max_workers=max_workers) as executor:
445
- upload_partial = functools.partial(
446
- self._upload_one_part,
447
- filename,
448
- bucket,
449
- key,
450
- upload_id,
451
- part_size,
452
- upload_parts_extra_args,
453
- callback,
454
- )
455
- for part in executor.map(upload_partial, range(1, num_parts + 1)):
456
- parts.append(part)
457
- return parts
458
-
459
- def _upload_one_part(
460
- self,
461
- filename,
462
- bucket,
463
- key,
464
- upload_id,
465
- part_size,
466
- extra_args,
467
- callback,
468
- part_number,
469
- ):
470
- open_chunk_reader = self._os.open_file_chunk_reader
471
- with open_chunk_reader(
472
- filename, part_size * (part_number - 1), part_size, callback
473
- ) as body:
474
- response = self._client.upload_part(
475
- Bucket=bucket,
476
- Key=key,
477
- UploadId=upload_id,
478
- PartNumber=part_number,
479
- Body=body,
480
- **extra_args,
481
- )
482
- etag = response['ETag']
483
- return {'ETag': etag, 'PartNumber': part_number}
484
-
485
-
486
- class ShutdownQueue(queue.Queue):
487
- """A queue implementation that can be shutdown.
488
-
489
- Shutting down a queue means that this class adds a
490
- trigger_shutdown method that will trigger all subsequent
491
- calls to put() to fail with a ``QueueShutdownError``.
492
-
493
- It purposefully deviates from queue.Queue, and is *not* meant
494
- to be a drop in replacement for ``queue.Queue``.
495
-
496
- """
497
-
498
- def _init(self, maxsize):
499
- self._shutdown = False
500
- self._shutdown_lock = threading.Lock()
501
- # queue.Queue was an old-style class on Python 2, so this avoids super().
502
- return queue.Queue._init(self, maxsize)
503
-
504
- def trigger_shutdown(self):
505
- with self._shutdown_lock:
506
- self._shutdown = True
507
- logger.debug("The IO queue is now shutdown.")
508
-
509
- def put(self, item):
510
- # Note: this is not sufficient, it's still possible to deadlock!
511
- # Need to hook into the condition vars used by this class.
512
- with self._shutdown_lock:
513
- if self._shutdown:
514
- raise QueueShutdownError(
515
- "Cannot put item to queue when " "queue has been shutdown."
516
- )
517
- return queue.Queue.put(self, item)
518
-
519
-
520
- class MultipartDownloader:
521
- def __init__(
522
- self,
523
- client,
524
- config,
525
- osutil,
526
- executor_cls=concurrent.futures.ThreadPoolExecutor,
527
- ):
528
- self._client = client
529
- self._config = config
530
- self._os = osutil
531
- self._executor_cls = executor_cls
532
- self._ioqueue = ShutdownQueue(self._config.max_io_queue)
533
-
534
- def download_file(
535
- self, bucket, key, filename, object_size, extra_args, callback=None
536
- ):
537
- with self._executor_cls(max_workers=2) as controller:
538
- # 1 thread for the future that manages the downloading of parts
539
- # 1 thread for the future that manages IO writes.
540
- download_parts_handler = functools.partial(
541
- self._download_file_as_future,
542
- bucket,
543
- key,
544
- filename,
545
- object_size,
546
- callback,
547
- )
548
- parts_future = controller.submit(download_parts_handler)
549
-
550
- io_writes_handler = functools.partial(
551
- self._perform_io_writes, filename
552
- )
553
- io_future = controller.submit(io_writes_handler)
554
- results = concurrent.futures.wait(
555
- [parts_future, io_future],
556
- return_when=concurrent.futures.FIRST_EXCEPTION,
557
- )
558
- self._process_future_results(results)
559
-
560
- def _process_future_results(self, futures):
561
- finished, unfinished = futures
562
- for future in finished:
563
- future.result()
564
-
565
- def _download_file_as_future(
566
- self, bucket, key, filename, object_size, callback
567
- ):
568
- part_size = self._config.multipart_chunksize
569
- num_parts = int(math.ceil(object_size / float(part_size)))
570
- max_workers = self._config.max_concurrency
571
- download_partial = functools.partial(
572
- self._download_range,
573
- bucket,
574
- key,
575
- filename,
576
- part_size,
577
- num_parts,
578
- callback,
579
- )
580
- try:
581
- with self._executor_cls(max_workers=max_workers) as executor:
582
- list(executor.map(download_partial, range(num_parts)))
583
- finally:
584
- self._ioqueue.put(SHUTDOWN_SENTINEL)
585
-
586
- def _calculate_range_param(self, part_size, part_index, num_parts):
587
- start_range = part_index * part_size
588
- if part_index == num_parts - 1:
589
- end_range = ''
590
- else:
591
- end_range = start_range + part_size - 1
592
- range_param = f'bytes={start_range}-{end_range}'
593
- return range_param
594
-
595
- def _download_range(
596
- self, bucket, key, filename, part_size, num_parts, callback, part_index
597
- ):
598
- try:
599
- range_param = self._calculate_range_param(
600
- part_size, part_index, num_parts
601
- )
602
-
603
- max_attempts = self._config.num_download_attempts
604
- last_exception = None
605
- for i in range(max_attempts):
606
- try:
607
- logger.debug("Making get_object call.")
608
- response = self._client.get_object(
609
- Bucket=bucket, Key=key, Range=range_param
610
- )
611
- streaming_body = StreamReaderProgress(
612
- response['Body'], callback
613
- )
614
- buffer_size = 1024 * 16
615
- current_index = part_size * part_index
616
- for chunk in iter(
617
- lambda: streaming_body.read(buffer_size), b''
618
- ):
619
- self._ioqueue.put((current_index, chunk))
620
- current_index += len(chunk)
621
- return
622
- except (
623
- socket.timeout,
624
- OSError,
625
- ReadTimeoutError,
626
- IncompleteReadError,
627
- ) as e:
628
- logger.debug(
629
- "Retrying exception caught (%s), "
630
- "retrying request, (attempt %s / %s)",
631
- e,
632
- i,
633
- max_attempts,
634
- exc_info=True,
635
- )
636
- last_exception = e
637
- continue
638
- raise RetriesExceededError(last_exception)
639
- finally:
640
- logger.debug("EXITING _download_range for part: %s", part_index)
641
-
642
- def _perform_io_writes(self, filename):
643
- with self._os.open(filename, 'wb') as f:
644
- while True:
645
- task = self._ioqueue.get()
646
- if task is SHUTDOWN_SENTINEL:
647
- logger.debug(
648
- "Shutdown sentinel received in IO handler, "
649
- "shutting down IO handler."
650
- )
651
- return
652
- else:
653
- try:
654
- offset, data = task
655
- f.seek(offset)
656
- f.write(data)
657
- except Exception as e:
658
- logger.debug(
659
- "Caught exception in IO thread: %s",
660
- e,
661
- exc_info=True,
662
- )
663
- self._ioqueue.trigger_shutdown()
664
- raise
665
-
666
-
667
- class TransferConfig:
668
- def __init__(
669
- self,
670
- multipart_threshold=8 * MB,
671
- max_concurrency=10,
672
- multipart_chunksize=8 * MB,
673
- num_download_attempts=5,
674
- max_io_queue=100,
675
- ):
676
- self.multipart_threshold = multipart_threshold
677
- self.max_concurrency = max_concurrency
678
- self.multipart_chunksize = multipart_chunksize
679
- self.num_download_attempts = num_download_attempts
680
- self.max_io_queue = max_io_queue
681
-
682
-
683
- class S3Transfer:
684
-
685
- ALLOWED_DOWNLOAD_ARGS = [
686
- 'VersionId',
687
- 'SSECustomerAlgorithm',
688
- 'SSECustomerKey',
689
- 'SSECustomerKeyMD5',
690
- 'RequestPayer',
691
- ]
692
-
693
- ALLOWED_UPLOAD_ARGS = [
694
- 'ACL',
695
- 'CacheControl',
696
- 'ContentDisposition',
697
- 'ContentEncoding',
698
- 'ContentLanguage',
699
- 'ContentType',
700
- 'Expires',
701
- 'GrantFullControl',
702
- 'GrantRead',
703
- 'GrantReadACP',
704
- 'GrantWriteACP',
705
- 'Metadata',
706
- 'RequestPayer',
707
- 'ServerSideEncryption',
708
- 'StorageClass',
709
- 'SSECustomerAlgorithm',
710
- 'SSECustomerKey',
711
- 'SSECustomerKeyMD5',
712
- 'SSEKMSKeyId',
713
- 'SSEKMSEncryptionContext',
714
- 'Tagging',
715
- ]
716
-
717
- def __init__(self, client, config=None, osutil=None):
718
- self._client = client
719
- if config is None:
720
- config = TransferConfig()
721
- self._config = config
722
- if osutil is None:
723
- osutil = OSUtils()
724
- self._osutil = osutil
725
-
726
- def upload_file(
727
- self, filename, bucket, key, callback=None, extra_args=None
728
- ):
729
- """Upload a file to an S3 object.
730
-
731
- Variants have also been injected into S3 client, Bucket and Object.
732
- You don't have to use S3Transfer.upload_file() directly.
733
- """
734
- if extra_args is None:
735
- extra_args = {}
736
- self._validate_all_known_args(extra_args, self.ALLOWED_UPLOAD_ARGS)
737
- events = self._client.meta.events
738
- events.register_first(
739
- 'request-created.s3',
740
- disable_upload_callbacks,
741
- unique_id='s3upload-callback-disable',
742
- )
743
- events.register_last(
744
- 'request-created.s3',
745
- enable_upload_callbacks,
746
- unique_id='s3upload-callback-enable',
747
- )
748
- if (
749
- self._osutil.get_file_size(filename)
750
- >= self._config.multipart_threshold
751
- ):
752
- self._multipart_upload(filename, bucket, key, callback, extra_args)
753
- else:
754
- self._put_object(filename, bucket, key, callback, extra_args)
755
-
756
- def _put_object(self, filename, bucket, key, callback, extra_args):
757
- # We're using open_file_chunk_reader so we can take advantage of the
758
- # progress callback functionality.
759
- open_chunk_reader = self._osutil.open_file_chunk_reader
760
- with open_chunk_reader(
761
- filename,
762
- 0,
763
- self._osutil.get_file_size(filename),
764
- callback=callback,
765
- ) as body:
766
- self._client.put_object(
767
- Bucket=bucket, Key=key, Body=body, **extra_args
768
- )
769
-
770
- def download_file(
771
- self, bucket, key, filename, extra_args=None, callback=None
772
- ):
773
- """Download an S3 object to a file.
774
-
775
- Variants have also been injected into S3 client, Bucket and Object.
776
- You don't have to use S3Transfer.download_file() directly.
777
- """
778
- # This method will issue a ``head_object`` request to determine
779
- # the size of the S3 object. This is used to determine if the
780
- # object is downloaded in parallel.
781
- if extra_args is None:
782
- extra_args = {}
783
- self._validate_all_known_args(extra_args, self.ALLOWED_DOWNLOAD_ARGS)
784
- object_size = self._object_size(bucket, key, extra_args)
785
- temp_filename = filename + os.extsep + random_file_extension()
786
- try:
787
- self._download_file(
788
- bucket, key, temp_filename, object_size, extra_args, callback
789
- )
790
- except Exception:
791
- logger.debug(
792
- "Exception caught in download_file, removing partial "
793
- "file: %s",
794
- temp_filename,
795
- exc_info=True,
796
- )
797
- self._osutil.remove_file(temp_filename)
798
- raise
799
- else:
800
- self._osutil.rename_file(temp_filename, filename)
801
-
802
- def _download_file(
803
- self, bucket, key, filename, object_size, extra_args, callback
804
- ):
805
- if object_size >= self._config.multipart_threshold:
806
- self._ranged_download(
807
- bucket, key, filename, object_size, extra_args, callback
808
- )
809
- else:
810
- self._get_object(bucket, key, filename, extra_args, callback)
811
-
812
- def _validate_all_known_args(self, actual, allowed):
813
- for kwarg in actual:
814
- if kwarg not in allowed:
815
- raise ValueError(
816
- "Invalid extra_args key '%s', "
817
- "must be one of: %s" % (kwarg, ', '.join(allowed))
818
- )
819
-
820
- def _ranged_download(
821
- self, bucket, key, filename, object_size, extra_args, callback
822
- ):
823
- downloader = MultipartDownloader(
824
- self._client, self._config, self._osutil
825
- )
826
- downloader.download_file(
827
- bucket, key, filename, object_size, extra_args, callback
828
- )
829
-
830
- def _get_object(self, bucket, key, filename, extra_args, callback):
831
- # precondition: num_download_attempts > 0
832
- max_attempts = self._config.num_download_attempts
833
- last_exception = None
834
- for i in range(max_attempts):
835
- try:
836
- return self._do_get_object(
837
- bucket, key, filename, extra_args, callback
838
- )
839
- except (
840
- socket.timeout,
841
- OSError,
842
- ReadTimeoutError,
843
- IncompleteReadError,
844
- ) as e:
845
- # TODO: we need a way to reset the callback if the
846
- # download failed.
847
- logger.debug(
848
- "Retrying exception caught (%s), "
849
- "retrying request, (attempt %s / %s)",
850
- e,
851
- i,
852
- max_attempts,
853
- exc_info=True,
854
- )
855
- last_exception = e
856
- continue
857
- raise RetriesExceededError(last_exception)
858
-
859
- def _do_get_object(self, bucket, key, filename, extra_args, callback):
860
- response = self._client.get_object(
861
- Bucket=bucket, Key=key, **extra_args
862
- )
863
- streaming_body = StreamReaderProgress(response['Body'], callback)
864
- with self._osutil.open(filename, 'wb') as f:
865
- for chunk in iter(lambda: streaming_body.read(8192), b''):
866
- f.write(chunk)
867
-
868
- def _object_size(self, bucket, key, extra_args):
869
- return self._client.head_object(Bucket=bucket, Key=key, **extra_args)[
870
- 'ContentLength'
871
- ]
872
-
873
- def _multipart_upload(self, filename, bucket, key, callback, extra_args):
874
- uploader = MultipartUploader(self._client, self._config, self._osutil)
875
- uploader.upload_file(filename, bucket, key, callback, extra_args)
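A quick standalone exercise of the ReadFileChunk windowing described in its docstring above; this is a sketch against the legacy module shown here, with the file contents and offsets invented for illustration:

    import os
    import tempfile

    from s3transfer import ReadFileChunk  # the class defined above

    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b"0123456789")
        path = f.name

    # Expose the byte window [2, 6) of the file as its own file-like object.
    chunk = ReadFileChunk.from_filename(path, start_byte=2, chunk_size=4)
    print(chunk.read())  # b'2345' -- reads stop at the chunk boundary
    print(len(chunk))    # 4, the window size rather than the file size
    chunk.close()
    os.remove(path)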
 
spaces/Big-Web/MMSD/env/Lib/site-packages/urllib3/util/__init__.py DELETED
@@ -1,49 +0,0 @@
- from __future__ import absolute_import
-
- # For backwards compatibility, provide imports that used to be here.
- from .connection import is_connection_dropped
- from .request import SKIP_HEADER, SKIPPABLE_HEADERS, make_headers
- from .response import is_fp_closed
- from .retry import Retry
- from .ssl_ import (
-     ALPN_PROTOCOLS,
-     HAS_SNI,
-     IS_PYOPENSSL,
-     IS_SECURETRANSPORT,
-     PROTOCOL_TLS,
-     SSLContext,
-     assert_fingerprint,
-     resolve_cert_reqs,
-     resolve_ssl_version,
-     ssl_wrap_socket,
- )
- from .timeout import Timeout, current_time
- from .url import Url, get_host, parse_url, split_first
- from .wait import wait_for_read, wait_for_write
-
- __all__ = (
-     "HAS_SNI",
-     "IS_PYOPENSSL",
-     "IS_SECURETRANSPORT",
-     "SSLContext",
-     "PROTOCOL_TLS",
-     "ALPN_PROTOCOLS",
-     "Retry",
-     "Timeout",
-     "Url",
-     "assert_fingerprint",
-     "current_time",
-     "is_connection_dropped",
-     "is_fp_closed",
-     "get_host",
-     "parse_url",
-     "make_headers",
-     "resolve_cert_reqs",
-     "resolve_ssl_version",
-     "split_first",
-     "ssl_wrap_socket",
-     "wait_for_read",
-     "wait_for_write",
-     "SKIP_HEADER",
-     "SKIPPABLE_HEADERS",
- )
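Most of these re-exports are consumed inside urllib3 itself, but parse_url is useful on its own; a small sketch:

    from urllib3.util import parse_url

    # parse_url returns a Url named tuple with the usual URL components.
    url = parse_url("https://example.com:8443/search?q=python")
    print(url.scheme)  # 'https'
    print(url.host)    # 'example.com'
    print(url.port)    # 8443
    print(url.path)    # '/search'
    print(url.query)   # 'q=python'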
 
spaces/Boadiwaa/Recipes/openai/api_resources/abstract/listable_api_resource.py DELETED
@@ -1,47 +0,0 @@
- from openai import api_requestor, util, error
- from openai.api_resources.abstract.api_resource import APIResource
- from openai.util import ApiType
-
-
- class ListableAPIResource(APIResource):
-     @classmethod
-     def auto_paging_iter(cls, *args, **params):
-         return cls.list(*args, **params).auto_paging_iter()
-
-     @classmethod
-     def list(
-         cls,
-         api_key=None,
-         request_id=None,
-         api_version=None,
-         organization=None,
-         api_base=None,
-         api_type=None,
-         **params,
-     ):
-         requestor = api_requestor.APIRequestor(
-             api_key,
-             api_base=api_base or cls.api_base(),
-             api_version=api_version,
-             api_type=api_type,
-             organization=organization,
-         )
-
-         typed_api_type, api_version = cls._get_api_type_and_version(api_type, api_version)
-
-         if typed_api_type == ApiType.AZURE:
-             base = cls.class_url()
-             url = "/%s%s?api-version=%s" % (cls.azure_api_prefix, base, api_version)
-         elif typed_api_type == ApiType.OPEN_AI:
-             url = cls.class_url()
-         else:
-             raise error.InvalidAPIType('Unsupported API type %s' % api_type)
-
-         response, _, api_key = requestor.request(
-             "get", url, params, request_id=request_id
-         )
-         openai_object = util.convert_to_openai_object(
-             response, api_key, api_version, organization
-         )
-         openai_object._retrieve_params = params
-         return openai_object
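Concrete resources inherit this list() classmethod; a hedged usage sketch against the pre-1.0 openai client shown here (the API key is a placeholder):

    import openai

    openai.api_key = "sk-..."  # placeholder, not a real key

    # openai.Model subclasses ListableAPIResource, so list() and
    # auto_paging_iter() resolve to the methods defined above.
    models = openai.Model.list()
    for model in models.data:
        print(model.id)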
 
spaces/CVPR/LIVE/pybind11/tests/test_numpy_array.cpp DELETED
@@ -1,388 +0,0 @@
1
- /*
2
- tests/test_numpy_array.cpp -- test core array functionality
3
-
4
- Copyright (c) 2016 Ivan Smirnov <[email protected]>
5
-
6
- All rights reserved. Use of this source code is governed by a
7
- BSD-style license that can be found in the LICENSE file.
8
- */
9
-
10
- #include "pybind11_tests.h"
11
-
12
- #include <pybind11/numpy.h>
13
- #include <pybind11/stl.h>
14
-
15
- #include <cstdint>
16
-
17
- // Size / dtype checks.
18
- struct DtypeCheck {
19
- py::dtype numpy{};
20
- py::dtype pybind11{};
21
- };
22
-
23
- template <typename T>
24
- DtypeCheck get_dtype_check(const char* name) {
25
- py::module np = py::module::import("numpy");
26
- DtypeCheck check{};
27
- check.numpy = np.attr("dtype")(np.attr(name));
28
- check.pybind11 = py::dtype::of<T>();
29
- return check;
30
- }
31
-
32
- std::vector<DtypeCheck> get_concrete_dtype_checks() {
33
- return {
34
- // Normalization
35
- get_dtype_check<std::int8_t>("int8"),
36
- get_dtype_check<std::uint8_t>("uint8"),
37
- get_dtype_check<std::int16_t>("int16"),
38
- get_dtype_check<std::uint16_t>("uint16"),
39
- get_dtype_check<std::int32_t>("int32"),
40
- get_dtype_check<std::uint32_t>("uint32"),
41
- get_dtype_check<std::int64_t>("int64"),
42
- get_dtype_check<std::uint64_t>("uint64")
43
- };
44
- }
45
-
46
- struct DtypeSizeCheck {
47
- std::string name{};
48
- int size_cpp{};
49
- int size_numpy{};
50
- // For debugging.
51
- py::dtype dtype{};
52
- };
53
-
54
- template <typename T>
55
- DtypeSizeCheck get_dtype_size_check() {
56
- DtypeSizeCheck check{};
57
- check.name = py::type_id<T>();
58
- check.size_cpp = sizeof(T);
59
- check.dtype = py::dtype::of<T>();
60
- check.size_numpy = check.dtype.attr("itemsize").template cast<int>();
61
- return check;
62
- }
63
-
64
- std::vector<DtypeSizeCheck> get_platform_dtype_size_checks() {
65
- return {
66
- get_dtype_size_check<short>(),
67
- get_dtype_size_check<unsigned short>(),
68
- get_dtype_size_check<int>(),
69
- get_dtype_size_check<unsigned int>(),
70
- get_dtype_size_check<long>(),
71
- get_dtype_size_check<unsigned long>(),
72
- get_dtype_size_check<long long>(),
73
- get_dtype_size_check<unsigned long long>(),
74
- };
75
- }
76
-
77
- // Arrays.
78
- using arr = py::array;
79
- using arr_t = py::array_t<uint16_t, 0>;
80
- static_assert(std::is_same<arr_t::value_type, uint16_t>::value, "");
81
-
82
- template<typename... Ix> arr data(const arr& a, Ix... index) {
83
- return arr(a.nbytes() - a.offset_at(index...), (const uint8_t *) a.data(index...));
84
- }
85
-
86
- template<typename... Ix> arr data_t(const arr_t& a, Ix... index) {
87
- return arr(a.size() - a.index_at(index...), a.data(index...));
88
- }
89
-
90
- template<typename... Ix> arr& mutate_data(arr& a, Ix... index) {
91
- auto ptr = (uint8_t *) a.mutable_data(index...);
92
- for (ssize_t i = 0; i < a.nbytes() - a.offset_at(index...); i++)
93
- ptr[i] = (uint8_t) (ptr[i] * 2);
94
- return a;
95
- }
96
-
97
- template<typename... Ix> arr_t& mutate_data_t(arr_t& a, Ix... index) {
98
- auto ptr = a.mutable_data(index...);
99
- for (ssize_t i = 0; i < a.size() - a.index_at(index...); i++)
100
- ptr[i]++;
101
- return a;
102
- }
103
-
104
- template<typename... Ix> ssize_t index_at(const arr& a, Ix... idx) { return a.index_at(idx...); }
105
- template<typename... Ix> ssize_t index_at_t(const arr_t& a, Ix... idx) { return a.index_at(idx...); }
106
- template<typename... Ix> ssize_t offset_at(const arr& a, Ix... idx) { return a.offset_at(idx...); }
107
- template<typename... Ix> ssize_t offset_at_t(const arr_t& a, Ix... idx) { return a.offset_at(idx...); }
108
- template<typename... Ix> ssize_t at_t(const arr_t& a, Ix... idx) { return a.at(idx...); }
109
- template<typename... Ix> arr_t& mutate_at_t(arr_t& a, Ix... idx) { a.mutable_at(idx...)++; return a; }
110
-
111
- #define def_index_fn(name, type) \
112
- sm.def(#name, [](type a) { return name(a); }); \
113
- sm.def(#name, [](type a, int i) { return name(a, i); }); \
114
- sm.def(#name, [](type a, int i, int j) { return name(a, i, j); }); \
115
- sm.def(#name, [](type a, int i, int j, int k) { return name(a, i, j, k); });
116
-
117
- template <typename T, typename T2> py::handle auxiliaries(T &&r, T2 &&r2) {
118
- if (r.ndim() != 2) throw std::domain_error("error: ndim != 2");
119
- py::list l;
120
- l.append(*r.data(0, 0));
121
- l.append(*r2.mutable_data(0, 0));
122
- l.append(r.data(0, 1) == r2.mutable_data(0, 1));
123
- l.append(r.ndim());
124
- l.append(r.itemsize());
125
- l.append(r.shape(0));
126
- l.append(r.shape(1));
127
- l.append(r.size());
128
- l.append(r.nbytes());
129
- return l.release();
130
- }
131
-
132
- // note: declaration at local scope would create a dangling reference!
133
- static int data_i = 42;
134
-
135
- TEST_SUBMODULE(numpy_array, sm) {
136
- try { py::module::import("numpy"); }
137
- catch (...) { return; }
138
-
139
- // test_dtypes
140
- py::class_<DtypeCheck>(sm, "DtypeCheck")
141
- .def_readonly("numpy", &DtypeCheck::numpy)
142
- .def_readonly("pybind11", &DtypeCheck::pybind11)
143
- .def("__repr__", [](const DtypeCheck& self) {
144
- return py::str("<DtypeCheck numpy={} pybind11={}>").format(
-             self.numpy, self.pybind11);
-         });
-     sm.def("get_concrete_dtype_checks", &get_concrete_dtype_checks);
-
-     py::class_<DtypeSizeCheck>(sm, "DtypeSizeCheck")
-         .def_readonly("name", &DtypeSizeCheck::name)
-         .def_readonly("size_cpp", &DtypeSizeCheck::size_cpp)
-         .def_readonly("size_numpy", &DtypeSizeCheck::size_numpy)
-         .def("__repr__", [](const DtypeSizeCheck& self) {
-             return py::str("<DtypeSizeCheck name='{}' size_cpp={} size_numpy={} dtype={}>").format(
-                 self.name, self.size_cpp, self.size_numpy, self.dtype);
-         });
-     sm.def("get_platform_dtype_size_checks", &get_platform_dtype_size_checks);
-
-     // test_array_attributes
-     sm.def("ndim", [](const arr& a) { return a.ndim(); });
-     sm.def("shape", [](const arr& a) { return arr(a.ndim(), a.shape()); });
-     sm.def("shape", [](const arr& a, ssize_t dim) { return a.shape(dim); });
-     sm.def("strides", [](const arr& a) { return arr(a.ndim(), a.strides()); });
-     sm.def("strides", [](const arr& a, ssize_t dim) { return a.strides(dim); });
-     sm.def("writeable", [](const arr& a) { return a.writeable(); });
-     sm.def("size", [](const arr& a) { return a.size(); });
-     sm.def("itemsize", [](const arr& a) { return a.itemsize(); });
-     sm.def("nbytes", [](const arr& a) { return a.nbytes(); });
-     sm.def("owndata", [](const arr& a) { return a.owndata(); });
-
-     // test_index_offset
-     def_index_fn(index_at, const arr&);
-     def_index_fn(index_at_t, const arr_t&);
-     def_index_fn(offset_at, const arr&);
-     def_index_fn(offset_at_t, const arr_t&);
-     // test_data
-     def_index_fn(data, const arr&);
-     def_index_fn(data_t, const arr_t&);
-     // test_mutate_data, test_mutate_readonly
-     def_index_fn(mutate_data, arr&);
-     def_index_fn(mutate_data_t, arr_t&);
-     def_index_fn(at_t, const arr_t&);
-     def_index_fn(mutate_at_t, arr_t&);
-
-     // test_make_c_f_array
-     sm.def("make_f_array", [] { return py::array_t<float>({ 2, 2 }, { 4, 8 }); });
-     sm.def("make_c_array", [] { return py::array_t<float>({ 2, 2 }, { 8, 4 }); });
-
-     // test_empty_shaped_array
-     sm.def("make_empty_shaped_array", [] { return py::array(py::dtype("f"), {}, {}); });
-     // test numpy scalars (empty shape, ndim==0)
-     sm.def("scalar_int", []() { return py::array(py::dtype("i"), {}, {}, &data_i); });
-
-     // test_wrap
-     sm.def("wrap", [](py::array a) {
-         return py::array(
-             a.dtype(),
-             {a.shape(), a.shape() + a.ndim()},
-             {a.strides(), a.strides() + a.ndim()},
-             a.data(),
-             a
-         );
-     });
-
-     // test_numpy_view
-     struct ArrayClass {
-         int data[2] = { 1, 2 };
-         ArrayClass() { py::print("ArrayClass()"); }
-         ~ArrayClass() { py::print("~ArrayClass()"); }
-     };
-     py::class_<ArrayClass>(sm, "ArrayClass")
-         .def(py::init<>())
-         .def("numpy_view", [](py::object &obj) {
-             py::print("ArrayClass::numpy_view()");
-             ArrayClass &a = obj.cast<ArrayClass&>();
-             return py::array_t<int>({2}, {4}, a.data, obj);
-         }
-     );
-
-     // test_cast_numpy_int64_to_uint64
-     sm.def("function_taking_uint64", [](uint64_t) { });
-
-     // test_isinstance
-     sm.def("isinstance_untyped", [](py::object yes, py::object no) {
-         return py::isinstance<py::array>(yes) && !py::isinstance<py::array>(no);
-     });
-     sm.def("isinstance_typed", [](py::object o) {
-         return py::isinstance<py::array_t<double>>(o) && !py::isinstance<py::array_t<int>>(o);
-     });
-
-     // test_constructors
-     sm.def("default_constructors", []() {
-         return py::dict(
-             "array"_a=py::array(),
-             "array_t<int32>"_a=py::array_t<std::int32_t>(),
-             "array_t<double>"_a=py::array_t<double>()
-         );
-     });
-     sm.def("converting_constructors", [](py::object o) {
-         return py::dict(
-             "array"_a=py::array(o),
-             "array_t<int32>"_a=py::array_t<std::int32_t>(o),
-             "array_t<double>"_a=py::array_t<double>(o)
-         );
-     });
-
-     // test_overload_resolution
-     sm.def("overloaded", [](py::array_t<double>) { return "double"; });
-     sm.def("overloaded", [](py::array_t<float>) { return "float"; });
-     sm.def("overloaded", [](py::array_t<int>) { return "int"; });
-     sm.def("overloaded", [](py::array_t<unsigned short>) { return "unsigned short"; });
-     sm.def("overloaded", [](py::array_t<long long>) { return "long long"; });
-     sm.def("overloaded", [](py::array_t<std::complex<double>>) { return "double complex"; });
-     sm.def("overloaded", [](py::array_t<std::complex<float>>) { return "float complex"; });
-
-     sm.def("overloaded2", [](py::array_t<std::complex<double>>) { return "double complex"; });
-     sm.def("overloaded2", [](py::array_t<double>) { return "double"; });
-     sm.def("overloaded2", [](py::array_t<std::complex<float>>) { return "float complex"; });
-     sm.def("overloaded2", [](py::array_t<float>) { return "float"; });
-
-     // Only accept the exact types:
-     sm.def("overloaded3", [](py::array_t<int>) { return "int"; }, py::arg().noconvert());
-     sm.def("overloaded3", [](py::array_t<double>) { return "double"; }, py::arg().noconvert());
-
-     // Make sure we don't do unsafe coercion (e.g. float to int) when not using forcecast, but
-     // rather that float gets converted via the safe (conversion to double) overload:
-     sm.def("overloaded4", [](py::array_t<long long, 0>) { return "long long"; });
-     sm.def("overloaded4", [](py::array_t<double, 0>) { return "double"; });
-
-     // But we do allow conversion to int if forcecast is enabled (but only if no overload matches
-     // without conversion)
-     sm.def("overloaded5", [](py::array_t<unsigned int>) { return "unsigned int"; });
-     sm.def("overloaded5", [](py::array_t<double>) { return "double"; });
-
-     // test_greedy_string_overload
-     // Issue 685: ndarray shouldn't go to std::string overload
-     sm.def("issue685", [](std::string) { return "string"; });
-     sm.def("issue685", [](py::array) { return "array"; });
-     sm.def("issue685", [](py::object) { return "other"; });
-
-     // test_array_unchecked_fixed_dims
-     sm.def("proxy_add2", [](py::array_t<double> a, double v) {
-         auto r = a.mutable_unchecked<2>();
-         for (ssize_t i = 0; i < r.shape(0); i++)
-             for (ssize_t j = 0; j < r.shape(1); j++)
-                 r(i, j) += v;
-     }, py::arg().noconvert(), py::arg());
-
-     sm.def("proxy_init3", [](double start) {
-         py::array_t<double, py::array::c_style> a({ 3, 3, 3 });
-         auto r = a.mutable_unchecked<3>();
-         for (ssize_t i = 0; i < r.shape(0); i++)
-             for (ssize_t j = 0; j < r.shape(1); j++)
-                 for (ssize_t k = 0; k < r.shape(2); k++)
-                     r(i, j, k) = start++;
-         return a;
-     });
-     sm.def("proxy_init3F", [](double start) {
-         py::array_t<double, py::array::f_style> a({ 3, 3, 3 });
-         auto r = a.mutable_unchecked<3>();
-         for (ssize_t k = 0; k < r.shape(2); k++)
-             for (ssize_t j = 0; j < r.shape(1); j++)
-                 for (ssize_t i = 0; i < r.shape(0); i++)
-                     r(i, j, k) = start++;
-         return a;
-     });
-     sm.def("proxy_squared_L2_norm", [](py::array_t<double> a) {
-         auto r = a.unchecked<1>();
-         double sumsq = 0;
-         for (ssize_t i = 0; i < r.shape(0); i++)
-             sumsq += r[i] * r(i); // Either notation works for a 1D array
-         return sumsq;
-     });
-
-     sm.def("proxy_auxiliaries2", [](py::array_t<double> a) {
-         auto r = a.unchecked<2>();
-         auto r2 = a.mutable_unchecked<2>();
-         return auxiliaries(r, r2);
-     });
-
-     // test_array_unchecked_dyn_dims
-     // Same as the above, but without a compile-time dimensions specification:
-     sm.def("proxy_add2_dyn", [](py::array_t<double> a, double v) {
-         auto r = a.mutable_unchecked();
-         if (r.ndim() != 2) throw std::domain_error("error: ndim != 2");
-         for (ssize_t i = 0; i < r.shape(0); i++)
-             for (ssize_t j = 0; j < r.shape(1); j++)
-                 r(i, j) += v;
-     }, py::arg().noconvert(), py::arg());
-     sm.def("proxy_init3_dyn", [](double start) {
-         py::array_t<double, py::array::c_style> a({ 3, 3, 3 });
-         auto r = a.mutable_unchecked();
-         if (r.ndim() != 3) throw std::domain_error("error: ndim != 3");
-         for (ssize_t i = 0; i < r.shape(0); i++)
-             for (ssize_t j = 0; j < r.shape(1); j++)
-                 for (ssize_t k = 0; k < r.shape(2); k++)
-                     r(i, j, k) = start++;
-         return a;
-     });
-     sm.def("proxy_auxiliaries2_dyn", [](py::array_t<double> a) {
-         return auxiliaries(a.unchecked(), a.mutable_unchecked());
-     });
-
-     sm.def("array_auxiliaries2", [](py::array_t<double> a) {
-         return auxiliaries(a, a);
-     });
-
-     // test_array_failures
-     // Issue #785: Uninformative "Unknown internal error" exception when constructing array from empty object:
-     sm.def("array_fail_test", []() { return py::array(py::object()); });
-     sm.def("array_t_fail_test", []() { return py::array_t<double>(py::object()); });
-     // Make sure the error from numpy is being passed through:
-     sm.def("array_fail_test_negative_size", []() { int c = 0; return py::array(-1, &c); });
-
-     // test_initializer_list
-     // Issue (unnumbered; reported in #788): regression: initializer lists can be ambiguous
-     sm.def("array_initializer_list1", []() { return py::array_t<float>(1); }); // { 1 } also works, but clang warns about it
-     sm.def("array_initializer_list2", []() { return py::array_t<float>({ 1, 2 }); });
-     sm.def("array_initializer_list3", []() { return py::array_t<float>({ 1, 2, 3 }); });
-     sm.def("array_initializer_list4", []() { return py::array_t<float>({ 1, 2, 3, 4 }); });
-
-     // test_array_resize
-     // reshape array to 2D without changing size
-     sm.def("array_reshape2", [](py::array_t<double> a) {
-         const ssize_t dim_sz = (ssize_t)std::sqrt(a.size());
-         if (dim_sz * dim_sz != a.size())
-             throw std::domain_error("array_reshape2: input array total size is not a squared integer");
-         a.resize({dim_sz, dim_sz});
-     });
-
-     // resize to 3D array with each dimension = N
-     sm.def("array_resize3", [](py::array_t<double> a, size_t N, bool refcheck) {
-         a.resize({N, N, N}, refcheck);
-     });
-
-     // test_array_create_and_resize
-     // return 2D array with Nrows = Ncols = N
-     sm.def("create_and_resize", [](size_t N) {
-         py::array_t<double> a;
-         a.resize({N, N});
-         std::fill(a.mutable_data(), a.mutable_data() + a.size(), 42.);
-         return a;
-     });
-
-     sm.def("index_using_ellipsis", [](py::array a) {
-         return a[py::make_tuple(0, py::ellipsis(), 0)];
-     });
- }
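
For orientation, here is a minimal sketch of how these bindings look from the Python side. The module path `pybind11_tests.numpy_array` is an assumption based on pybind11's usual test layout, not something stated in this diff:

    # Hypothetical usage; assumes the module builds as pybind11_tests.numpy_array.
    import numpy as np
    from pybind11_tests import numpy_array as m

    a = np.zeros((2, 3), dtype=np.float64)
    assert m.ndim(a) == 2                  # wraps arr::ndim()
    assert tuple(m.shape(a)) == (2, 3)     # wraps arr::shape()

    m.proxy_add2(a, 1.5)                   # in-place add via mutable_unchecked<2>()
    assert (a == 1.5).all()

    w = m.wrap(a)                          # zero-copy view; `a` is kept alive as base
    assert w.base is a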
 
spaces/CVPR/LIVE/thrust/thrust/fill.h DELETED
@@ -1,209 +0,0 @@
- /*
-  * Copyright 2008-2013 NVIDIA Corporation
-  *
-  * Licensed under the Apache License, Version 2.0 (the "License");
-  * you may not use this file except in compliance with the License.
-  * You may obtain a copy of the License at
-  *
-  *     http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing, software
-  * distributed under the License is distributed on an "AS IS" BASIS,
-  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  * See the License for the specific language governing permissions and
-  * limitations under the License.
-  */
-
-
- /*! \file fill.h
-  *  \brief Fills a range with a constant value
-  */
-
- #pragma once
-
- #include <thrust/detail/config.h>
- #include <thrust/detail/execution_policy.h>
-
- namespace thrust
- {
-
-
- /*! \addtogroup transformations
-  *  \addtogroup filling
-  *  \ingroup transformations
-  *  \{
-  */
-
-
- /*! \p fill assigns the value \p value to every element in
-  *  the range <tt>[first, last)</tt>. That is, for every
-  *  iterator \c i in <tt>[first, last)</tt>, it performs
-  *  the assignment <tt>*i = value</tt>.
-  *
-  *  The algorithm's execution is parallelized as determined by \p exec.
-  *
-  *  \param exec The execution policy to use for parallelization.
-  *  \param first The beginning of the sequence.
-  *  \param last The end of the sequence.
-  *  \param value The value to be copied.
-  *
-  *  \tparam DerivedPolicy The name of the derived execution policy.
-  *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
-  *          and \p ForwardIterator is mutable.
-  *  \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
-  *          and \p T's \c value_type is convertible to \p ForwardIterator's \c value_type.
-  *
-  *  The following code snippet demonstrates how to use \p fill to set a thrust::device_vector's
-  *  elements to a given value using the \p thrust::device execution policy for parallelization:
-  *
-  *  \code
-  *  #include <thrust/fill.h>
-  *  #include <thrust/device_vector.h>
-  *  #include <thrust/execution_policy.h>
-  *  ...
-  *  thrust::device_vector<int> v(4);
-  *  thrust::fill(thrust::device, v.begin(), v.end(), 137);
-  *
-  *  // v[0] == 137, v[1] == 137, v[2] == 137, v[3] == 137
-  *  \endcode
-  *
-  *  \see http://www.sgi.com/tech/stl/fill.html
-  *  \see \c fill_n
-  *  \see \c uninitialized_fill
-  */
- template<typename DerivedPolicy, typename ForwardIterator, typename T>
- __host__ __device__
-   void fill(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-             ForwardIterator first,
-             ForwardIterator last,
-             const T &value);
-
-
- /*! \p fill assigns the value \p value to every element in
-  *  the range <tt>[first, last)</tt>. That is, for every
-  *  iterator \c i in <tt>[first, last)</tt>, it performs
-  *  the assignment <tt>*i = value</tt>.
-  *
-  *  \param first The beginning of the sequence.
-  *  \param last The end of the sequence.
-  *  \param value The value to be copied.
-  *
-  *  \tparam ForwardIterator is a model of <a href="http://www.sgi.com/tech/stl/ForwardIterator.html">Forward Iterator</a>,
-  *          and \p ForwardIterator is mutable.
-  *  \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
-  *          and \p T's \c value_type is convertible to \p ForwardIterator's \c value_type.
-  *
-  *  The following code snippet demonstrates how to use \p fill to set a thrust::device_vector's
-  *  elements to a given value.
-  *
-  *  \code
-  *  #include <thrust/fill.h>
-  *  #include <thrust/device_vector.h>
-  *  ...
-  *  thrust::device_vector<int> v(4);
-  *  thrust::fill(v.begin(), v.end(), 137);
-  *
-  *  // v[0] == 137, v[1] == 137, v[2] == 137, v[3] == 137
-  *  \endcode
-  *
-  *  \see http://www.sgi.com/tech/stl/fill.html
-  *  \see \c fill_n
-  *  \see \c uninitialized_fill
-  */
- template<typename ForwardIterator, typename T>
- __host__ __device__
-   void fill(ForwardIterator first,
-             ForwardIterator last,
-             const T &value);
-
-
- /*! \p fill_n assigns the value \p value to every element in
-  *  the range <tt>[first, first+n)</tt>. That is, for every
-  *  iterator \c i in <tt>[first, first+n)</tt>, it performs
-  *  the assignment <tt>*i = value</tt>.
-  *
-  *  The algorithm's execution is parallelized as determined by \p exec.
-  *
-  *  \param exec The execution policy to use for parallelization.
-  *  \param first The beginning of the sequence.
-  *  \param n The size of the sequence.
-  *  \param value The value to be copied.
-  *  \return <tt>first + n</tt>
-  *
-  *  \tparam DerivedPolicy The name of the derived execution policy.
-  *  \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
-  *  \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
-  *          and \p T's \c value_type is convertible to a type in \p OutputIterator's set of \c value_type.
-  *
-  *  The following code snippet demonstrates how to use \p fill to set a thrust::device_vector's
-  *  elements to a given value using the \p thrust::device execution policy for parallelization:
-  *
-  *  \code
-  *  #include <thrust/fill.h>
-  *  #include <thrust/device_vector.h>
-  *  #include <thrust/execution_policy.h>
-  *  ...
-  *  thrust::device_vector<int> v(4);
-  *  thrust::fill_n(thrust::device, v.begin(), v.size(), 137);
-  *
-  *  // v[0] == 137, v[1] == 137, v[2] == 137, v[3] == 137
-  *  \endcode
-  *
-  *  \see http://www.sgi.com/tech/stl/fill_n.html
-  *  \see \c fill
-  *  \see \c uninitialized_fill_n
-  */
- template<typename DerivedPolicy, typename OutputIterator, typename Size, typename T>
- __host__ __device__
-   OutputIterator fill_n(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                         OutputIterator first,
-                         Size n,
-                         const T &value);
-
-
- /*! \p fill_n assigns the value \p value to every element in
-  *  the range <tt>[first, first+n)</tt>. That is, for every
-  *  iterator \c i in <tt>[first, first+n)</tt>, it performs
-  *  the assignment <tt>*i = value</tt>.
-  *
-  *  \param first The beginning of the sequence.
-  *  \param n The size of the sequence.
-  *  \param value The value to be copied.
-  *  \return <tt>first + n</tt>
-  *
-  *  \tparam OutputIterator is a model of <a href="http://www.sgi.com/tech/stl/OutputIterator.html">Output Iterator</a>.
-  *  \tparam T is a model of <a href="http://www.sgi.com/tech/stl/Assignable.html">Assignable</a>,
-  *          and \p T's \c value_type is convertible to a type in \p OutputIterator's set of \c value_type.
-  *
-  *  The following code snippet demonstrates how to use \p fill to set a thrust::device_vector's
-  *  elements to a given value.
-  *
-  *  \code
-  *  #include <thrust/fill.h>
-  *  #include <thrust/device_vector.h>
-  *  ...
-  *  thrust::device_vector<int> v(4);
-  *  thrust::fill_n(v.begin(), v.size(), 137);
-  *
-  *  // v[0] == 137, v[1] == 137, v[2] == 137, v[3] == 137
-  *  \endcode
-  *
-  *  \see http://www.sgi.com/tech/stl/fill_n.html
-  *  \see \c fill
-  *  \see \c uninitialized_fill_n
-  */
- template<typename OutputIterator, typename Size, typename T>
- __host__ __device__
-   OutputIterator fill_n(OutputIterator first,
-                         Size n,
-                         const T &value);
-
-
- /*! \} // end filling
-  *  \} // transformations
-  */
-
- } // end namespace thrust
-
- #include <thrust/detail/fill.inl>
-
 
spaces/CVPR/WALT/mmdet/models/necks/rfp.py DELETED
@@ -1,128 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- from mmcv.cnn import constant_init, kaiming_init, xavier_init
-
- from ..builder import NECKS, build_backbone
- from .fpn import FPN
-
-
- class ASPP(nn.Module):
-     """ASPP (Atrous Spatial Pyramid Pooling)
-
-     This is an implementation of the ASPP module used in DetectoRS
-     (https://arxiv.org/pdf/2006.02334.pdf)
-
-     Args:
-         in_channels (int): Number of input channels.
-         out_channels (int): Number of channels produced by this module
-         dilations (tuple[int]): Dilations of the four branches.
-             Default: (1, 3, 6, 1)
-     """
-
-     def __init__(self, in_channels, out_channels, dilations=(1, 3, 6, 1)):
-         super().__init__()
-         assert dilations[-1] == 1
-         self.aspp = nn.ModuleList()
-         for dilation in dilations:
-             kernel_size = 3 if dilation > 1 else 1
-             padding = dilation if dilation > 1 else 0
-             conv = nn.Conv2d(
-                 in_channels,
-                 out_channels,
-                 kernel_size=kernel_size,
-                 stride=1,
-                 dilation=dilation,
-                 padding=padding,
-                 bias=True)
-             self.aspp.append(conv)
-         self.gap = nn.AdaptiveAvgPool2d(1)
-         self.init_weights()
-
-     def init_weights(self):
-         for m in self.modules():
-             if isinstance(m, nn.Conv2d):
-                 kaiming_init(m)
-
-     def forward(self, x):
-         avg_x = self.gap(x)
-         out = []
-         for aspp_idx in range(len(self.aspp)):
-             inp = avg_x if (aspp_idx == len(self.aspp) - 1) else x
-             out.append(F.relu_(self.aspp[aspp_idx](inp)))
-         out[-1] = out[-1].expand_as(out[-2])
-         out = torch.cat(out, dim=1)
-         return out
-
-
- @NECKS.register_module()
- class RFP(FPN):
-     """RFP (Recursive Feature Pyramid)
-
-     This is an implementation of RFP in `DetectoRS
-     <https://arxiv.org/pdf/2006.02334.pdf>`_. Different from standard FPN, the
-     input of RFP should be multi level features along with origin input image
-     of backbone.
-
-     Args:
-         rfp_steps (int): Number of unrolled steps of RFP.
-         rfp_backbone (dict): Configuration of the backbone for RFP.
-         aspp_out_channels (int): Number of output channels of ASPP module.
-         aspp_dilations (tuple[int]): Dilation rates of four branches.
-             Default: (1, 3, 6, 1)
-     """
-
-     def __init__(self,
-                  rfp_steps,
-                  rfp_backbone,
-                  aspp_out_channels,
-                  aspp_dilations=(1, 3, 6, 1),
-                  **kwargs):
-         super().__init__(**kwargs)
-         self.rfp_steps = rfp_steps
-         self.rfp_modules = nn.ModuleList()
-         for rfp_idx in range(1, rfp_steps):
-             rfp_module = build_backbone(rfp_backbone)
-             self.rfp_modules.append(rfp_module)
-         self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels,
-                              aspp_dilations)
-         self.rfp_weight = nn.Conv2d(
-             self.out_channels,
-             1,
-             kernel_size=1,
-             stride=1,
-             padding=0,
-             bias=True)
-
-     def init_weights(self):
-         # Avoid using super().init_weights(), which may alter the default
-         # initialization of the modules in self.rfp_modules that have missing
-         # keys in the pretrained checkpoint.
-         for convs in [self.lateral_convs, self.fpn_convs]:
-             for m in convs.modules():
-                 if isinstance(m, nn.Conv2d):
-                     xavier_init(m, distribution='uniform')
-         for rfp_idx in range(self.rfp_steps - 1):
-             self.rfp_modules[rfp_idx].init_weights(
-                 self.rfp_modules[rfp_idx].pretrained)
-         constant_init(self.rfp_weight, 0)
-
-     def forward(self, inputs):
-         inputs = list(inputs)
-         assert len(inputs) == len(self.in_channels) + 1  # +1 for input image
-         img = inputs.pop(0)
-         # FPN forward
-         x = super().forward(tuple(inputs))
-         for rfp_idx in range(self.rfp_steps - 1):
-             rfp_feats = [x[0]] + list(
-                 self.rfp_aspp(x[i]) for i in range(1, len(x)))
-             x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)
-             # FPN forward
-             x_idx = super().forward(x_idx)
-             x_new = []
-             for ft_idx in range(len(x_idx)):
-                 add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))
-                 x_new.append(add_weight * x_idx[ft_idx] +
-                              (1 - add_weight) * x[ft_idx])
-             x = x_new
-         return x
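
For context, this neck is normally instantiated from a config dict rather than constructed directly. A minimal sketch of such a config follows; the backbone settings are illustrative DetectoRS-style assumptions, not values taken from this repository:

    # Hypothetical mmdet-style config selecting the RFP neck above.
    neck = dict(
        type='RFP',
        rfp_steps=2,                  # one extra unrolled recursion step
        aspp_out_channels=64,
        aspp_dilations=(1, 3, 6, 1),
        rfp_backbone=dict(            # must expose rfp_forward(img, feats)
            type='DetectoRS_ResNet',
            depth=50,
            rfp_inplanes=256,
            pretrained='torchvision://resnet50'),
        # remaining kwargs are forwarded to the FPN base class:
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5)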
 
spaces/CVPR/WALT/walt/datasets/pipelines/compose.py DELETED
@@ -1,52 +0,0 @@
- import collections
-
- from mmcv.utils import build_from_cfg
-
- from ..builder import PIPELINES
-
-
- @PIPELINES.register_module()
- class Compose(object):
-     """Compose multiple transforms sequentially.
-
-     Args:
-         transforms (Sequence[dict | callable]): Sequence of transform object or
-             config dict to be composed.
-     """
-
-     def __init__(self, transforms):
-         assert isinstance(transforms, collections.abc.Sequence)
-         self.transforms = []
-         for transform in transforms:
-             if isinstance(transform, dict):
-                 transform = build_from_cfg(transform, PIPELINES)
-                 self.transforms.append(transform)
-             elif callable(transform):
-                 self.transforms.append(transform)
-             else:
-                 raise TypeError('transform must be callable or a dict')
-
-     def __call__(self, data):
-         """Call function to apply transforms sequentially.
-
-         Args:
-             data (dict): A result dict contains the data to transform.
-
-         Returns:
-             dict: Transformed data.
-         """
-
-         for t in self.transforms:
-             # print(data)
-             data = t(data)
-             if data is None:
-                 return None
-         return data
-
-     def __repr__(self):
-         format_string = self.__class__.__name__ + '('
-         for t in self.transforms:
-             format_string += '\n'
-             format_string += f'    {t}'
-         format_string += '\n)'
-         return format_string
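
A minimal usage sketch of the class above; the transform type names are illustrative registry entries (assumed to be registered in PIPELINES), and the lambda exercises the callable branch:

    # Hypothetical pipeline; 'LoadImageFromFile' and 'Resize' stand in for
    # whatever transforms are actually registered.
    pipeline = Compose([
        dict(type='LoadImageFromFile'),
        dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
        lambda results: results,  # any callable is accepted unchanged
    ])
    results = pipeline(dict(img_info=dict(filename='demo.jpg'), img_prefix='data/'))
    # A transform may return None to drop the sample, in which case
    # pipeline(...) returns None.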
 
spaces/CVPR/lama-example/bin/gen_outpainting_dataset.py DELETED
@@ -1,88 +0,0 @@
- #!/usr/bin/env python3
- import glob
- import logging
- import os
- import shutil
- import sys
- import traceback
-
- from saicinpainting.evaluation.data import load_image
- from saicinpainting.evaluation.utils import move_to_device
-
- os.environ['OMP_NUM_THREADS'] = '1'
- os.environ['OPENBLAS_NUM_THREADS'] = '1'
- os.environ['MKL_NUM_THREADS'] = '1'
- os.environ['VECLIB_MAXIMUM_THREADS'] = '1'
- os.environ['NUMEXPR_NUM_THREADS'] = '1'
-
- import cv2
- import hydra
- import numpy as np
- import torch
- import tqdm
- import yaml
- from omegaconf import OmegaConf
- from torch.utils.data._utils.collate import default_collate
-
- from saicinpainting.training.data.datasets import make_default_val_dataset
- from saicinpainting.training.trainers import load_checkpoint
- from saicinpainting.utils import register_debug_signal_handlers
-
- LOGGER = logging.getLogger(__name__)
-
-
- def main(args):
-     try:
-         if not args.indir.endswith('/'):
-             args.indir += '/'
-
-         for in_img in glob.glob(os.path.join(args.indir, '**', '*' + args.img_suffix), recursive=True):
-             if 'mask' in os.path.basename(in_img):
-                 continue
-
-             out_img_path = os.path.join(args.outdir, os.path.splitext(in_img[len(args.indir):])[0] + '.png')
-             out_mask_path = f'{os.path.splitext(out_img_path)[0]}_mask.png'
-
-             os.makedirs(os.path.dirname(out_img_path), exist_ok=True)
-
-             img = load_image(in_img)
-             height, width = img.shape[1:]
-             pad_h, pad_w = int(height * args.coef / 2), int(width * args.coef / 2)
-
-             mask = np.zeros((height, width), dtype='uint8')
-
-             if args.expand:
-                 img = np.pad(img, ((0, 0), (pad_h, pad_h), (pad_w, pad_w)))
-                 mask = np.pad(mask, ((pad_h, pad_h), (pad_w, pad_w)), mode='constant', constant_values=255)
-             else:
-                 mask[:pad_h] = 255
-                 mask[-pad_h:] = 255
-                 mask[:, :pad_w] = 255
-                 mask[:, -pad_w:] = 255
-
-             # img = np.pad(img, ((0, 0), (pad_h * 2, pad_h * 2), (pad_w * 2, pad_w * 2)), mode='symmetric')
-             # mask = np.pad(mask, ((pad_h * 2, pad_h * 2), (pad_w * 2, pad_w * 2)), mode = 'symmetric')
-
-             img = np.clip(np.transpose(img, (1, 2, 0)) * 255, 0, 255).astype('uint8')
-             img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
-             cv2.imwrite(out_img_path, img)
-
-             cv2.imwrite(out_mask_path, mask)
-     except KeyboardInterrupt:
-         LOGGER.warning('Interrupted by user')
-     except Exception as ex:
-         LOGGER.critical(f'Prediction failed due to {ex}:\n{traceback.format_exc()}')
-         sys.exit(1)
-
-
- if __name__ == '__main__':
-     import argparse
-
-     aparser = argparse.ArgumentParser()
-     aparser.add_argument('indir', type=str, help='Root directory with images')
-     aparser.add_argument('outdir', type=str, help='Where to store results')
-     aparser.add_argument('--img-suffix', type=str, default='.png', help='Input image extension')
-     aparser.add_argument('--expand', action='store_true', help='Generate mask by padding (true) or by cropping (false)')
-     aparser.add_argument('--coef', type=float, default=0.2, help='How much to crop/expand in order to get masks')
-
-     main(aparser.parse_args())
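
A self-contained sketch of the script's two masking modes, using a toy 4x6 image and coef=0.5 so each pad size comes out to one pixel; only numpy is needed:

    import numpy as np

    height, width, coef = 4, 6, 0.5
    pad_h, pad_w = int(height * coef / 2), int(width * coef / 2)   # 1, 1
    mask = np.zeros((height, width), dtype='uint8')

    # --expand: grow the canvas and mark the new border as unknown (255)
    expanded = np.pad(mask, ((pad_h, pad_h), (pad_w, pad_w)),
                      mode='constant', constant_values=255)        # shape (6, 8)

    # default: keep the size and mark an inner border strip instead
    mask[:pad_h], mask[-pad_h:] = 255, 255
    mask[:, :pad_w], mask[:, -pad_w:] = 255, 255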
 
spaces/CofAI/chat/server/babel.py DELETED
@@ -1,48 +0,0 @@
- import os
- import subprocess
- from flask import request, session, jsonify
- from flask_babel import Babel
-
-
- def get_languages_from_dir(directory):
-     """Return a list of directory names in the given directory."""
-     return [name for name in os.listdir(directory)
-             if os.path.isdir(os.path.join(directory, name))]
-
-
- BABEL_DEFAULT_LOCALE = 'en_US'
- BABEL_LANGUAGES = get_languages_from_dir('translations')
-
-
- def create_babel(app):
-     """Create and initialize a Babel instance with the given Flask app."""
-     babel = Babel(app)
-     app.config['BABEL_DEFAULT_LOCALE'] = BABEL_DEFAULT_LOCALE
-     app.config['BABEL_LANGUAGES'] = BABEL_LANGUAGES
-
-     babel.init_app(app, locale_selector=get_locale)
-     compile_translations()
-
-
- def get_locale():
-     """Get the user's locale from the session or the request's accepted languages."""
-     return session.get('language') or request.accept_languages.best_match(BABEL_LANGUAGES)
-
-
- def get_languages():
-     """Return a list of available languages in JSON format."""
-     return jsonify(BABEL_LANGUAGES)
-
-
- def compile_translations():
-     """Compile the translation files."""
-     result = subprocess.run(
-         ['pybabel', 'compile', '-d', 'translations'],
-         stdout=subprocess.PIPE,
-     )
-
-     if result.returncode != 0:
-         raise Exception(
-             f'Compiling translations failed:\n{result.stdout.decode()}')
-
-     print('Translations compiled successfully')
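
A minimal sketch of wiring these helpers into a Flask app; the route and secret key are illustrative assumptions, not part of this file:

    from flask import Flask

    app = Flask(__name__)
    app.secret_key = 'change-me'   # get_locale() reads the session
    create_babel(app)              # configures Babel and compiles translations

    @app.route('/languages')
    def languages():
        return get_languages()     # JSON list of available locales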
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/aiofiles/threadpool/__init__.py DELETED
@@ -1,134 +0,0 @@
- """Handle files using a thread pool executor."""
- import asyncio
- import sys
- from types import coroutine
-
- from io import (
-     FileIO,
-     TextIOBase,
-     BufferedReader,
-     BufferedWriter,
-     BufferedRandom,
-     BufferedIOBase,
- )
- from functools import partial, singledispatch
-
- from .binary import (
-     AsyncBufferedIOBase,
-     AsyncBufferedReader,
-     AsyncFileIO,
-     AsyncIndirectBufferedIOBase,
-     AsyncIndirectBufferedReader,
-     AsyncIndirectFileIO,
- )
- from .text import AsyncTextIOWrapper, AsyncTextIndirectIOWrapper
- from ..base import AiofilesContextManager
-
- sync_open = open
-
- __all__ = (
-     "open",
-     "stdin",
-     "stdout",
-     "stderr",
-     "stdin_bytes",
-     "stdout_bytes",
-     "stderr_bytes",
- )
-
-
- def open(
-     file,
-     mode="r",
-     buffering=-1,
-     encoding=None,
-     errors=None,
-     newline=None,
-     closefd=True,
-     opener=None,
-     *,
-     loop=None,
-     executor=None
- ):
-     return AiofilesContextManager(
-         _open(
-             file,
-             mode=mode,
-             buffering=buffering,
-             encoding=encoding,
-             errors=errors,
-             newline=newline,
-             closefd=closefd,
-             opener=opener,
-             loop=loop,
-             executor=executor,
-         )
-     )
-
-
- @coroutine
- def _open(
-     file,
-     mode="r",
-     buffering=-1,
-     encoding=None,
-     errors=None,
-     newline=None,
-     closefd=True,
-     opener=None,
-     *,
-     loop=None,
-     executor=None
- ):
-     """Open an asyncio file."""
-     if loop is None:
-         loop = asyncio.get_running_loop()
-     cb = partial(
-         sync_open,
-         file,
-         mode=mode,
-         buffering=buffering,
-         encoding=encoding,
-         errors=errors,
-         newline=newline,
-         closefd=closefd,
-         opener=opener,
-     )
-     f = yield from loop.run_in_executor(executor, cb)
-
-     return wrap(f, loop=loop, executor=executor)
-
-
- @singledispatch
- def wrap(file, *, loop=None, executor=None):
-     raise TypeError("Unsupported io type: {}.".format(file))
-
-
- @wrap.register(TextIOBase)
- def _(file, *, loop=None, executor=None):
-     return AsyncTextIOWrapper(file, loop=loop, executor=executor)
-
-
- @wrap.register(BufferedWriter)
- @wrap.register(BufferedIOBase)
- def _(file, *, loop=None, executor=None):
-     return AsyncBufferedIOBase(file, loop=loop, executor=executor)
-
-
- @wrap.register(BufferedReader)
- @wrap.register(BufferedRandom)
- def _(file, *, loop=None, executor=None):
-     return AsyncBufferedReader(file, loop=loop, executor=executor)
-
-
- @wrap.register(FileIO)
- def _(file, *, loop=None, executor=None):
-     return AsyncFileIO(file, loop=loop, executor=executor)
-
-
- stdin = AsyncTextIndirectIOWrapper('sys.stdin', None, None, indirect=lambda: sys.stdin)
- stdout = AsyncTextIndirectIOWrapper('sys.stdout', None, None, indirect=lambda: sys.stdout)
- stderr = AsyncTextIndirectIOWrapper('sys.stderr', None, None, indirect=lambda: sys.stderr)
- stdin_bytes = AsyncIndirectBufferedIOBase('sys.stdin.buffer', None, None, indirect=lambda: sys.stdin.buffer)
- stdout_bytes = AsyncIndirectBufferedIOBase('sys.stdout.buffer', None, None, indirect=lambda: sys.stdout.buffer)
- stderr_bytes = AsyncIndirectBufferedIOBase('sys.stderr.buffer', None, None, indirect=lambda: sys.stderr.buffer)
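
A minimal usage sketch of the thread-pool-backed API above. The `open` defined here is re-exported at the package root as `aiofiles.open`, so the common pattern is:

    import asyncio
    import aiofiles

    async def main():
        async with aiofiles.open('example.txt', mode='w') as f:
            await f.write('hello\n')     # runs in the executor, not the loop
        async with aiofiles.open('example.txt') as f:
            print(await f.read())

    asyncio.run(main())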
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/dsv-576afacd.js DELETED
@@ -1,6 +0,0 @@
- var D={},A={},E=34,m=10,R=13;function I(r){return new Function("d","return {"+r.map(function(t,e){return JSON.stringify(t)+": d["+e+'] || ""'}).join(",")+"}")}function B(r,t){var e=I(r);return function(a,c){return t(e(a),c,r)}}function F(r){var t=Object.create(null),e=[];return r.forEach(function(a){for(var c in a)c in t||e.push(t[c]=c)}),e}function f(r,t){var e=r+"",a=e.length;return a<t?new Array(t-a+1).join(0)+e:e}function L(r){return r<0?"-"+f(-r,6):r>9999?"+"+f(r,6):f(r,4)}function S(r){var t=r.getUTCHours(),e=r.getUTCMinutes(),a=r.getUTCSeconds(),c=r.getUTCMilliseconds();return isNaN(r)?"Invalid Date":L(r.getUTCFullYear())+"-"+f(r.getUTCMonth()+1,2)+"-"+f(r.getUTCDate(),2)+(c?"T"+f(t,2)+":"+f(e,2)+":"+f(a,2)+"."+f(c,3)+"Z":a?"T"+f(t,2)+":"+f(e,2)+":"+f(a,2)+"Z":e||t?"T"+f(t,2)+":"+f(e,2)+"Z":"")}function Z(r){var t=new RegExp('["'+r+`
- \r]`),e=r.charCodeAt(0);function a(n,o){var s,i,u=c(n,function(h,l){if(s)return s(h,l-1);i=h,s=o?B(h,o):I(h)});return u.columns=i||[],u}function c(n,o){var s=[],i=n.length,u=0,h=0,l,v=i<=0,C=!1;n.charCodeAt(i-1)===m&&--i,n.charCodeAt(i-1)===R&&--i;function w(){if(v)return A;if(C)return C=!1,D;var j,d=u,p;if(n.charCodeAt(d)===E){for(;u++<i&&n.charCodeAt(u)!==E||n.charCodeAt(++u)===E;);return(j=u)>=i?v=!0:(p=n.charCodeAt(u++))===m?C=!0:p===R&&(C=!0,n.charCodeAt(u)===m&&++u),n.slice(d+1,j-1).replace(/""/g,'"')}for(;u<i;){if((p=n.charCodeAt(j=u++))===m)C=!0;else if(p===R)C=!0,n.charCodeAt(u)===m&&++u;else if(p!==e)continue;return n.slice(d,j)}return v=!0,n.slice(d,i)}for(;(l=w())!==A;){for(var T=[];l!==D&&l!==A;)T.push(l),l=w();o&&(T=o(T,h++))==null||s.push(T)}return s}function U(n,o){return n.map(function(s){return o.map(function(i){return g(s[i])}).join(r)})}function O(n,o){return o==null&&(o=F(n)),[o.map(g).join(r)].concat(U(n,o)).join(`
- `)}function M(n,o){return o==null&&(o=F(n)),U(n,o).join(`
- `)}function b(n){return n.map(N).join(`
- `)}function N(n){return n.map(g).join(r)}function g(n){return n==null?"":n instanceof Date?S(n):t.test(n+="")?'"'+n.replace(/"/g,'""')+'"':n}return{parse:a,parseRows:c,format:O,formatBody:M,formatRows:b,formatRow:N,formatValue:g}}export{Z as d};
- //# sourceMappingURL=dsv-576afacd.js.map