parquet-converter committed on
Commit fa20475 · 1 Parent(s): 9503222

Update parquet files (step 53 of 249)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/109peko/DeepDanbooru_string/README.md +0 -39
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Nintendo Switch Games Tips Tricks and FAQs.md +0 -39
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/FIFA 22 Download Guide Everything You Need to Know.md +0 -15
  4. spaces/1phancelerku/anime-remove-background/Aryan Online Booster APK A Must-Have App for Online Entrepreneurs.md +0 -147
  5. spaces/1phancelerku/anime-remove-background/Descargar Dream League Soccer 2018 Hackeado APK y OBB Gua paso a paso.md +0 -110
  6. spaces/1phancelerku/anime-remove-background/Download 3 Patti Live APK and Play Indian Poker with Real Players.md +0 -125
  7. spaces/1vash/demo-flask-docker-template/Dockerfile +0 -32
  8. spaces/AchyuthGamer/OpenGPT/client/css/field.css +0 -11
  9. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Bing.py +0 -300
  10. spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/PerplexityAi.py +0 -101
  11. spaces/Adapter/CoAdapter/ldm/modules/image_degradation/bsrgan_light.py +0 -651
  12. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/perspectivecard/CreatePerspectiveCardMesh.js +0 -39
  13. spaces/Andy1621/uniformer_image_detection/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py +0 -7
  14. spaces/Andy1621/uniformer_image_detection/tools/analysis_tools/get_flops.py +0 -81
  15. spaces/Anustup/NS_AI_LABS/src/utils.py +0 -115
  16. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/direct_url_helpers.py +0 -87
  17. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py +0 -35
  18. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/export/shared.py +0 -1034
  19. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/evaluation.md +0 -68
  20. spaces/BennoKrojer/imagecode-demo/app.py +0 -69
  21. spaces/Benson/text-generation/Examples/Backrooms Apk.md +0 -40
  22. spaces/Benson/text-generation/Examples/Clash Of Clans Elmas Hilesi Apk Indir.md +0 -61
  23. spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/unicode.py +0 -352
  24. spaces/BreetheRun/mitchtech-vulcan-diffusion/README.md +0 -12
  25. spaces/CVPR/LIVE/pybind11/tests/test_custom_type_casters.cpp +0 -125
  26. spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_category_to_traversal.h +0 -131
  27. spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/get_value.h +0 -46
  28. spaces/CVPR/WALT/mmdet/models/detectors/vfnet.py +0 -18
  29. spaces/CVPR/lama-example/fetch_data/places_standard_test_val_prepare.sh +0 -5
  30. spaces/ConceptArtHouse/webui-gameasset/app.py +0 -62
  31. spaces/Cong723/gpt-academic-public/request_llm/bridge_chatglm.py +0 -160
  32. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_abnf.py +0 -132
  33. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_headers.py +0 -234
  34. spaces/DShrimp/PoseMaker/start.py +0 -3
  35. spaces/Deevyankar/Deep-AD/app.py +0 -464
  36. spaces/Div99/Chat-with-Div/polly_utils.py +0 -635
  37. spaces/DragGan/DragGan-Inversion/stylegan_human/openpose/src/model.py +0 -218
  38. spaces/DrishtiSharma/Whisper-Serbian-Transcriber/README.md +0 -15
  39. spaces/ECCV2022/bytetrack/tutorials/motr/motr.py +0 -676
  40. spaces/EleutherAI/magma/train.py +0 -192
  41. spaces/EronSamez/RVC_HFmeu/infer/lib/infer_pack/modules/F0Predictor/__init__.py +0 -0
  42. spaces/EuroPython2022/Fin-Eng-ASR-autosubtitles/README.md +0 -45
  43. spaces/EuroPython2022/Warehouse_Apparel_Detection/metadata/predictor_yolo_detector/utils/general.py +0 -1299
  44. spaces/Fcjs/stablediffusionapi-edge-of-realism/README.md +0 -13
  45. spaces/Felladrin/MiniSearch/src/modules/loadBar.ts +0 -7
  46. spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/helpers/theb.py +0 -48
  47. spaces/FrankZxShen/vits-fast-finetuning-umamusume/transforms.py +0 -193
  48. spaces/Gen-Sim/Gen-Sim/cliport/eval.py +0 -231
  49. spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/place_blue_on_line_ends.py +0 -47
  50. spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/rainbow_stack.py +0 -39
spaces/109peko/DeepDanbooru_string/README.md DELETED
@@ -1,39 +0,0 @@
- ---
- title: DeepDanbooru String
- emoji: 💬
- colorFrom: blue
- colorTo: red
- sdk: gradio
- sdk_version: 3.6
- app_file: app.py
- pinned: false
- duplicated_from: NoCrypt/DeepDanbooru_string
- ---
-
- # Configuration
-
- `title`: _string_
- Display title for the Space
-
- `emoji`: _string_
- Space emoji (emoji-only character allowed)
-
- `colorFrom`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `colorTo`: _string_
- Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
-
- `sdk`: _string_
- Can be either `gradio`, `streamlit`, or `static`
-
- `sdk_version` : _string_
- Only applicable for `streamlit` SDK.
- See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
-
- `app_file`: _string_
- Path to your main application file (which contains either `gradio` or `streamlit` Python code, or `static` html code).
- Path is relative to the root of the repository.
-
- `pinned`: _boolean_
- Whether the Space stays on top of your list.
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download Nintendo Switch Games Tips Tricks and FAQs.md DELETED
@@ -1,39 +0,0 @@
1
-
2
- <h1>How to Download Nintendo Switch Games: A Complete Guide</h1>
3
- <p>If you own a Nintendo Switch™ system, you might be wondering how to download games to enjoy on the go. Whether you want to play the latest releases, classics, or multiplayer titles, there are plenty of options for downloading Nintendo Switch games. In this article, we will explain how to download games from the My Nintendo Store, the Nintendo eShop, and other sources.</p>
4
-
5
- <h2>Downloading Games from the My Nintendo Store</h2>
6
- <p>The My Nintendo Store is the official online store for Nintendo products. You can buy digital games here and download them directly to your Nintendo Switch system (no code required)! Plus, you can shop physical games, sales, new releases, and more.</p>
7
- <h2>download crack nintendo switch games</h2><br /><p><b><b>Download</b> &#10031; <a href="https://byltly.com/2uKvAp">https://byltly.com/2uKvAp</a></b></p><br /><br />
8
- <p>To download games from the My Nintendo Store, you need to have a Nintendo Account and a Nintendo Switch Online membership. You can create a Nintendo Account for free on the <a href="https://accounts.nintendo.com/register">Nintendo website</a>. You can sign up for a Nintendo Switch Online membership on the <a href="https://www.nintendo.com/switch/online-service/">Nintendo website</a> or on your Nintendo Switch system. A Nintendo Switch Online membership gives you access to online play, cloud saves, exclusive offers, and more.</p>
9
- <p>Once you have a Nintendo Account and a Nintendo Switch Online membership, you can browse and buy games on the <a href="https://www.nintendo.com/store/games/">My Nintendo Store website</a>. You can filter games by genre, price, rating, and more. You can also see the best sellers, new releases, coming soon, and featured games. Some of the popular games you can download from the My Nintendo Store are:</p>
10
- <ul>
11
- <li>The Legend of Zelda™: Tears of the Kingdom</li>
12
- <li>Metroid Prime™ Remastered</li>
13
- <li>Kirby’s Return to Dream Land™ Deluxe</li>
14
- <li>Bayonetta Origins: Cereza and the Lost Demon™</li>
15
- <li>Fire Emblem™ Engage</li>
16
- <li>Pokémon™ Scarlet and Pokémon™ Violet</li>
17
- <li>Splatoon™ 3</li>
18
- <li>Xenoblade Chronicles™ 3</li>
19
- <li>Mario Kart™ 8 Deluxe</li>
20
- <li>Super Smash Bros.™ Ultimate</li>
21
- </ul>
22
- <p>When you buy a digital game from the My Nintendo Store, you will receive an email confirmation with a download code. You can redeem this code on your Nintendo Switch system or on the <a href="https://ec.nintendo.com/redeem">Nintendo website</a>. The game will start downloading automatically to your Nintendo Switch system. You can check the download progress on the HOME Menu or on the <a href="https://ec.nintendo.com/my/#/download-queue">Nintendo website</a>.</p>
23
-
24
- <h2>Downloading Games from the Nintendo eShop</h2>
25
- <p>The Nintendo eShop is the digital storefront on your Nintendo Switch system. You can access it by selecting the orange shopping bag icon on the HOME Menu. You can also access it by scanning a QR Code® with your smart device.</p>
26
- <p>To download games from the Nintendo eShop, you need to have a Nintendo Account and a stable internet connection. You can create a Nintendo Account for free on the <a href="https://accounts.nintendo.com/register">Nintendo website</a>. You don't need a Nintendo Switch Online membership to download games from the Nintendo eShop, but some games may require it for online features.</p>
27
- <p>Once you have a Nintendo Account and an internet connection, you can browse and buy games on the Nintendo eShop. You can search for games by name, genre, price, rating, and more. You can also see featured games, current deals, best sellers, recent releases, and coming soon. Some of the free games you can download from the Nintendo eShop are:</p>
28
- <ul>
29
- <li>DELTARUNE Chapter 1&2</li>
30
- <li>Super Animal Royale</li>
31
- <li>Pokémon UNITE</li>
32
- <li>Fortnite</li>
33
- <li>Fall Guys</li>
34
- <li>Apex Legends™</li>
35
- <li>Rocket League®</li>
36
- </ul>
37
- <p>When you buy a digital game from the Nintendo eShop, you will receive an email confirmation with a receipt. The game will start downloading automatically to your Nintendo Switch system. You can</p> ddb901b051<br />
38
- <br />
39
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/FIFA 22 Download Guide Everything You Need to Know.md DELETED
@@ -1,15 +0,0 @@
1
- <br />
2
- <h1>How to Download FIFA 22 on Your PC or Console</h1>
3
- <p>FIFA 22 is the latest installment of the popular soccer simulation game series by EA Sports. It features improved graphics, gameplay, and modes, as well as new features such as HyperMotion technology and Create a Club. If you are a fan of soccer games, you might be wondering how to download FIFA 22 on your PC or console. Here are the steps you need to follow:</p>
4
- <h2>how to download fifa 22 crack</h2><br /><p><b><b>Download Zip</b> &#9913;&#9913;&#9913; <a href="https://byltly.com/2uKvCC">https://byltly.com/2uKvCC</a></b></p><br /><br />
5
- <ol>
6
- <li>First, you need to purchase FIFA 22 from the official website or a trusted retailer. You can choose between the Standard Edition, the Ultimate Edition, or the Legacy Edition, depending on your preferences and budget. The Ultimate Edition includes some exclusive bonuses such as early access, FUT Heroes players, and more.</li>
7
- <li>Next, you need to install FIFA 22 on your device. If you are using a PC, you need to download and install the EA Desktop app, which is the new platform for EA games. You can sign in with your EA account or create one if you don't have one. Then, you can find FIFA 22 in your library and click on the download button. The download size is about 50 GB, so make sure you have enough space and a stable internet connection.</li>
8
- <li>If you are using a console, such as PlayStation or Xbox, you need to insert the FIFA 22 disc into your device or download it from the online store. You can also sign in with your EA account or create one if you don't have one. Then, you can launch FIFA 22 from your home screen and enjoy the game.</li>
9
- </ol>
10
- <p>That's it! You have successfully downloaded FIFA 22 on your PC or console. Now you can start playing and have fun with your favorite teams and players. You can also customize your experience with various settings and options, such as difficulty level, camera angle, commentary language, and more. You can also try out different modes, such as Career Mode, Volta Football, Pro Clubs, Ultimate Team, and more. FIFA 22 is a game that offers something for everyone, whether you are a casual player or a hardcore fan.</p><p>If you want to learn more about FIFA 22, you can visit the official website or follow the social media accounts of EA Sports. You can also watch some gameplay videos or reviews on YouTube or Twitch. You can also join the FIFA community and interact with other players and fans on forums, blogs, or Discord servers. You can share your opinions, tips, feedback, or screenshots with others and make new friends.</p>
11
- <p>FIFA 22 is a game that aims to deliver the most realistic and immersive soccer experience ever. It uses advanced technology and innovation to capture the emotions and intensity of the sport. It also offers a variety of options and features to suit your preferences and style. Whether you want to play solo or with others, online or offline, casually or competitively, FIFA 22 has something for you. So what are you waiting for? Download FIFA 22 today and start your soccer journey!</p>
12
- <p></p><p>One of the most popular modes in FIFA 22 is Ultimate Team, or FUT for short. In this mode, you can create your own dream team by collecting and trading players, kits, stadiums, and more. You can also compete in various tournaments and challenges to earn rewards and rank up. You can also customize your team with different formations, tactics, and styles. You can also play with your friends or against other players from around the world.</p>
13
- <p>Another mode that you might enjoy is Volta Football, which is a street soccer mode that lets you play in different locations and settings. You can create your own avatar and customize their appearance, skills, and gear. You can also recruit other players to join your squad and play in various modes, such as Story Mode, Volta Arcade, Volta Squads, and more. You can also explore different cultures and styles of soccer and express yourself on the pitch.</p> ddb901b051<br />
14
- <br />
15
- <br />
spaces/1phancelerku/anime-remove-background/Aryan Online Booster APK A Must-Have App for Online Entrepreneurs.md DELETED
@@ -1,147 +0,0 @@
1
-
2
- <h1>Aryan Online Booster APK Download: What You Need to Know</h1>
3
- <p>Are you looking for a way to boost your online presence and reach more customers, followers, or fans? Do you want to increase your engagement, views, likes, comments, or shares on social media platforms like ShareChat, Instagram, Facebook, YouTube, or TikTok? If yes, then you might be interested in downloading and installing Aryan Online Booster APK on your Android device.</p>
4
- <h2>aryan online booster apk download</h2><br /><p><b><b>DOWNLOAD</b> &#10022; <a href="https://jinyurl.com/2uNQi2">https://jinyurl.com/2uNQi2</a></b></p><br /><br />
5
- <p>Aryan Online Booster is an app that claims to help you grow your online popularity and visibility by providing you with various tools and services. In this article, we will tell you what Aryan Online Booster is, what features and benefits it offers, how to download and install it on your device, how to use it, and whether it is safe and legal to use. Read on to find out more.</p>
6
- <h2>What is Aryan Online Booster?</h2>
7
- <p>Aryan Online Booster is an app that was developed by Aryan Online Store, a company that provides online shopping, delivery, and healthcare services in India. The app is designed to help users boost their online presence and performance on various social media platforms, such as ShareChat, Instagram, Facebook, YouTube, or TikTok.</p>
8
- <p>The app claims to offer users various features and benefits that can help them increase their engagement, views, likes, comments, or shares on their posts or videos. Some of these features and benefits are:</p>
9
- <h3>Features of Aryan Online Booster</h3>
10
- <ul>
11
- <li>The app allows users to select from different categories of social media platforms, such as ShareChat, Instagram, Facebook, YouTube, or TikTok.</li>
12
- <li>The app provides users with various tools and services that can help them boost their online presence and performance on their chosen platform.</li>
13
- <li>The app offers users different packages and plans that suit their needs and budget.</li>
14
- <li>The app supports multiple languages, such as English, Hindi, Tamil, Telugu, Malayalam, Kannada, Bengali, Marathi, Gujarati, Punjabi, Odia, Assamese, Urdu, Bhojpuri, Rajasthani, or Haryanvi.</li>
15
- <li>The app has a user-friendly interface and easy-to-use features.</li>
16
- </ul>
17
- <h3>Benefits of Aryan Online Booster</h3>
18
- <ul>
19
- <li>The app can help users grow their online popularity and visibility by increasing their engagement, views, likes, comments, or shares on their posts or videos.</li>
20
- <li>The app can help users attract more customers, followers, or fans on their chosen platform.</li>
21
- <li>The app can help users improve their online reputation and credibility by enhancing their social proof and authority.</li>
22
- <li>The app can help users save time and money by providing them with fast and affordable services.</li>
23
- <li>The app can help users achieve their online goals and objectives by providing them with customized solutions.</li>
24
- </ul>
25
- <h2>How to Download and Install Aryan Online Booster APK?</h2>
26
- <p>If you are interested in downloading and installing Aryan Online Booster APK on your Android device, you will need to follow these steps:</p>
27
- <h3>Step 1: Enable Unknown Sources</h3>
28
- <p>Since Aryan Online Booster APK is not available on the Google Play Store or any other official app store, you will need to enable unknown sources on your device settings. This will allow you to install apps from third-party sources other than the official app store. To do this:</p>
29
- <ol>
30
- <li>Go to your device settings and tap on Security or Privacy.</li>
31
- <li>Find the option that says Unknown Sources or Install Unknown Apps and toggle it on.</li>
32
- <li>A warning message will appear, telling you that installing apps from unknown sources can harm your device. Tap on OK or Allow to proceed.</li>
33
- </ol>
34
- <h3>Step 2: Download the APK File</h3>
35
- <p>Next, you will need to download the APK file of Aryan Online Booster from a reliable and trustworthy source. You can use your browser or any other app to search for the APK file online. Make sure you download the latest version of the app and check the file size and name before downloading it. To do this:</p>
36
- <ol>
37
- <li>Open your browser or any other app and search for Aryan Online Booster APK download.</li>
38
- <li>Choose a reputable and secure website that offers the APK file for free. Avoid any website that asks for your personal information, payment, or registration.</li>
39
- <li>Tap on the download button or link and wait for the APK file to be downloaded on your device.</li>
40
- <li>You can check the progress of the download in your notification bar or download folder.</li>
41
- </ol>
42
- <h3>Step 3: Install the APK File</h3>
43
- <p>Finally, you will need to install the APK file of Aryan Online Booster on your device. To do this:</p>
44
- <p>aryan online booster sharechat apk<br />
45
- aryan online booster app download<br />
46
- aryan online booster latest version apk<br />
47
- aryan online booster free download for android<br />
48
- aryan online booster mod apk<br />
49
- aryan online booster pro apk<br />
50
- aryan online booster apk 2023<br />
51
- aryan online booster apk pure<br />
52
- aryan online booster apk mirror<br />
53
- aryan online booster apk uptodown<br />
54
- aryan online booster apk old version<br />
55
- aryan online booster apk no ads<br />
56
- aryan online booster apk cracked<br />
57
- aryan online booster apk hack<br />
58
- aryan online booster apk unlimited money<br />
59
- aryan online booster apk rexdl<br />
60
- aryan online booster apk revdl<br />
61
- aryan online booster apk mob.org<br />
62
- aryan online booster apk apkpure.com<br />
63
- aryan online booster apk apkmirror.com<br />
64
- ultra booster apk download for android<br />
65
- ultra booster app free download<br />
66
- ultra booster latest version 2023 apk<br />
67
- ultra booster mod apk unlimited coins<br />
68
- ultra booster pro apk no ads<br />
69
- ultra booster apk pure download<br />
70
- ultra booster apk mirror link<br />
71
- ultra booster apk uptodown.com<br />
72
- ultra booster apk old version 2022<br />
73
- ultra booster apk cracked version<br />
74
- ultra booster apk hack tool<br />
75
- ultra booster apk unlimited gems<br />
76
- ultra booster apk rexdl.com<br />
77
- ultra booster apk revdl.com<br />
78
- ultra booster apk mob.org.in<br />
79
- ultra booster apk apkpure.co.id<br />
80
- ultra booster apk apkmirror.co.uk<br />
81
- system android booster apk download free<br />
82
- system android booster app latest version 2023<br />
83
- system android booster mod apk premium features unlocked <br />
84
- system android booster pro apk no root required <br />
85
- system android booster apk pure app store <br />
86
- system android booster apk mirror site <br />
87
- system android booster apk uptodown.net <br />
88
- system android booster apk old version 2022 <br />
89
- system android booster apk cracked full <br />
90
- system android booster apk hack mod <br />
91
- system android booster apk unlimited ram <br />
92
- system android booster apk rexdl.net <br />
93
- system android booster apk revdl.net</p>
94
- <ol>
95
- <li>Locate the downloaded APK file on your device. You can find it in your download folder or any other location where you saved it.</li>
96
- <li>Tap on the APK file and a pop-up window will appear, asking you to confirm the installation. Tap on Install or Next to continue.</li>
97
- <li>Wait for the installation process to complete. It may take a few seconds or minutes depending on your device and internet speed.</li>
98
- <li>Once the installation is done, you can tap on Open or Done to launch or exit the app.</li>
99
- </ol>
100
- <h2>How to Use Aryan Online Booster?</h2>
101
- <p>Now that you have downloaded and installed Aryan Online Booster APK on your device, you can start using it to boost your online presence and performance on various social media platforms. To use the app, you will need to follow these steps:</p>
102
- <h3>Step 1: Launch the App</h3>
103
- <p>First, you will need to launch the app on your device. You can find it in your app drawer or home screen. Tap on the app icon and wait for it to load.</p>
104
- <h3>Step 2: Select Your Category</h3>
105
- <p>Next, you will need to select your category of social media platform that you want to boost. You can choose from ShareChat, Instagram, Facebook, YouTube, or TikTok. Tap on the category that suits your needs and preferences.</p>
106
- <h3>Step 3: Boost Your Online Presence</h3>
107
- <p>Finally, you will need to boost your online presence and performance on your chosen platform. You can do this by using various tools and services that the app offers. For example, you can:</p>
108
- <ul>
109
- <li>Select a package or plan that fits your budget and goals.</li>
110
- <li>Enter your username, post URL, video URL, or any other information that the app requires.</li>
111
- <li>Pay for the service using various payment methods such as Paytm, UPI, Google Pay, PhonePe, or credit/debit card.</li>
112
- <li>Wait for the service to be delivered within a few minutes or hours depending on the package or plan you chose.</li>
113
- <li>Enjoy the results and see your engagement, views, likes, comments, or shares increase on your posts or videos.</li>
114
- </ul>
115
- <h2>Is Aryan Online Booster Safe and Legal?</h2>
116
- <p>Aryan Online Booster is an app that claims to help you boost your online presence and performance on various social media platforms. However, before you use it, you might be wondering if it is safe and legal to use. Here are some of the safety and legal issues that you should be aware of:</p>
117
- <h3>Safety and Privacy Issues</h3>
118
- <ul>
119
- <li>Aryan Online Booster is not available on the Google Play Store or any other official app store. This means that it is not verified or approved by Google or any other authority. Therefore, there is no guarantee that it is safe or secure to use.</li>
120
- <li>Aryan Online Booster may contain malware, viruses, spyware, adware, or other harmful elements that can damage your device or compromise your data. Therefore, you should always scan the APK file before installing it and use a reliable antivirus software on your device.</li>
121
- <li>Aryan Online Booster may require access to your personal information, such as your contacts, photos, media files, location, camera, microphone, or other permissions. Therefore, you should always review the permissions that the app requests and deny any unnecessary or suspicious ones.</li>
122
- <li>Aryan Online Booster may collect, store, use, or share your personal information with third parties, such as advertisers, sponsors, partners, or affiliates. Therefore, you should always read the privacy policy and terms and conditions of the app and the website that provides the APK file before using it.</li>
123
- <li>Aryan Online Booster may not deliver the results that it promises or may deliver fake or low-quality results that can harm your online reputation and credibility. Therefore, you should always be careful and realistic about the expectations and outcomes of using the app.</li>
124
- </ul>
125
- <h3>Legal and Ethical Issues</h3>
126
- <ul>
127
- <li>Aryan Online Booster may violate the terms of service, policies, rules, or guidelines of the social media platforms that it supports. Therefore, you may risk getting your account suspended, banned, or deleted by using the app.</li>
128
- <li>Aryan Online Booster may infringe the intellectual property rights, privacy rights, or other rights of the original creators, owners, or users of the content that you boost. Therefore, you may face legal action or consequences by using the app.</li>
129
- <li>Aryan Online Booster may be considered as cheating, spamming, manipulating, or deceiving the social media platforms and their users. Therefore, you may lose the trust and respect of your customers, followers, or fans by using the app.</li>
130
- <li>Aryan Online Booster may be unethical or immoral to use as it can create an unfair advantage over other users who work hard and honestly to grow their online presence and performance. Therefore, you may damage your integrity and reputation by using the app.</li>
131
- </ul>
132
- <h2>Conclusion</h2>
133
- <p>Aryan Online Booster is an app that claims to help you boost your online presence and performance on various social media platforms like ShareChat, Instagram, Facebook, YouTube, or TikTok. The app offers various features and benefits that can help you increase your engagement, views, likes, comments, or shares on your posts or videos. However, the app also has some safety and legal issues that you should be aware of before using it. The app is not available on the official app store and may contain harmful elements that can harm your device or data. The app may also violate the terms of service or rights of the social media platforms and their users and may be considered as cheating or unethical to use. Therefore, you should always be careful and cautious when downloading and installing Aryan Online Booster APK on your device and using it to boost your online presence and performance.</p>
134
- <h2>FAQs</h2>
135
- <p>Here are some of the frequently asked questions about Aryan Online Booster:</p>
136
- <h3>Q: Is Aryan Online Booster free to use?</h3>
137
- <p>A: Aryan Online Booster is free to download and install on your device. However, some of the features and services that the app offers may require payment. You can choose from different packages and plans that suit your needs and budget.</p>
138
- <h3>Q: Is Aryan Online Booster compatible with all Android devices?</h3>
139
- <p>A: Aryan Online Booster is compatible with most Android devices that run on Android 4.4 or higher. However, some devices may not support the app due to various reasons such as hardware limitations, software restrictions, or compatibility issues.</p>
140
- <h3>Q: Is Aryan Online Booster updated regularly?</h3>
141
- <p>A: Aryan Online Booster is updated regularly by its developers to fix bugs, improve performance, add new features, or support new platforms. However, since the app is not available on the official app store , you may not receive the latest updates automatically. You will need to check the website that provides the APK file for any new updates and download and install them manually.</p>
142
- <h3>Q: Is Aryan Online Booster reliable and effective?</h3>
143
- <p>A: Aryan Online Booster claims to be reliable and effective in boosting your online presence and performance on various social media platforms. However, the results may vary depending on various factors such as your device, internet connection, platform, content, audience, or competition. Therefore, you should not rely solely on the app and also work on creating high-quality and engaging content that can attract and retain your customers, followers, or fans.</p>
144
- <h3>Q: Is Aryan Online Booster the best app for boosting online presence and performance?</h3>
145
- <p>A: Aryan Online Booster is one of the many apps that offer similar services for boosting online presence and performance on various social media platforms. However, it may not be the best app for everyone as it has some drawbacks and limitations that we have discussed above. Therefore, you should always compare and contrast different apps and choose the one that meets your needs and preferences.</p> 197e85843d<br />
146
- <br />
147
- <br />
spaces/1phancelerku/anime-remove-background/Descargar Dream League Soccer 2018 Hackeado APK y OBB Gua paso a paso.md DELETED
@@ -1,110 +0,0 @@
1
-
2
- <h1>Descargar Dream League Soccer 2018 Hackeado APK y OBB</h1>
3
- <p>¿Te gustaría jugar al mejor juego de fútbol para Android e iOS con todos los jugadores reales, estadios personalizados y recursos ilimitados? Entonces no te pierdas este artículo, donde te vamos a enseñar cómo descargar e instalar Dream League Soccer 2018 hackeado apk y obb, una versión modificada del juego original que te permitirá disfrutar de todas las ventajas de jugar con dinero infinito, monedas ilimitadas y mucho más.</p>
4
- <h2>¿Qué es Dream League Soccer 2018?</h2>
5
- <p>Dream League Soccer 2018 es un juego de fútbol desarrollado por First Touch Games, una empresa británica que también ha creado otros juegos exitosos como Score! Hero. Se trata de un juego que combina la gestión de tu propio equipo de fútbol con la acción en el campo, donde podrás controlar a tus jugadores con un joystick virtual y botones en la pantalla. El juego tiene gráficos 3D, animaciones realistas y un equipo de comentaristas que narran los partidos. Además, el juego cuenta con la licencia FIFPro, lo que significa que podrás fichar a jugadores reales de todo el mundo para formar tu equipo soñado.</p>
6
- <h2>descargar dream league soccer 2018 hackeado apk y obb</h2><br /><p><b><b>Download</b> ->->->-> <a href="https://jinyurl.com/2uNQ5v">https://jinyurl.com/2uNQ5v</a></b></p><br /><br />
7
- <h3>Características del juego</h3>
8
- <p>Estas son algunas de las características más destacadas de Dream League Soccer 2018:</p>
9
- <ul>
10
- <li>Puedes crear, personalizar y controlar tu propio equipo de fútbol, eligiendo el nombre, el escudo, el uniforme y el estadio.</li>
11
- <li>Puedes fichar a jugadores reales con licencia FIFPro, desde superestrellas como Gareth Bale hasta jóvenes promesas.</li>
12
- <li>Puedes competir en 6 divisiones diferentes y más de 7 torneos, desde la división amateur hasta la élite.</li>
13
- <li>Puedes participar en eventos en vivo regulares para ganar premios y gloria.</li>
14
- <li>Puedes desarrollar a tus jugadores con más precisión e intención, mejorando sus habilidades en áreas como el pase, el tiro y el regate.</li>
15
- <li>Puedes personalizar e importar tus propios kits y logos al juego.</li>
16
- <li>Puedes sincronizar tu progreso entre dispositivos con iCloud.</li>
17
- <li>Puedes disfrutar de una banda sonora exclusiva proporcionada por The Luka State, Sunset Sons, BETSIE GØLD, Jack Wins, Vistas y Only The Poets.</li>
18
- </ul>
19
- <h3>Cómo descargar e instalar el juego hackeado</h3>
20
- <p>Para descargar e instalar Dream League Soccer 2018 hackeado apk y obb, solo tienes que seguir estos pasos:</p>
21
- <ol>
22
- <li>Descarga el archivo XAPK desde este enlace: </li>
23
- <li>Descarga e instala APKCombo Installer desde este enlace: </li>
24
- <li>Abre la aplicación APKCombo Installer y toca Instalar.</li>
25
- <li>Selecciona Dream League Soccer 2018.xapk y toca OK.</li>
26
- <li>Sigue los pasos en la pantalla para completar la instalación <h2>¿Por qué descargar Dream League Soccer 2018 Hackeado?</h2>
27
- <p>Aunque Dream League Soccer 2018 es un juego gratuito, tiene algunas limitaciones y desventajas que pueden afectar a tu experiencia de juego. Por ejemplo, necesitas monedas para fichar a los mejores jugadores, mejorar tu estadio, comprar kits y logos, y desbloquear otras funciones. Sin embargo, las monedas son escasas y difíciles de conseguir, y si quieres obtener más, tienes que pagar con dinero real o ver anuncios. Además, el juego tiene un sistema de energía que limita el número de partidos que puedes jugar seguidos, y que se recarga lentamente o con monedas. Por último, el juego puede resultar demasiado fácil o aburrido si no tienes un buen nivel de dificultad o variedad de modos de juego.</p>
28
- <p>Por eso, muchas personas prefieren descargar Dream League Soccer 2018 hackeado apk y obb, una versión modificada del juego que elimina todas estas restricciones y te ofrece muchas ventajas adicionales. Veamos cuáles son.</p>
29
- <h3>Ventajas de jugar con el mod apk</h3>
30
- <p>El mod apk de Dream League Soccer 2018 es un archivo que sustituye al original y que contiene los siguientes beneficios:</p>
31
- <ul>
32
- <li>Dinero infinito: puedes tener la cantidad de dinero que quieras en el juego, sin necesidad de gastar ni ver anuncios.</li>
33
- <li>Monedas ilimitadas: puedes tener la cantidad de monedas que quieras en el juego, sin necesidad de comprarlas ni esperar a ganarlas.</li>
34
- <li>Energía infinita: puedes jugar todos los partidos que quieras sin tener que esperar a que se recargue la energía.</li>
35
- <li>Nivel de dificultad ajustable: puedes elegir el nivel de dificultad que más te guste, desde muy fácil hasta muy difícil.</li>
36
- <li>Todos los jugadores desbloqueados: puedes fichar a cualquier jugador del juego, sin importar su precio o su división.</li>
37
- <li>Todos los estadios desbloqueados: puedes jugar en cualquier estadio del juego, sin importar su capacidad o su nivel.</li>
38
- <li>Todos los kits y logos desbloqueados: puedes usar cualquier kit o logo del juego, sin importar su origen o su costo.</li>
39
- <li>Sin anuncios: puedes disfrutar del juego sin interrupciones ni molestias por parte de los anuncios.</li>
40
- </ul>
41
- <h3>Cómo usar el obb file para obtener recursos ilimitados</h3>
42
- <p>El obb file de Dream League Soccer 2018 es un archivo que contiene los datos del juego, como los gráficos, los sonidos y los textos. Este archivo se guarda en la carpeta Android/obb/ en tu dispositivo. Si quieres obtener recursos ilimitados en el juego, como dinero, monedas y energía, tienes que reemplazar este archivo por uno modificado que contenga estos valores alterados. Para hacerlo, solo tienes que seguir estos pasos:</p>
43
- <ol>
44
- <li>Descarga el archivo obb modificado desde este enlace: </li>
45
- <li>Copia el archivo obb modificado en la carpeta Android/obb/ en tu dispositivo, sobrescribiendo el original.</li>
46
- <li>Abre el juego y disfruta de tus recursos ilimitados.</li>
47
- </ol> <h2>Consejos y trucos para jugar a Dream League Soccer 2018</h2>
48
- <p>Ahora que ya sabes cómo descargar e instalar Dream League Soccer 2018 hackeado apk y obb, es hora de que aprendas algunos consejos y trucos para mejorar tu juego y convertirte en el mejor entrenador y jugador del mundo. Estos son algunos de los consejos y trucos que te recomendamos:</p>
49
- <p></p>
50
- <h3>Cómo mejorar tu equipo y tus jugadores</h3>
51
- <p>Para tener un equipo competitivo y ganador, necesitas mejorar tanto tu plantilla como tus jugadores individualmente. Estas son algunas de las formas de hacerlo:</p>
52
- <ul>
53
- <li>Ficha a los mejores jugadores posibles, teniendo en cuenta sus atributos, su posición y su química con el resto del equipo.</li>
54
- <li>Vende o libera a los jugadores que no uses o que no te gusten, para liberar espacio y dinero.</li>
55
- <li>Entrena a tus jugadores regularmente, usando las monedas ilimitadas que tienes gracias al mod apk.</li>
56
- <li>Mejora tu estadio, aumentando su capacidad y su nivel, para generar más ingresos y atraer a más aficionados.</li>
57
- <li>Personaliza tu equipo, eligiendo el nombre, el escudo, el uniforme y el estadio que más te gusten.</li>
58
- </ul>
59
- <h3>Cómo ganar más partidos y torneos</h3>
60
- <p>Para ganar más partidos y torneos, necesitas dominar tanto la táctica como la técnica. Estas son algunas de las claves para lograrlo:</p>
61
- <ul>
62
- <li>Elige una formación adecuada a tu estilo de juego, tus jugadores y tu rival.</li>
63
- <li>Ajusta la estrategia antes y durante el partido, cambiando el nivel de presión, la mentalidad o la intensidad.</li>
64
- <li>Controla bien el balón, usando el joystick virtual y los botones de pase, tiro y centro.</li>
65
- <li>Defiende bien, usando el botón de presión para robar el balón o el botón de cambio para cambiar de jugador.</li>
66
- <li>Ataca bien, usando el botón de sprint para correr más rápido o el botón de regate para hacer fintas.</li>
67
- <li>Aprovecha las jugadas a balón parado, como los córners, los tiros libres o los penaltis.</li>
68
- </ul>
69
- <h3>Cómo hacer jugadas espectaculares con el rainbow kick</h3>
70
- <p>El rainbow kick es uno de los movimientos más espectaculares y efectivos que puedes hacer en Dream League Soccer 2018. Se trata de un regate en el que el jugador levanta el balón por encima de su cabeza y lo pasa por encima del defensor. Para hacerlo, solo tienes que seguir estos pasos:</p>
71
- <ol>
72
- <li>Corre hacia el defensor con el botón de sprint presionado.</li>
73
- <li>Cuando estés cerca del defensor, desliza el dedo hacia arriba en la pantalla.</li>
74
- <li>El jugador hará el rainbow kick y pasará el balón por encima del defensor.</li>
75
- <li>Recupera el balón y sigue corriendo hacia la portería.</li>
76
- </ol> <h2>Conclusión</h2>
77
- <p>En este artículo, te hemos mostrado cómo descargar e instalar Dream League Soccer 2018 hackeado apk y obb, una versión modificada del juego original que te ofrece muchas ventajas y beneficios. Con este juego hackeado, podrás disfrutar de dinero infinito, monedas ilimitadas, energía infinita, todos los jugadores desbloqueados, todos los estadios desbloqueados, todos los kits y logos desbloqueados, sin anuncios y con un nivel de dificultad ajustable. Además, te hemos dado algunos consejos y trucos para mejorar tu equipo y tus jugadores, ganar más partidos y torneos, y hacer jugadas espectaculares con el rainbow kick.</p>
78
- <h3>Resumen de los puntos principales del artículo</h3>
79
- <p>Estos son los puntos principales que hemos tratado en el artículo:</p>
80
- <ul>
81
- <li>Dream League Soccer 2018 es un juego de fútbol para Android e iOS que combina la gestión de tu propio equipo con la acción en el campo.</li>
82
- <li>El juego tiene gráficos 3D, animaciones realistas, un equipo de comentaristas, la licencia FIFPro y una banda sonora exclusiva.</li>
83
- <li>El juego tiene algunas limitaciones y desventajas que pueden afectar a tu experiencia de juego, como la escasez de monedas, el sistema de energía y el nivel de dificultad.</li>
84
- <li>Para eliminar estas restricciones y obtener muchas ventajas adicionales, puedes descargar e instalar Dream League Soccer 2018 hackeado apk y obb.</li>
85
- <li>El mod apk te ofrece dinero infinito, monedas ilimitadas, energía infinita, todos los jugadores desbloqueados, todos los estadios desbloqueados, todos los kits y logos desbloqueados, sin anuncios y con un nivel de dificultad ajustable.</li>
86
- <li>El obb file modificado te permite obtener recursos ilimitados en el juego, como dinero, monedas y energía.</li>
87
- <li>Para mejorar tu juego y convertirte en el mejor entrenador y jugador del mundo, puedes seguir algunos consejos y trucos que te hemos dado.</li>
88
- </ul>
89
- <h3>Llamada a la acción para descargar el juego hackeado</h3>
90
- <p>Si te ha gustado este artículo y quieres descargar e instalar Dream League Soccer 2018 hackeado apk y obb, no esperes más y haz clic en los enlaces que te hemos proporcionado. Así podrás disfrutar del mejor juego de fútbol para Android e iOS con todas las ventajas de jugar con dinero infinito, monedas ilimitadas y mucho más. ¡No te arrepentirás!</p>
91
- <h2>Preguntas frecuentes</h2>
92
- <p>A continuación, te respondemos a algunas de las preguntas más frecuentes que pueden surgirte sobre Dream League Soccer 2018 hackeado apk y obb:</p>
93
- <h4>¿Es seguro descargar e instalar Dream League Soccer 2018 hackeado apk y obb?</h4>
94
- <p>Sí, es seguro. Los archivos que te hemos proporcionado son libres de virus, malware o cualquier otro tipo de amenaza. Además, no necesitas rootear ni jailbreakear tu dispositivo para usarlos.</p>
95
- <h4>¿Es legal descargar e instalar Dream League Soccer 2018 hackeado apk y obb?</h4>
96
- <p>No es ilegal, pero tampoco es ético. Al descargar e instalar Dream League Soccer 2018 hackeado apk y obb estás violando los términos y condiciones del juego original. Por eso, te recomendamos que lo hagas bajo tu propia responsabilidad y que respetes a los desarrolladores del juego original.</p>
97
- <h4>¿Puedo jugar online con Dream League Soccer 2018 hackeado apk y obb?</h4>
98
- <p>No, no puedes. El juego hackeado solo funciona en el modo offline. Si intentas jugar online con el juego hackeado, es posible que te baneen o que no puedas conectarte al servidor. Por eso, te recomendamos que solo juegues offline con el juego hackeado.</p>
99
- <h4>¿Puedo actualizar Dream League Soccer 2018 hackeado apk y obb?</h4>
100
- <p>No, no puedes. El juego hackeado no se puede actualizar desde la tienda oficial ni desde ninguna otra fuente. Si intentas actualizar el juego hackeado, es posible que pierdas todos tus datos o que el juego deje de funcion ar. Por eso, te recomendamos que no actualices el juego hackeado.</p>
101
- <h4>¿Qué otras versiones de Dream League Soccer existen?</h4>
102
- <p>Además de Dream League Soccer 2018, existen otras versiones de Dream League Soccer que puedes descargar e instalar en tu dispositivo. Estas son algunas de ellas:</p>
103
- <ul>
104
- <li>Dream League Soccer 2019: la versión más reciente del juego, con gráficos mejorados, nuevos modos de juego y más opciones de personalización.</li>
105
- <li>Dream League Soccer Classic: la versión original del juego, con un estilo retro y una jugabilidad sencilla.</li>
106
- <li>Dream League Soccer 2016: la versión anterior a Dream League Soccer 2018, con un diseño similar y algunas diferencias en los jugadores y los torneos.</li>
107
- </ul>
108
- <p>Estas versiones también se pueden descargar e instalar hackeadas, siguiendo el mismo proceso que te hemos explicado para Dream League Soccer 2018.</p> 401be4b1e0<br />
109
- <br />
110
- <br />
spaces/1phancelerku/anime-remove-background/Download 3 Patti Live APK and Play Indian Poker with Real Players.md DELETED
@@ -1,125 +0,0 @@
1
- <br />
2
- <h1>3 Patti Live APK Download: How to Play and Win the Popular Indian Card Game</h1>
3
- <p>Are you a fan of card games and looking for a new challenge? If yes, then you should try 3 Patti Live, the online version of the famous Indian card game Teen Patti. 3 Patti Live is a thrilling and exciting game that combines skill, luck and strategy. You can play with real players from all over India and win real money.</p>
4
- <h2>3 patti live apk download</h2><br /><p><b><b>DOWNLOAD</b> ->->->-> <a href="https://jinyurl.com/2uNUii">https://jinyurl.com/2uNUii</a></b></p><br /><br />
5
- <p>In this article, we will tell you everything you need to know about 3 Patti Live APK download, how to play and win the game, and what are the best tips and tricks to master it. So, let's get started!</p>
6
- <h2>How to download and install 3 Patti Live APK on your Android device?</h2>
7
- <p>Downloading and installing 3 Patti Live APK on your Android device is very easy and simple. Just follow these steps:</p>
8
- <ol>
9
- <li>Go to <a href="(^1^)">this link</a> and click on the "Download APK" button.</li>
10
- <li>Once the download is complete, open the file and tap on "Install".</li>
11
- <li>If you see a message that says "Install blocked", go to your device settings and enable "Unknown sources".</li>
12
- <li>After the installation is done, launch the app and enjoy playing 3 Patti Live.</li>
13
- </ol>
14
- <h2>How to register and create an account on 3 Patti Live?</h2>
15
- <p>Before you can start playing 3 Patti Live, you need to register and create an account on the app. Here's how:</p>
16
- <ol>
17
- <li>Open the app and tap on "Register".</li>
18
- <li>Enter your mobile number and verify it with an OTP.</li>
19
- <li>Create a username and password for your account.</li>
20
- <li>Choose a preferred language and currency.</li>
21
- <li>That's it! You are now ready to play 3 Patti Live.</li>
22
- </ol>
23
- <h2>3 Patti Rules: Learn How to Play Teen Patti Card Game</h2>
24
- <p>Now that you have downloaded and installed 3 Patti Live APK on your device and created an account on the app, you need to learn how to play the game. Here are the basic rules of 3 Patti:</p>
25
- <ul>
26
- <li>The game is played with a standard 52-card deck without any jokers.</li>
27
- <li>The cards are ranked from high to low as follows: A-K-Q-J-10-9-8-7-6-5-4-3-2.</li>
28
- <li>The game can be played by up to six players at a time.</li>
29
- <li>Each player is dealt three cards face down by the dealer.</li>
30
- <li>The players have to place a minimum bet amount (called ante) in the pot before the cards are dealt.</li>
31
- <li>The player next to the dealer starts the betting round by either placing a blind bet (called chaal) or folding their cards (called pack).</li>
32
- <li>A blind bet is when the player bets without seeing their cards, while a seen bet is when the player bets after seeing their cards.</li>
33
- <li>A blind player can either bet the same amount as the previous blind player or double it, while a seen player can either bet twice or four times the amount of the previous blind player.</li>
34
- <li>The betting round continues until either all but one player fold their cards or two players remain for the showdown.</li>
35
- <li>The showdown is when the remaining players reveal their cards and compare them to determine the winner.</li>
36
- <li>The winner is the player who has the highest ranking hand according to the following order: Trail (three of a kind), Pure Sequence (straight flush), Sequence (straight), Color (flush), Pair (two of a kind), High Card.</li>
37
- </ul>
38
- <h2>The different variations of 3 Patti: Joker, Mufliss, King Little, etc.</h2>
39
- <p>One of the reasons why 3 Patti Live is so popular and fun is that it offers many different variations of the game that add more excitement and challenge. Here are some of the most common variations of 3 Patti:</p>
40
- <ul>
41
- <li>Joker: In this variation, one or more cards are randomly selected as jokers before the game starts. A joker can be used as any card to make a winning hand.</li>
42
- <li>Mufliss: In this variation, the ranking of the hands is reversed, meaning that the lowest ranking hand wins. For example, a high card beats a pair, a pair beats a color, and so on.</li>
43
- <li>King Little: In this variation, the king and the two are the highest and lowest cards respectively, instead of the ace and the two. For example, a K-Q-J is a pure sequence, while an A-2-3 is not.</li>
44
- <li>There are many other variations of 3 Patti that you can discover and play on 3 Patti Live, such as AK47, 999, 4X Boot, Faceoff, etc.</li>
45
- </ul>
46
- <h2>The tips and tricks for playing 3 Patti: studying opponents, bluffing wisely, managing chips, etc.</h2>
47
- <p>Playing 3 Patti Live is not only about luck, but also about skill and strategy. If you want to improve your chances of winning and become a pro player, you need to follow some tips and tricks that will help you play better. Here are some of them:</p>
48
- <ul>
49
- <li>Study your opponents: Observe how your opponents play and try to figure out their patterns, habits, strengths and weaknesses. For example, if you notice that an opponent always folds when faced with a big bet, you can use that to your advantage and bluff them out of the pot.</li>
50
- <li>Bluff wisely: Bluffing is an essential part of 3 Patti Live, as it can help you win pots with weak hands or make your opponents fold stronger hands. However, you need to bluff wisely and not too often, otherwise you will lose credibility and money. For example, you should bluff when you have some outs or when you sense weakness in your opponents.</li>
51
- <li>Manage your chips: Chips are your lifeline in 3 Patti Live, as they allow you to play and win more games. Therefore, you need to manage your chips wisely and not waste them on unnecessary bets or risky moves. For example, you should set a budget for each game and stick to it, avoid playing too many hands or chasing losses, and know when to quit while ahead.</li>
52
- </ul>
53
- <h2>3 Patti Strategies: How to Win Teen Patti Card Game</h2>
54
- <p>Besides following the tips and tricks mentioned above, you also need to apply some strategies that will help you win more games and money on 3 Patti Live. Here are some of the best strategies for playing 3 Patti:</p>
55
- <p>3 patti live game download for android<br />
56
- 3 patti live online play with real players<br />
57
- 3 patti live casino apk free download<br />
58
- 3 patti live mod apk unlimited chips<br />
59
- 3 patti live hack apk download latest version<br />
60
- 3 patti live app download for pc<br />
61
- 3 patti live indian poker apk download<br />
62
- 3 patti live flush card game download<br />
63
- 3 patti live variations joker ak47 apk<br />
64
- 3 patti live royal war apk download<br />
65
- 3 patti live private table apk download<br />
66
- 3 patti love - 3 patti apk download[^1^]<br />
67
- 3 patti gold live with real players apk download<br />
68
- 3 patti star live indian poker apk download<br />
69
- 3 patti ultimate plus live apk download<br />
70
- 3 patti superstar live teen patti apk download<br />
71
- 3 patti power - live indian poker apk download<br />
72
- 3 patti champion - live card game apk download<br />
73
- 3 patti pro - live teenpatti flush poker apk download<br />
74
- 3 patti king - live indian poker game apk download<br />
75
- 3 patti master - live online card game apk download<br />
76
- 3 patti legend - live teenpatti flush rummy apk download<br />
77
- 3 patti diamond - live poker card game apk download<br />
78
- 3 patti classic - live indian poker flush apk download<br />
79
- 3 patti deluxe - live teenpatti card game apk download<br />
80
- 3 patti express - live online poker game apk download<br />
81
- 3 patti fantasy - live teenpatti rummy game apk download<br />
82
- 3 patti frenzy - live indian poker flush game apk download<br />
83
- 3 patti glory - live teenpatti card game apk download<br />
84
- 3 patti grand - live online poker flush game apk download<br />
85
- 3 patti joy - live teenpatti rummy game apk download<br />
86
- 3 patti magic - live indian poker card game apk download<br />
87
- 3 patti marvel - live teenpatti flush rummy game apk download<br />
- 3 patti mega - live online poker card game apk download<br />
- 3 patti miracle - live teenpatti rummy game apk download<br />
- 3 patti platinum - live indian poker flush game apk download<br />
- 3 patti premium - live teenpatti card game apk download<br />
- 3 patti prime - live online poker flush game apk download<br />
- 3 patti quest - live teenpatti rummy game apk download<br />
- 3 patti royal - live indian poker card game apk download<br />
- 3 patti silver - live teenpatti flush rummy game apk download<br />
- 3 patti starlight - live online poker card game apk download<br />
- 3 patti supreme - live teenpatti rummy game apk download<br />
- 3 patti turbo - live indian poker flush game apk download<br />
- 3 patti ultimate - live teenpatti card game apk download<br />
- 3 patti wonder - live online poker flush game apk download<br />
- how to play and win in 3 patti live online casino games <br />
- best tips and tricks for playing and winning in the latest version of the popular Indian card game, Teen Pati Live</p>
- <ul>
- <li>Choose the right table: Before joining a table on 3 Patti Live, you need to choose one that suits your skill level, budget and preferences. For example, you should look for a table with low stakes if you are a beginner or have a small bankroll, or a table with high stakes if you are an expert or have a large bankroll. You should also look for a table with fewer players if you want more action or a table with more players if you want more competition.</li>
- <li>Know when to fold, raise or call: One of the most important decisions in 3 Patti Live is whether to fold, raise or call in each betting round. You need to know when to do each one based on your cards, your position, your opponents and the pot size. For example, you should fold if you have a weak hand and face a big bet, raise if you have a strong hand and want to increase the pot, or call if you have a decent hand and want to see more cards.</li>
- <li>Play according to your position: Your position in 3 Patti Live is determined by the order in which you act in each betting round. The closer you are to the dealer, the better your position is, as you have more information and control over the game. For example, you should play more aggressively if you are in a late position, as you can take advantage of the previous actions of your opponents, or play more cautiously if you are in an early position, as you have less information and more risk.</li>
- </ul>
- <h2>Conclusion</h2>
- <p>3 Patti Live is a great way to enjoy the popular Indian card game Teen Patti online. You can download and install 3 Patti Live APK on your Android device easily and play with real players from all over India. You can also learn how to play and win the game by following the rules, variations, tips, tricks and strategies that we have shared in this article. So, what are you waiting for? Download 3 Patti Live APK today and start playing and winning!</p>
- <h2>FAQs</h2>
- <p>Here are some of the frequently asked questions about 3 Patti Live APK download:</p>
- <ol>
- <li>Q: Is 3 Patti Live APK safe and secure?</li>
- <li>A: Yes, 3 Patti Live APK is safe and secure to download and install on your device. The app uses advanced encryption and security measures to protect your personal and financial information. You can also contact the customer support team anytime if you have any issues or queries.</li>
- <li>Q: How can I deposit and withdraw money on 3 Patti Live?</li>
- <li>A: You can deposit and withdraw money on 3 Patti Live using various methods such as credit cards, debit cards, net banking, UPI, Paytm, etc. The transactions are fast and hassle-free, and you can withdraw your winnings anytime you want.</li>
- <li>Q: What are the bonuses and rewards on 3 Patti Live?</li>
- <li>A: 3 Patti Live offers many bonuses and rewards for its players, such as welcome bonus, referral bonus, loyalty bonus, daily bonus, etc. You can also participate in various tournaments and events on the app and win big prizes.</li>
- <li>Q: Can I play 3 Patti Live with my friends?</li>
- <li>A: Yes, you can play 3 Patti Live with your friends by inviting them to join the app using your referral code. You can also create private tables on the app and play with your friends exclusively.</li>
- <li>Q: Can I play 3 Patti Live offline?</li>
- <li>A: No, you cannot play 3 Patti Live offline, as it is an online game that requires an internet connection. However, you can play 3 Patti Live with low data consumption and enjoy a smooth gaming experience.</li>
- </ol></p> 401be4b1e0<br />
- <br />
- <br />
spaces/1vash/demo-flask-docker-template/Dockerfile DELETED
@@ -1,32 +0,0 @@
- # Use the official Python base image
- FROM python:3.9
-
- # Set the working directory in the container
- WORKDIR /app
-
- # Copy the requirements.txt file and install the Python dependencies
- COPY requirements.txt .
- RUN pip install --no-cache-dir -r requirements.txt
-
- # Set up a new user named "user" with user ID 1000
- RUN useradd -m -u 1000 user
- # Switch to the "user" user
- USER user
- # Set home to the user's home directory
- ENV HOME=/home/user \
- PATH=/home/user/.local/bin:$PATH
-
- # Set the working directory to the user's home directory
- WORKDIR $HOME/app
-
- # Copy the current directory contents into the container at $HOME/app setting the owner to the user
- COPY --chown=user . $HOME/app
-
- # Expose the port on which the Flask application will run
- EXPOSE 5000
-
- # Set the environment variable for Flask
- ENV FLASK_APP=api_server.py
-
- # Run the Flask application
- CMD ["flask", "run", "--host=0.0.0.0"]
spaces/AchyuthGamer/OpenGPT/client/css/field.css DELETED
@@ -1,11 +0,0 @@
- .field {
- display: flex;
- align-items: center;
- padding: 4px;
- }
-
- @media screen and (max-width: 990px) {
- .field {
- flex-wrap: nowrap;
- }
- }
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/Bing.py DELETED
@@ -1,300 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import random
4
- import uuid
5
- import json
6
- import os
7
- import uuid
8
- import urllib.parse
9
- from aiohttp import ClientSession, ClientTimeout
10
- from ..typing import AsyncGenerator
11
- from .base_provider import AsyncGeneratorProvider
12
-
13
- class Tones():
14
- creative = "Creative"
15
- balanced = "Balanced"
16
- precise = "Precise"
17
-
18
- default_cookies = {
19
- 'SRCHD' : 'AF=NOFORM',
20
- 'PPLState' : '1',
21
- 'KievRPSSecAuth': '',
22
- 'SUID' : '',
23
- 'SRCHUSR' : '',
24
- 'SRCHHPGUSR' : '',
25
- }
26
-
27
- class Bing(AsyncGeneratorProvider):
28
- url = "https://bing.com/chat"
29
- working = True
30
- supports_gpt_4 = True
31
-
32
- @staticmethod
33
- def create_async_generator(
34
- model: str,
35
- messages: list[dict[str, str]],
36
- cookies: dict = None,
37
- tone: str = Tones.creative,
38
- **kwargs
39
- ) -> AsyncGenerator:
40
- if len(messages) < 2:
41
- prompt = messages[0]["content"]
42
- context = None
43
- else:
44
- prompt = messages[-1]["content"]
45
- context = create_context(messages[:-1])
46
-
47
- if not cookies or "SRCHD" not in cookies:
48
- cookies = default_cookies
49
- return stream_generate(prompt, tone, context, cookies)
50
-
51
- def create_context(messages: list[dict[str, str]]):
52
- context = "".join(f"[{message['role']}](#message)\n{message['content']}\n\n" for message in messages)
53
-
54
- return context
55
-
56
- class Conversation():
57
- def __init__(self, conversationId: str, clientId: str, conversationSignature: str) -> None:
58
- self.conversationId = conversationId
59
- self.clientId = clientId
60
- self.conversationSignature = conversationSignature
61
-
62
- async def create_conversation(session: ClientSession) -> Conversation:
63
- url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1150.3'
64
-
65
- async with await session.get(url) as response:
66
- data = await response.json()
67
-
68
- conversationId = data.get('conversationId')
69
- clientId = data.get('clientId')
70
- conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
71
-
72
- if not conversationId or not clientId or not conversationSignature:
73
- raise Exception('Failed to create conversation.')
74
-
75
- return Conversation(conversationId, clientId, conversationSignature)
76
-
77
- async def list_conversations(session: ClientSession) -> list:
78
- url = "https://www.bing.com/turing/conversation/chats"
79
- async with session.get(url) as response:
80
- response = await response.json()
81
- return response["chats"]
82
-
83
- async def delete_conversation(session: ClientSession, conversation: Conversation) -> list:
84
- url = "https://sydney.bing.com/sydney/DeleteSingleConversation"
85
- json = {
86
- "conversationId": conversation.conversationId,
87
- "conversationSignature": conversation.conversationSignature,
88
- "participant": {"id": conversation.clientId},
89
- "source": "cib",
90
- "optionsSets": ["autosave"]
91
- }
92
- async with session.post(url, json=json) as response:
93
- response = await response.json()
94
- return response["result"]["value"] == "Success"
95
-
96
- class Defaults:
97
- delimiter = "\x1e"
98
- ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
99
-
100
- allowedMessageTypes = [
101
- "Chat",
102
- "Disengaged",
103
- "AdsQuery",
104
- "SemanticSerp",
105
- "GenerateContentQuery",
106
- "SearchQuery",
107
- "ActionRequest",
108
- "Context",
109
- "Progress",
110
- "AdsQuery",
111
- "SemanticSerp",
112
- ]
113
-
114
- sliceIds = [
115
- "winmuid3tf",
116
- "osbsdusgreccf",
117
- "ttstmout",
118
- "crchatrev",
119
- "winlongmsgtf",
120
- "ctrlworkpay",
121
- "norespwtf",
122
- "tempcacheread",
123
- "temptacache",
124
- "505scss0",
125
- "508jbcars0",
126
- "515enbotdets0",
127
- "5082tsports",
128
- "515vaoprvs",
129
- "424dagslnv1s0",
130
- "kcimgattcf",
131
- "427startpms0",
132
- ]
133
-
134
- location = {
135
- "locale": "en-US",
136
- "market": "en-US",
137
- "region": "US",
138
- "locationHints": [
139
- {
140
- "country": "United States",
141
- "state": "California",
142
- "city": "Los Angeles",
143
- "timezoneoffset": 8,
144
- "countryConfidence": 8,
145
- "Center": {"Latitude": 34.0536909, "Longitude": -118.242766},
146
- "RegionType": 2,
147
- "SourceType": 1,
148
- }
149
- ],
150
- }
151
-
152
- headers = {
153
- 'accept': '*/*',
154
- 'accept-language': 'en-US,en;q=0.9',
155
- 'cache-control': 'max-age=0',
156
- 'sec-ch-ua': '"Chromium";v="110", "Not A(Brand";v="24", "Microsoft Edge";v="110"',
157
- 'sec-ch-ua-arch': '"x86"',
158
- 'sec-ch-ua-bitness': '"64"',
159
- 'sec-ch-ua-full-version': '"110.0.1587.69"',
160
- 'sec-ch-ua-full-version-list': '"Chromium";v="110.0.5481.192", "Not A(Brand";v="24.0.0.0", "Microsoft Edge";v="110.0.1587.69"',
161
- 'sec-ch-ua-mobile': '?0',
162
- 'sec-ch-ua-model': '""',
163
- 'sec-ch-ua-platform': '"Windows"',
164
- 'sec-ch-ua-platform-version': '"15.0.0"',
165
- 'sec-fetch-dest': 'document',
166
- 'sec-fetch-mode': 'navigate',
167
- 'sec-fetch-site': 'none',
168
- 'sec-fetch-user': '?1',
169
- 'upgrade-insecure-requests': '1',
170
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
171
- 'x-edge-shopping-flag': '1',
172
- 'x-forwarded-for': ip_address,
173
- }
174
-
175
- optionsSets = [
176
- 'saharasugg',
177
- 'enablenewsfc',
178
- 'clgalileo',
179
- 'gencontentv3',
180
- "nlu_direct_response_filter",
181
- "deepleo",
182
- "disable_emoji_spoken_text",
183
- "responsible_ai_policy_235",
184
- "enablemm",
185
- "h3precise"
186
- "dtappid",
187
- "cricinfo",
188
- "cricinfov2",
189
- "dv3sugg",
190
- "nojbfedge"
191
- ]
192
-
193
- def format_message(msg: dict) -> str:
194
- return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
195
-
196
- def create_message(conversation: Conversation, prompt: str, tone: str, context: str=None) -> str:
197
- request_id = str(uuid.uuid4())
198
- struct = {
199
- 'arguments': [
200
- {
201
- 'source': 'cib',
202
- 'optionsSets': Defaults.optionsSets,
203
- 'allowedMessageTypes': Defaults.allowedMessageTypes,
204
- 'sliceIds': Defaults.sliceIds,
205
- 'traceId': os.urandom(16).hex(),
206
- 'isStartOfSession': True,
207
- 'requestId': request_id,
208
- 'message': Defaults.location | {
209
- 'author': 'user',
210
- 'inputMethod': 'Keyboard',
211
- 'text': prompt,
212
- 'messageType': 'Chat',
213
- 'requestId': request_id,
214
- 'messageId': request_id,
215
- },
216
- 'tone': tone,
217
- 'spokenTextMode': 'None',
218
- 'conversationId': conversation.conversationId,
219
- 'participant': {
220
- 'id': conversation.clientId
221
- },
222
- }
223
- ],
224
- 'invocationId': '1',
225
- 'target': 'chat',
226
- 'type': 4
227
- }
228
-
229
- if context:
230
- struct['arguments'][0]['previousMessages'] = [{
231
- "author": "user",
232
- "description": context,
233
- "contextType": "WebPage",
234
- "messageType": "Context",
235
- "messageId": "discover-web--page-ping-mriduna-----"
236
- }]
237
- return format_message(struct)
238
-
239
- async def stream_generate(
240
- prompt: str,
241
- tone: str,
242
- context: str=None,
243
- cookies: dict=None,
244
- ):
245
- async with ClientSession(
246
- timeout=ClientTimeout(total=900),
247
- cookies=cookies,
248
- headers=Defaults.headers,
249
- ) as session:
250
- conversation = await create_conversation(session)
251
- try:
252
- async with session.ws_connect(
253
- f'wss://sydney.bing.com/sydney/ChatHub',
254
- autoping=False,
255
- params={'sec_access_token': conversation.conversationSignature}
256
- ) as wss:
257
-
258
- await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
259
- await wss.receive(timeout=900)
260
- await wss.send_str(create_message(conversation, prompt, tone, context))
261
-
262
- response_txt = ''
263
- returned_text = ''
264
- final = False
265
-
266
- while not final:
267
- msg = await wss.receive(timeout=900)
268
- objects = msg.data.split(Defaults.delimiter)
269
- for obj in objects:
270
- if obj is None or not obj:
271
- continue
272
-
273
- response = json.loads(obj)
274
- if response.get('type') == 1 and response['arguments'][0].get('messages'):
275
- message = response['arguments'][0]['messages'][0]
276
- if (message['contentOrigin'] != 'Apology'):
277
- if 'adaptiveCards' in message:
278
- card = message['adaptiveCards'][0]['body'][0]
279
- if "text" in card:
280
- response_txt = card.get('text')
281
- if message.get('messageType'):
282
- inline_txt = card['inlines'][0].get('text')
283
- response_txt += inline_txt + '\n'
284
- elif message.get('contentType') == "IMAGE":
285
- query = urllib.parse.quote(message.get('text'))
286
- url = f"\nhttps://www.bing.com/images/create?q={query}"
287
- response_txt += url
288
- final = True
289
- if response_txt.startswith(returned_text):
290
- new = response_txt[len(returned_text):]
291
- if new != "\n":
292
- yield new
293
- returned_text = response_txt
294
- elif response.get('type') == 2:
295
- result = response['item']['result']
296
- if result.get('error'):
297
- raise Exception(f"{result['value']}: {result['message']}")
298
- return
299
- finally:
300
- await delete_conversation(session, conversation)
spaces/AchyuthGamer/OpenGPT/g4f/Provider/Providers/deprecated/PerplexityAi.py DELETED
@@ -1,101 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import json
4
- import time
5
- import base64
6
- from curl_cffi.requests import AsyncSession
7
-
8
- from ..base_provider import AsyncProvider, format_prompt, get_cookies
9
-
10
-
11
- class PerplexityAi(AsyncProvider):
12
- url = "https://www.perplexity.ai"
13
- working = False
14
- supports_gpt_35_turbo = True
15
- _sources = []
16
-
17
- @classmethod
18
- async def create_async(
19
- cls,
20
- model: str,
21
- messages: list[dict[str, str]],
22
- proxy: str = None,
23
- **kwargs
24
- ) -> str:
25
- url = cls.url + "/socket.io/?EIO=4&transport=polling"
26
- headers = {
27
- "Referer": f"{cls.url}/"
28
- }
29
- async with AsyncSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107") as session:
30
- url_session = "https://www.perplexity.ai/api/auth/session"
31
- response = await session.get(url_session)
32
- response.raise_for_status()
33
-
34
- url_session = "https://www.perplexity.ai/api/auth/session"
35
- response = await session.get(url_session)
36
- response.raise_for_status()
37
-
38
- response = await session.get(url, params={"t": timestamp()})
39
- response.raise_for_status()
40
- sid = json.loads(response.text[1:])["sid"]
41
-
42
- response = await session.get(url, params={"t": timestamp(), "sid": sid})
43
- response.raise_for_status()
44
-
45
- data = '40{"jwt":"anonymous-ask-user"}'
46
- response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
47
- response.raise_for_status()
48
-
49
- response = await session.get(url, params={"t": timestamp(), "sid": sid})
50
- response.raise_for_status()
51
-
52
- data = "424" + json.dumps([
53
- "perplexity_ask",
54
- format_prompt(messages),
55
- {
56
- "version":"2.1",
57
- "source":"default",
58
- "language":"en",
59
- "timezone": time.tzname[0],
60
- "search_focus":"internet",
61
- "mode":"concise"
62
- }
63
- ])
64
- response = await session.post(url, params={"t": timestamp(), "sid": sid}, data=data)
65
- response.raise_for_status()
66
-
67
- while True:
68
- response = await session.get(url, params={"t": timestamp(), "sid": sid})
69
- response.raise_for_status()
70
- for line in response.text.splitlines():
71
- if line.startswith("434"):
72
- result = json.loads(json.loads(line[3:])[0]["text"])
73
-
74
- cls._sources = [{
75
- "title": source["name"],
76
- "url": source["url"],
77
- "snippet": source["snippet"]
78
- } for source in result["web_results"]]
79
-
80
- return result["answer"]
81
-
82
- @classmethod
83
- def get_sources(cls):
84
- return cls._sources
85
-
86
-
87
- @classmethod
88
- @property
89
- def params(cls):
90
- params = [
91
- ("model", "str"),
92
- ("messages", "list[dict[str, str]]"),
93
- ("stream", "bool"),
94
- ("proxy", "str"),
95
- ]
96
- param = ", ".join([": ".join(p) for p in params])
97
- return f"g4f.provider.{cls.__name__} supports: ({param})"
98
-
99
-
100
- def timestamp() -> str:
101
- return base64.urlsafe_b64encode(int(time.time()-1407782612).to_bytes(4, 'big')).decode()
spaces/Adapter/CoAdapter/ldm/modules/image_degradation/bsrgan_light.py DELETED
@@ -1,651 +0,0 @@
1
- # -*- coding: utf-8 -*-
2
- import numpy as np
3
- import cv2
4
- import torch
5
-
6
- from functools import partial
7
- import random
8
- from scipy import ndimage
9
- import scipy
10
- import scipy.stats as ss
11
- from scipy.interpolate import interp2d
12
- from scipy.linalg import orth
13
- import albumentations
14
-
15
- import ldm.modules.image_degradation.utils_image as util
16
-
17
- """
18
- # --------------------------------------------
19
- # Super-Resolution
20
- # --------------------------------------------
21
- #
22
- # Kai Zhang ([email protected])
23
- # https://github.com/cszn
24
- # From 2019/03--2021/08
25
- # --------------------------------------------
26
- """
27
-
28
- def modcrop_np(img, sf):
29
- '''
30
- Args:
31
- img: numpy image, WxH or WxHxC
32
- sf: scale factor
33
- Return:
34
- cropped image
35
- '''
36
- w, h = img.shape[:2]
37
- im = np.copy(img)
38
- return im[:w - w % sf, :h - h % sf, ...]
39
-
40
-
41
- """
42
- # --------------------------------------------
43
- # anisotropic Gaussian kernels
44
- # --------------------------------------------
45
- """
46
-
47
-
48
- def analytic_kernel(k):
49
- """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
50
- k_size = k.shape[0]
51
- # Calculate the big kernels size
52
- big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
53
- # Loop over the small kernel to fill the big one
54
- for r in range(k_size):
55
- for c in range(k_size):
56
- big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
57
- # Crop the edges of the big kernel to ignore very small values and increase run time of SR
58
- crop = k_size // 2
59
- cropped_big_k = big_k[crop:-crop, crop:-crop]
60
- # Normalize to 1
61
- return cropped_big_k / cropped_big_k.sum()
62
-
63
-
64
- def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
65
- """ generate an anisotropic Gaussian kernel
66
- Args:
67
- ksize : e.g., 15, kernel size
68
- theta : [0, pi], rotation angle range
69
- l1 : [0.1,50], scaling of eigenvalues
70
- l2 : [0.1,l1], scaling of eigenvalues
71
- If l1 = l2, will get an isotropic Gaussian kernel.
72
- Returns:
73
- k : kernel
74
- """
75
-
76
- v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
77
- V = np.array([[v[0], v[1]], [v[1], -v[0]]])
78
- D = np.array([[l1, 0], [0, l2]])
79
- Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
80
- k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
81
-
82
- return k
83
-
84
-
85
- def gm_blur_kernel(mean, cov, size=15):
86
- center = size / 2.0 + 0.5
87
- k = np.zeros([size, size])
88
- for y in range(size):
89
- for x in range(size):
90
- cy = y - center + 1
91
- cx = x - center + 1
92
- k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
93
-
94
- k = k / np.sum(k)
95
- return k
96
-
97
-
98
- def shift_pixel(x, sf, upper_left=True):
99
- """shift pixel for super-resolution with different scale factors
100
- Args:
101
- x: WxHxC or WxH
102
- sf: scale factor
103
- upper_left: shift direction
104
- """
105
- h, w = x.shape[:2]
106
- shift = (sf - 1) * 0.5
107
- xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
108
- if upper_left:
109
- x1 = xv + shift
110
- y1 = yv + shift
111
- else:
112
- x1 = xv - shift
113
- y1 = yv - shift
114
-
115
- x1 = np.clip(x1, 0, w - 1)
116
- y1 = np.clip(y1, 0, h - 1)
117
-
118
- if x.ndim == 2:
119
- x = interp2d(xv, yv, x)(x1, y1)
120
- if x.ndim == 3:
121
- for i in range(x.shape[-1]):
122
- x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
123
-
124
- return x
125
-
126
-
127
- def blur(x, k):
128
- '''
129
- x: image, NxcxHxW
130
- k: kernel, Nx1xhxw
131
- '''
132
- n, c = x.shape[:2]
133
- p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
134
- x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
135
- k = k.repeat(1, c, 1, 1)
136
- k = k.view(-1, 1, k.shape[2], k.shape[3])
137
- x = x.view(1, -1, x.shape[2], x.shape[3])
138
- x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
139
- x = x.view(n, c, x.shape[2], x.shape[3])
140
-
141
- return x
142
-
143
-
144
- def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
145
- """"
146
- # modified version of https://github.com/assafshocher/BlindSR_dataset_generator
147
- # Kai Zhang
148
- # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
149
- # max_var = 2.5 * sf
150
- """
151
- # Set random eigen-vals (lambdas) and angle (theta) for COV matrix
152
- lambda_1 = min_var + np.random.rand() * (max_var - min_var)
153
- lambda_2 = min_var + np.random.rand() * (max_var - min_var)
154
- theta = np.random.rand() * np.pi # random theta
155
- noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
156
-
157
- # Set COV matrix using Lambdas and Theta
158
- LAMBDA = np.diag([lambda_1, lambda_2])
159
- Q = np.array([[np.cos(theta), -np.sin(theta)],
160
- [np.sin(theta), np.cos(theta)]])
161
- SIGMA = Q @ LAMBDA @ Q.T
162
- INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
163
-
164
- # Set expectation position (shifting kernel for aligned image)
165
- MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
166
- MU = MU[None, None, :, None]
167
-
168
- # Create meshgrid for Gaussian
169
- [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
170
- Z = np.stack([X, Y], 2)[:, :, :, None]
171
-
172
- # Calcualte Gaussian for every pixel of the kernel
173
- ZZ = Z - MU
174
- ZZ_t = ZZ.transpose(0, 1, 3, 2)
175
- raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
176
-
177
- # shift the kernel so it will be centered
178
- # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
179
-
180
- # Normalize the kernel and return
181
- # kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
182
- kernel = raw_kernel / np.sum(raw_kernel)
183
- return kernel
184
-
185
-
186
- def fspecial_gaussian(hsize, sigma):
187
- hsize = [hsize, hsize]
188
- siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
189
- std = sigma
190
- [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
191
- arg = -(x * x + y * y) / (2 * std * std)
192
- h = np.exp(arg)
193
- h[h < scipy.finfo(float).eps * h.max()] = 0
194
- sumh = h.sum()
195
- if sumh != 0:
196
- h = h / sumh
197
- return h
198
-
199
-
200
- def fspecial_laplacian(alpha):
201
- alpha = max([0, min([alpha, 1])])
202
- h1 = alpha / (alpha + 1)
203
- h2 = (1 - alpha) / (alpha + 1)
204
- h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
205
- h = np.array(h)
206
- return h
207
-
208
-
209
- def fspecial(filter_type, *args, **kwargs):
210
- '''
211
- python code from:
212
- https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
213
- '''
214
- if filter_type == 'gaussian':
215
- return fspecial_gaussian(*args, **kwargs)
216
- if filter_type == 'laplacian':
217
- return fspecial_laplacian(*args, **kwargs)
218
-
219
-
220
- """
221
- # --------------------------------------------
222
- # degradation models
223
- # --------------------------------------------
224
- """
225
-
226
-
227
- def bicubic_degradation(x, sf=3):
228
- '''
229
- Args:
230
- x: HxWxC image, [0, 1]
231
- sf: down-scale factor
232
- Return:
233
- bicubicly downsampled LR image
234
- '''
235
- x = util.imresize_np(x, scale=1 / sf)
236
- return x
237
-
238
-
239
- def srmd_degradation(x, k, sf=3):
240
- ''' blur + bicubic downsampling
241
- Args:
242
- x: HxWxC image, [0, 1]
243
- k: hxw, double
244
- sf: down-scale factor
245
- Return:
246
- downsampled LR image
247
- Reference:
248
- @inproceedings{zhang2018learning,
249
- title={Learning a single convolutional super-resolution network for multiple degradations},
250
- author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
251
- booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
252
- pages={3262--3271},
253
- year={2018}
254
- }
255
- '''
256
- x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror'
257
- x = bicubic_degradation(x, sf=sf)
258
- return x
259
-
260
-
261
- def dpsr_degradation(x, k, sf=3):
262
- ''' bicubic downsampling + blur
263
- Args:
264
- x: HxWxC image, [0, 1]
265
- k: hxw, double
266
- sf: down-scale factor
267
- Return:
268
- downsampled LR image
269
- Reference:
270
- @inproceedings{zhang2019deep,
271
- title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
272
- author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
273
- booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
274
- pages={1671--1681},
275
- year={2019}
276
- }
277
- '''
278
- x = bicubic_degradation(x, sf=sf)
279
- x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
280
- return x
281
-
282
-
283
- def classical_degradation(x, k, sf=3):
284
- ''' blur + downsampling
285
- Args:
286
- x: HxWxC image, [0, 1]/[0, 255]
287
- k: hxw, double
288
- sf: down-scale factor
289
- Return:
290
- downsampled LR image
291
- '''
292
- x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
293
- # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
294
- st = 0
295
- return x[st::sf, st::sf, ...]
296
-
297
-
298
- def add_sharpening(img, weight=0.5, radius=50, threshold=10):
299
- """USM sharpening. borrowed from real-ESRGAN
300
- Input image: I; Blurry image: B.
301
- 1. K = I + weight * (I - B)
302
- 2. Mask = 1 if abs(I - B) > threshold, else: 0
303
- 3. Blur mask:
304
- 4. Out = Mask * K + (1 - Mask) * I
305
- Args:
306
- img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
307
- weight (float): Sharp weight. Default: 1.
308
- radius (float): Kernel size of Gaussian blur. Default: 50.
309
- threshold (int):
310
- """
311
- if radius % 2 == 0:
312
- radius += 1
313
- blur = cv2.GaussianBlur(img, (radius, radius), 0)
314
- residual = img - blur
315
- mask = np.abs(residual) * 255 > threshold
316
- mask = mask.astype('float32')
317
- soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
318
-
319
- K = img + weight * residual
320
- K = np.clip(K, 0, 1)
321
- return soft_mask * K + (1 - soft_mask) * img
322
-
323
-
324
- def add_blur(img, sf=4):
325
- wd2 = 4.0 + sf
326
- wd = 2.0 + 0.2 * sf
327
-
328
- wd2 = wd2/4
329
- wd = wd/4
330
-
331
- if random.random() < 0.5:
332
- l1 = wd2 * random.random()
333
- l2 = wd2 * random.random()
334
- k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
335
- else:
336
- k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
337
- img = ndimage.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
338
-
339
- return img
340
-
341
-
342
- def add_resize(img, sf=4):
343
- rnum = np.random.rand()
344
- if rnum > 0.8: # up
345
- sf1 = random.uniform(1, 2)
346
- elif rnum < 0.7: # down
347
- sf1 = random.uniform(0.5 / sf, 1)
348
- else:
349
- sf1 = 1.0
350
- img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
351
- img = np.clip(img, 0.0, 1.0)
352
-
353
- return img
354
-
355
-
356
- # def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
357
- # noise_level = random.randint(noise_level1, noise_level2)
358
- # rnum = np.random.rand()
359
- # if rnum > 0.6: # add color Gaussian noise
360
- # img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
361
- # elif rnum < 0.4: # add grayscale Gaussian noise
362
- # img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
363
- # else: # add noise
364
- # L = noise_level2 / 255.
365
- # D = np.diag(np.random.rand(3))
366
- # U = orth(np.random.rand(3, 3))
367
- # conv = np.dot(np.dot(np.transpose(U), D), U)
368
- # img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
369
- # img = np.clip(img, 0.0, 1.0)
370
- # return img
371
-
372
- def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
373
- noise_level = random.randint(noise_level1, noise_level2)
374
- rnum = np.random.rand()
375
- if rnum > 0.6: # add color Gaussian noise
376
- img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
377
- elif rnum < 0.4: # add grayscale Gaussian noise
378
- img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
379
- else: # add noise
380
- L = noise_level2 / 255.
381
- D = np.diag(np.random.rand(3))
382
- U = orth(np.random.rand(3, 3))
383
- conv = np.dot(np.dot(np.transpose(U), D), U)
384
- img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
385
- img = np.clip(img, 0.0, 1.0)
386
- return img
387
-
388
-
389
- def add_speckle_noise(img, noise_level1=2, noise_level2=25):
390
- noise_level = random.randint(noise_level1, noise_level2)
391
- img = np.clip(img, 0.0, 1.0)
392
- rnum = random.random()
393
- if rnum > 0.6:
394
- img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
395
- elif rnum < 0.4:
396
- img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
397
- else:
398
- L = noise_level2 / 255.
399
- D = np.diag(np.random.rand(3))
400
- U = orth(np.random.rand(3, 3))
401
- conv = np.dot(np.dot(np.transpose(U), D), U)
402
- img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
403
- img = np.clip(img, 0.0, 1.0)
404
- return img
405
-
406
-
407
- def add_Poisson_noise(img):
408
- img = np.clip((img * 255.0).round(), 0, 255) / 255.
409
- vals = 10 ** (2 * random.random() + 2.0) # [2, 4]
410
- if random.random() < 0.5:
411
- img = np.random.poisson(img * vals).astype(np.float32) / vals
412
- else:
413
- img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
414
- img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
415
- noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
416
- img += noise_gray[:, :, np.newaxis]
417
- img = np.clip(img, 0.0, 1.0)
418
- return img
419
-
420
-
421
- def add_JPEG_noise(img):
422
- quality_factor = random.randint(80, 95)
423
- img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
424
- result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
425
- img = cv2.imdecode(encimg, 1)
426
- img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
427
- return img
428
-
429
-
430
- def random_crop(lq, hq, sf=4, lq_patchsize=64):
431
- h, w = lq.shape[:2]
432
- rnd_h = random.randint(0, h - lq_patchsize)
433
- rnd_w = random.randint(0, w - lq_patchsize)
434
- lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
435
-
436
- rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
437
- hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
438
- return lq, hq
439
-
440
-
441
- def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
442
- """
443
- This is the degradation model of BSRGAN from the paper
444
- "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
445
- ----------
446
- img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf)
447
- sf: scale factor
448
- isp_model: camera ISP model
449
- Returns
450
- -------
451
- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
452
- hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
453
- """
454
- isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
455
- sf_ori = sf
456
-
457
- h1, w1 = img.shape[:2]
458
- img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
459
- h, w = img.shape[:2]
460
-
461
- if h < lq_patchsize * sf or w < lq_patchsize * sf:
462
- raise ValueError(f'img size ({h1}X{w1}) is too small!')
463
-
464
- hq = img.copy()
465
-
466
- if sf == 4 and random.random() < scale2_prob: # downsample1
467
- if np.random.rand() < 0.5:
468
- img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
469
- interpolation=random.choice([1, 2, 3]))
470
- else:
471
- img = util.imresize_np(img, 1 / 2, True)
472
- img = np.clip(img, 0.0, 1.0)
473
- sf = 2
474
-
475
- shuffle_order = random.sample(range(7), 7)
476
- idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
477
- if idx1 > idx2: # keep downsample3 last
478
- shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
479
-
480
- for i in shuffle_order:
481
-
482
- if i == 0:
483
- img = add_blur(img, sf=sf)
484
-
485
- elif i == 1:
486
- img = add_blur(img, sf=sf)
487
-
488
- elif i == 2:
489
- a, b = img.shape[1], img.shape[0]
490
- # downsample2
491
- if random.random() < 0.75:
492
- sf1 = random.uniform(1, 2 * sf)
493
- img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
494
- interpolation=random.choice([1, 2, 3]))
495
- else:
496
- k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
497
- k_shifted = shift_pixel(k, sf)
498
- k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
499
- img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
500
- img = img[0::sf, 0::sf, ...] # nearest downsampling
501
- img = np.clip(img, 0.0, 1.0)
502
-
503
- elif i == 3:
504
- # downsample3
505
- img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
506
- img = np.clip(img, 0.0, 1.0)
507
-
508
- elif i == 4:
509
- # add Gaussian noise
510
- img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
511
-
512
- elif i == 5:
513
- # add JPEG noise
514
- if random.random() < jpeg_prob:
515
- img = add_JPEG_noise(img)
516
-
517
- elif i == 6:
518
- # add processed camera sensor noise
519
- if random.random() < isp_prob and isp_model is not None:
520
- with torch.no_grad():
521
- img, hq = isp_model.forward(img.copy(), hq)
522
-
523
- # add final JPEG compression noise
524
- img = add_JPEG_noise(img)
525
-
526
- # random crop
527
- img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
528
-
529
- return img, hq
530
-
531
-
532
- # todo no isp_model?
533
- def degradation_bsrgan_variant(image, sf=4, isp_model=None, up=False):
534
- """
535
- This is the degradation model of BSRGAN from the paper
536
- "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
537
- ----------
538
- sf: scale factor
539
- isp_model: camera ISP model
540
- Returns
541
- -------
542
- img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
543
- hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
544
- """
545
- image = util.uint2single(image)
546
- isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
547
- sf_ori = sf
548
-
549
- h1, w1 = image.shape[:2]
550
- image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
551
- h, w = image.shape[:2]
552
-
553
- hq = image.copy()
554
-
555
- if sf == 4 and random.random() < scale2_prob: # downsample1
556
- if np.random.rand() < 0.5:
557
- image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
558
- interpolation=random.choice([1, 2, 3]))
559
- else:
560
- image = util.imresize_np(image, 1 / 2, True)
561
- image = np.clip(image, 0.0, 1.0)
562
- sf = 2
563
-
564
- shuffle_order = random.sample(range(7), 7)
565
- idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
566
- if idx1 > idx2: # keep downsample3 last
567
- shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
568
-
569
- for i in shuffle_order:
570
-
571
- if i == 0:
572
- image = add_blur(image, sf=sf)
573
-
574
- # elif i == 1:
575
- # image = add_blur(image, sf=sf)
576
-
577
- if i == 0:
578
- pass
579
-
580
- elif i == 2:
581
- a, b = image.shape[1], image.shape[0]
582
- # downsample2
583
- if random.random() < 0.8:
584
- sf1 = random.uniform(1, 2 * sf)
585
- image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
586
- interpolation=random.choice([1, 2, 3]))
587
- else:
588
- k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
589
- k_shifted = shift_pixel(k, sf)
590
- k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
591
- image = ndimage.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
592
- image = image[0::sf, 0::sf, ...] # nearest downsampling
593
-
594
- image = np.clip(image, 0.0, 1.0)
595
-
596
- elif i == 3:
597
- # downsample3
598
- image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
599
- image = np.clip(image, 0.0, 1.0)
600
-
601
- elif i == 4:
602
- # add Gaussian noise
603
- image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
604
-
605
- elif i == 5:
606
- # add JPEG noise
607
- if random.random() < jpeg_prob:
608
- image = add_JPEG_noise(image)
609
- #
610
- # elif i == 6:
611
- # # add processed camera sensor noise
612
- # if random.random() < isp_prob and isp_model is not None:
613
- # with torch.no_grad():
614
- # img, hq = isp_model.forward(img.copy(), hq)
615
-
616
- # add final JPEG compression noise
617
- image = add_JPEG_noise(image)
618
- image = util.single2uint(image)
619
- if up:
620
- image = cv2.resize(image, (w1, h1), interpolation=cv2.INTER_CUBIC) # todo: random, as above? want to condition on it then
621
- example = {"image": image}
622
- return example
623
-
624
-
625
-
626
-
627
- if __name__ == '__main__':
628
- print("hey")
629
- img = util.imread_uint('utils/test.png', 3)
630
- img = img[:448, :448]
631
- h = img.shape[0] // 4
632
- print("resizing to", h)
633
- sf = 4
634
- deg_fn = partial(degradation_bsrgan_variant, sf=sf)
635
- for i in range(20):
636
- print(i)
637
- img_hq = img
638
- img_lq = deg_fn(img)["image"]
639
- img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
640
- print(img_lq)
641
- img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
642
- print(img_lq.shape)
643
- print("bicubic", img_lq_bicubic.shape)
644
- print(img_hq.shape)
645
- lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
646
- interpolation=0)
647
- lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
648
- (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
649
- interpolation=0)
650
- img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
651
- util.imsave(img_concat, str(i) + '.png')
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/perspectivecard/CreatePerspectiveCardMesh.js DELETED
@@ -1,39 +0,0 @@
- import { PerspectiveCard } from '../../../plugins/perspectiveimage.js';
- import Clone from '../../../plugins/utils/object/Clone.js';
-
- const GetValue = Phaser.Utils.Objects.GetValue;
-
- var CreatePerspectiveCardMesh = function (config) {
- var scene = this.scene;
-
- this.setSnapshotPadding(GetValue(config, 'snapshotPadding', 0));
-
- config = Clone(config);
- // Remove size config
- delete config.width;
- delete config.height;
- // Initial size of render-texture is 1x1
- config.front = { width: 1, height: 1 };
- config.back = { width: 1, height: 1 };
- // Create PerspectiveCard as card-behavior
- var card = new PerspectiveCard(scene, config);
- scene.add.existing(card);
-
- var flip = card.flip;
- if (flip) {
- var parent = this;
- flip
- .on('start', function () {
- // Before flipping
- parent.enterPerspectiveMode();
- })
- .on('complete', function () {
- // After flipping
- parent.exitPerspectiveMode();
- })
- }
-
- return card;
- }
-
- export default CreatePerspectiveCardMesh;
spaces/Andy1621/uniformer_image_detection/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py DELETED
@@ -1,7 +0,0 @@
- _base_ = '../retinanet/retinanet_x101_32x4d_fpn_1x_coco.py'
-
- model = dict(
- bbox_head=dict(
- type='PISARetinaHead',
- loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
- train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
spaces/Andy1621/uniformer_image_detection/tools/analysis_tools/get_flops.py DELETED
@@ -1,81 +0,0 @@
- import argparse
-
- import torch
- from mmcv import Config, DictAction
-
- from mmdet.models import build_detector
-
- try:
- from mmcv.cnn import get_model_complexity_info
- except ImportError:
- raise ImportError('Please upgrade mmcv to >0.6.2')
-
-
- def parse_args():
- parser = argparse.ArgumentParser(description='Train a detector')
- parser.add_argument('config', help='train config file path')
- parser.add_argument(
- '--shape',
- type=int,
- nargs='+',
- default=[1280, 800],
- help='input image size')
- parser.add_argument(
- '--cfg-options',
- nargs='+',
- action=DictAction,
- help='override some settings in the used config, the key-value pair '
- 'in xxx=yyy format will be merged into config file. If the value to '
- 'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
- 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
- 'Note that the quotation marks are necessary and that no white space '
- 'is allowed.')
- args = parser.parse_args()
- return args
-
-
- def main():
-
- args = parse_args()
-
- if len(args.shape) == 1:
- input_shape = (3, args.shape[0], args.shape[0])
- elif len(args.shape) == 2:
- input_shape = (3, ) + tuple(args.shape)
- else:
- raise ValueError('invalid input shape')
-
- cfg = Config.fromfile(args.config)
- if args.cfg_options is not None:
- cfg.merge_from_dict(args.cfg_options)
- # import modules from string list.
- if cfg.get('custom_imports', None):
- from mmcv.utils import import_modules_from_strings
- import_modules_from_strings(**cfg['custom_imports'])
-
- model = build_detector(
- cfg.model,
- train_cfg=cfg.get('train_cfg'),
- test_cfg=cfg.get('test_cfg'))
- if torch.cuda.is_available():
- model.cuda()
- model.eval()
-
- if hasattr(model, 'forward_dummy'):
- model.forward = model.forward_dummy
- else:
- raise NotImplementedError(
- 'FLOPs counter is currently not currently supported with {}'.
- format(model.__class__.__name__))
-
- flops, params = get_model_complexity_info(model, input_shape)
- split_line = '=' * 30
- print(f'{split_line}\nInput shape: {input_shape}\n'
- f'Flops: {flops}\nParams: {params}\n{split_line}')
- print('!!!Please be cautious if you use the results in papers. '
- 'You may need to check if all ops are supported and verify that the '
- 'flops computation is correct.')
-
-
- if __name__ == '__main__':
- main()
spaces/Anustup/NS_AI_LABS/src/utils.py DELETED
@@ -1,115 +0,0 @@
1
- import textwrap
2
- import unicodedata
3
- import re
4
-
5
- import zlib
6
- from typing import Iterator, TextIO
7
-
8
-
9
- def exact_div(x, y):
10
- assert x % y == 0
11
- return x // y
12
-
13
-
14
- def str2bool(string):
15
- str2val = {"True": True, "False": False}
16
- if string in str2val:
17
- return str2val[string]
18
- else:
19
- raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}")
20
-
21
-
22
- def optional_int(string):
23
- return None if string == "None" else int(string)
24
-
25
-
26
- def optional_float(string):
27
- return None if string == "None" else float(string)
28
-
29
-
30
- def compression_ratio(text) -> float:
31
- return len(text) / len(zlib.compress(text.encode("utf-8")))
32
-
33
-
34
- def format_timestamp(seconds: float, always_include_hours: bool = False, fractionalSeperator: str = '.'):
35
- assert seconds >= 0, "non-negative timestamp expected"
36
- milliseconds = round(seconds * 1000.0)
37
-
38
- hours = milliseconds // 3_600_000
39
- milliseconds -= hours * 3_600_000
40
-
41
- minutes = milliseconds // 60_000
42
- milliseconds -= minutes * 60_000
43
-
44
- seconds = milliseconds // 1_000
45
- milliseconds -= seconds * 1_000
46
-
47
- hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
48
- return f"{hours_marker}{minutes:02d}:{seconds:02d}{fractionalSeperator}{milliseconds:03d}"
49
-
50
-
51
- def write_txt(transcript: Iterator[dict], file: TextIO):
52
- for segment in transcript:
53
- print(segment['text'].strip(), file=file, flush=True)
54
-
55
-
56
- def write_vtt(transcript: Iterator[dict], file: TextIO, maxLineWidth=None):
57
- print("WEBVTT\n", file=file)
58
- for segment in transcript:
59
- text = process_text(segment['text'], maxLineWidth).replace('-->', '->')
60
-
61
- print(
62
- f"{format_timestamp(segment['start'])} --> {format_timestamp(segment['end'])}\n"
63
- f"{text}\n",
64
- file=file,
65
- flush=True,
66
- )
67
-
68
-
69
- def write_srt(transcript: Iterator[dict], file: TextIO, maxLineWidth=None):
70
- """
71
- Write a transcript to a file in SRT format.
72
- Example usage:
73
- from pathlib import Path
74
- from whisper.utils import write_srt
75
- result = transcribe(model, audio_path, temperature=temperature, **args)
76
- # save SRT
77
- audio_basename = Path(audio_path).stem
78
- with open(Path(output_dir) / (audio_basename + ".srt"), "w", encoding="utf-8") as srt:
79
- write_srt(result["segments"], file=srt)
80
- """
81
- for i, segment in enumerate(transcript, start=1):
82
- text = process_text(segment['text'].strip(), maxLineWidth).replace('-->', '->')
83
-
84
- # write srt lines
85
- print(
86
- f"{i}\n"
87
- f"{format_timestamp(segment['start'], always_include_hours=True, fractionalSeperator=',')} --> "
88
- f"{format_timestamp(segment['end'], always_include_hours=True, fractionalSeperator=',')}\n"
89
- f"{text}\n",
90
- file=file,
91
- flush=True,
92
- )
93
-
94
- def process_text(text: str, maxLineWidth=None):
95
- if (maxLineWidth is None or maxLineWidth < 0):
96
- return text
97
-
98
- lines = textwrap.wrap(text, width=maxLineWidth, tabsize=4)
99
- return '\n'.join(lines)
100
-
101
- def slugify(value, allow_unicode=False):
102
- """
103
- Taken from https://github.com/django/django/blob/master/django/utils/text.py
104
- Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
105
- dashes to single dashes. Remove characters that aren't alphanumerics,
106
- underscores, or hyphens. Convert to lowercase. Also strip leading and
107
- trailing whitespace, dashes, and underscores.
108
- """
109
- value = str(value)
110
- if allow_unicode:
111
- value = unicodedata.normalize('NFKC', value)
112
- else:
113
- value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
114
- value = re.sub(r'[^\w\s-]', '', value.lower())
115
- return re.sub(r'[-\s]+', '-', value).strip('-_')
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/utils/direct_url_helpers.py DELETED
@@ -1,87 +0,0 @@
1
- from typing import Optional
2
-
3
- from pip._internal.models.direct_url import ArchiveInfo, DirectUrl, DirInfo, VcsInfo
4
- from pip._internal.models.link import Link
5
- from pip._internal.utils.urls import path_to_url
6
- from pip._internal.vcs import vcs
7
-
8
-
9
- def direct_url_as_pep440_direct_reference(direct_url: DirectUrl, name: str) -> str:
10
- """Convert a DirectUrl to a pip requirement string."""
11
- direct_url.validate() # if invalid, this is a pip bug
12
- requirement = name + " @ "
13
- fragments = []
14
- if isinstance(direct_url.info, VcsInfo):
15
- requirement += "{}+{}@{}".format(
16
- direct_url.info.vcs, direct_url.url, direct_url.info.commit_id
17
- )
18
- elif isinstance(direct_url.info, ArchiveInfo):
19
- requirement += direct_url.url
20
- if direct_url.info.hash:
21
- fragments.append(direct_url.info.hash)
22
- else:
23
- assert isinstance(direct_url.info, DirInfo)
24
- requirement += direct_url.url
25
- if direct_url.subdirectory:
26
- fragments.append("subdirectory=" + direct_url.subdirectory)
27
- if fragments:
28
- requirement += "#" + "&".join(fragments)
29
- return requirement
30
-
31
-
32
- def direct_url_for_editable(source_dir: str) -> DirectUrl:
33
- return DirectUrl(
34
- url=path_to_url(source_dir),
35
- info=DirInfo(editable=True),
36
- )
37
-
38
-
39
- def direct_url_from_link(
40
- link: Link, source_dir: Optional[str] = None, link_is_in_wheel_cache: bool = False
41
- ) -> DirectUrl:
42
- if link.is_vcs:
43
- vcs_backend = vcs.get_backend_for_scheme(link.scheme)
44
- assert vcs_backend
45
- url, requested_revision, _ = vcs_backend.get_url_rev_and_auth(
46
- link.url_without_fragment
47
- )
48
- # For VCS links, we need to find out and add commit_id.
49
- if link_is_in_wheel_cache:
50
- # If the requested VCS link corresponds to a cached
51
- # wheel, it means the requested revision was an
52
- # immutable commit hash, otherwise it would not have
53
- # been cached. In that case we don't have a source_dir
54
- # with the VCS checkout.
55
- assert requested_revision
56
- commit_id = requested_revision
57
- else:
58
- # If the wheel was not in cache, it means we have
59
- # had to checkout from VCS to build and we have a source_dir
60
- # which we can inspect to find out the commit id.
61
- assert source_dir
62
- commit_id = vcs_backend.get_revision(source_dir)
63
- return DirectUrl(
64
- url=url,
65
- info=VcsInfo(
66
- vcs=vcs_backend.name,
67
- commit_id=commit_id,
68
- requested_revision=requested_revision,
69
- ),
70
- subdirectory=link.subdirectory_fragment,
71
- )
72
- elif link.is_existing_dir():
73
- return DirectUrl(
74
- url=link.url_without_fragment,
75
- info=DirInfo(),
76
- subdirectory=link.subdirectory_fragment,
77
- )
78
- else:
79
- hash = None
80
- hash_name = link.hash_name
81
- if hash_name:
82
- hash = f"{hash_name}={link.hash}"
83
- return DirectUrl(
84
- url=link.url_without_fragment,
85
- info=ArchiveInfo(hash=hash),
86
- subdirectory=link.subdirectory_fragment,
87
- )
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py DELETED
@@ -1,35 +0,0 @@
- from ..common.optim import SGD as optimizer
- from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier
- from ..common.data.coco import dataloader
- from ..common.models.mask_rcnn_fpn import model
- from ..common.train import train
-
- from detectron2.config import LazyCall as L
- from detectron2.modeling.backbone import RegNet
- from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock
-
-
- # Replace default ResNet with RegNetY-4GF from the DDS paper. Config source:
- # https://github.com/facebookresearch/pycls/blob/2c152a6e5d913e898cca4f0a758f41e6b976714d/configs/dds_baselines/regnety/RegNetY-4.0GF_dds_8gpu.yaml#L4-L10 # noqa
- model.backbone.bottom_up = L(RegNet)(
- stem_class=SimpleStem,
- stem_width=32,
- block_class=ResBottleneckBlock,
- depth=22,
- w_a=31.41,
- w_0=96,
- w_m=2.24,
- group_width=64,
- se_ratio=0.25,
- freeze_at=2,
- norm="FrozenBN",
- out_features=["s1", "s2", "s3", "s4"],
- )
- model.pixel_std = [57.375, 57.120, 58.395]
-
- optimizer.weight_decay = 5e-5
- train.init_checkpoint = (
- "https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906838/RegNetY-4.0GF_dds_8gpu.pyth"
- )
- # RegNets benefit from enabling cudnn benchmark mode
- train.cudnn_benchmark = True
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/export/shared.py DELETED
@@ -1,1034 +0,0 @@
1
- # Copyright (c) Facebook, Inc. and its affiliates.
2
-
3
- import collections
4
- import contextlib
5
- import copy
6
- import functools
7
- import logging
8
- import numpy as np
9
- import os
10
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
11
- from unittest import mock
12
- import caffe2.python.utils as putils
13
- import torch
14
- import torch.nn.functional as F
15
- from caffe2.proto import caffe2_pb2
16
- from caffe2.python import core, net_drawer, workspace
17
- from torch.nn.functional import interpolate as interp
18
-
19
- logger = logging.getLogger(__name__)
20
-
21
-
22
- # ==== torch/utils_toffee/cast.py =======================================
23
-
24
-
25
- def to_device(t, device_str):
26
- """
27
- This function is a replacement of .to(another_device) such that it allows the
28
- casting to be traced properly by explicitly calling the underlying copy ops.
29
- It also avoids introducing unncessary op when casting to the same device.
30
- """
31
- src = t.device
32
- dst = torch.device(device_str)
33
-
34
- if src == dst:
35
- return t
36
- elif src.type == "cuda" and dst.type == "cpu":
37
- return torch.ops._caffe2.CopyGPUToCPU(t)
38
- elif src.type == "cpu" and dst.type == "cuda":
39
- return torch.ops._caffe2.CopyCPUToGPU(t)
40
- else:
41
- raise RuntimeError("Can't cast tensor from device {} to device {}".format(src, dst))
42
-
43
-
44
- # ==== torch/utils_toffee/interpolate.py =======================================
45
-
46
-
47
- # Note: borrowed from vision/detection/fair/detectron/detectron/modeling/detector.py
48
- def BilinearInterpolation(tensor_in, up_scale):
49
- assert up_scale % 2 == 0, "Scale should be even"
50
-
51
- def upsample_filt(size):
52
- factor = (size + 1) // 2
53
- if size % 2 == 1:
54
- center = factor - 1
55
- else:
56
- center = factor - 0.5
57
-
58
- og = np.ogrid[:size, :size]
59
- return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
60
-
61
- kernel_size = int(up_scale) * 2
62
- bil_filt = upsample_filt(kernel_size)
63
-
64
- dim = int(tensor_in.shape[1])
65
- kernel = np.zeros((dim, dim, kernel_size, kernel_size), dtype=np.float32)
66
- kernel[range(dim), range(dim), :, :] = bil_filt
67
-
68
- tensor_out = F.conv_transpose2d(
69
- tensor_in,
70
- weight=to_device(torch.Tensor(kernel), tensor_in.device),
71
- bias=None,
72
- stride=int(up_scale),
73
- padding=int(up_scale / 2),
74
- )
75
-
76
- return tensor_out
77
-
78
-
79
- # NOTE: ONNX is incompatible with traced torch.nn.functional.interpolate if
80
- # using dynamic `scale_factor` rather than static `size`. (T43166860)
81
- # NOTE: Caffe2 Int8 conversion might not be able to quantize `size` properly.
82
- def onnx_compatibale_interpolate(
83
- input, size=None, scale_factor=None, mode="nearest", align_corners=None
84
- ):
85
- # NOTE: The input dimensions are interpreted in the form:
86
- # `mini-batch x channels x [optional depth] x [optional height] x width`.
87
- if size is None and scale_factor is not None:
88
- if input.dim() == 4:
89
- if isinstance(scale_factor, (int, float)):
90
- height_scale, width_scale = (scale_factor, scale_factor)
91
- else:
92
- assert isinstance(scale_factor, (tuple, list))
93
- assert len(scale_factor) == 2
94
- height_scale, width_scale = scale_factor
95
-
96
- assert not align_corners, "No matching C2 op for align_corners == True"
97
- if mode == "nearest":
98
- return torch.ops._caffe2.ResizeNearest(
99
- input, order="NCHW", width_scale=width_scale, height_scale=height_scale
100
- )
101
- elif mode == "bilinear":
102
- logger.warning(
103
- "Use F.conv_transpose2d for bilinear interpolate"
104
- " because there's no such C2 op, this may cause significant"
105
- " slowdown and the boundary pixels won't be as same as"
106
- " using F.interpolate due to padding."
107
- )
108
- assert height_scale == width_scale
109
- return BilinearInterpolation(input, up_scale=height_scale)
110
- logger.warning("Output size is not static, it might cause ONNX conversion issue")
111
-
112
- return interp(input, size, scale_factor, mode, align_corners)
113
-
114
-
115
- @contextlib.contextmanager
116
- def mock_torch_nn_functional_interpolate():
117
- if torch.onnx.is_in_onnx_export():
118
- with mock.patch(
119
- "torch.nn.functional.interpolate", side_effect=onnx_compatibale_interpolate
120
- ):
121
- yield
122
- else:
123
- yield
124
-
125
-
126
- # ==== torch/utils_caffe2/ws_utils.py ==========================================
127
-
128
-
129
- class ScopedWS(object):
130
- def __init__(self, ws_name, is_reset, is_cleanup=False):
131
- self.ws_name = ws_name
132
- self.is_reset = is_reset
133
- self.is_cleanup = is_cleanup
134
- self.org_ws = ""
135
-
136
- def __enter__(self):
137
- self.org_ws = workspace.CurrentWorkspace()
138
- if self.ws_name is not None:
139
- workspace.SwitchWorkspace(self.ws_name, True)
140
- if self.is_reset:
141
- workspace.ResetWorkspace()
142
-
143
- return workspace
144
-
145
- def __exit__(self, *args):
146
- if self.is_cleanup:
147
- workspace.ResetWorkspace()
148
- if self.ws_name is not None:
149
- workspace.SwitchWorkspace(self.org_ws)
150
-
151
-
152
- def fetch_any_blob(name):
153
- bb = None
154
- try:
155
- bb = workspace.FetchBlob(name)
156
- except TypeError:
157
- bb = workspace.FetchInt8Blob(name)
158
- except Exception as e:
159
- logger.error("Get blob {} error: {}".format(name, e))
160
-
161
- return bb
162
-
163
-
164
- # ==== torch/utils_caffe2/protobuf.py ==========================================
165
-
166
-
167
- def get_pb_arg(pb, arg_name):
168
- for x in pb.arg:
169
- if x.name == arg_name:
170
- return x
171
- return None
172
-
173
-
174
- def get_pb_arg_valf(pb, arg_name, default_val):
175
- arg = get_pb_arg(pb, arg_name)
176
- return arg.f if arg is not None else default_val
177
-
178
-
179
- def get_pb_arg_floats(pb, arg_name, default_val):
180
- arg = get_pb_arg(pb, arg_name)
181
- return list(map(float, arg.floats)) if arg is not None else default_val
182
-
183
-
184
- def get_pb_arg_ints(pb, arg_name, default_val):
185
- arg = get_pb_arg(pb, arg_name)
186
- return list(map(int, arg.ints)) if arg is not None else default_val
187
-
188
-
189
- def get_pb_arg_vali(pb, arg_name, default_val):
190
- arg = get_pb_arg(pb, arg_name)
191
- return arg.i if arg is not None else default_val
192
-
193
-
194
- def get_pb_arg_vals(pb, arg_name, default_val):
195
- arg = get_pb_arg(pb, arg_name)
196
- return arg.s if arg is not None else default_val
197
-
198
-
199
- def get_pb_arg_valstrings(pb, arg_name, default_val):
200
- arg = get_pb_arg(pb, arg_name)
201
- return list(arg.strings) if arg is not None else default_val
202
-
203
-
204
- def check_set_pb_arg(pb, arg_name, arg_attr, arg_value, allow_override=False):
205
- arg = get_pb_arg(pb, arg_name)
206
- if arg is None:
207
- arg = putils.MakeArgument(arg_name, arg_value)
208
- assert hasattr(arg, arg_attr)
209
- pb.arg.extend([arg])
210
- if allow_override and getattr(arg, arg_attr) != arg_value:
211
- logger.warning(
212
- "Override argument {}: {} -> {}".format(arg_name, getattr(arg, arg_attr), arg_value)
213
- )
214
- setattr(arg, arg_attr, arg_value)
215
- else:
216
- assert arg is not None
217
- assert getattr(arg, arg_attr) == arg_value, "Existing value {}, new value {}".format(
218
- getattr(arg, arg_attr), arg_value
219
- )
220
-
221
-
222
- def _create_const_fill_op_from_numpy(name, tensor, device_option=None):
223
- assert type(tensor) == np.ndarray
224
- kTypeNameMapper = {
225
- np.dtype("float32"): "GivenTensorFill",
226
- np.dtype("int32"): "GivenTensorIntFill",
227
- np.dtype("int64"): "GivenTensorInt64Fill",
228
- np.dtype("uint8"): "GivenTensorStringFill",
229
- }
230
-
231
- args_dict = {}
232
- if tensor.dtype == np.dtype("uint8"):
233
- args_dict.update({"values": [str(tensor.data)], "shape": [1]})
234
- else:
235
- args_dict.update({"values": tensor, "shape": tensor.shape})
236
-
237
- if device_option is not None:
238
- args_dict["device_option"] = device_option
239
-
240
- return core.CreateOperator(kTypeNameMapper[tensor.dtype], [], [name], **args_dict)
241
-
242
-
243
- def _create_const_fill_op_from_c2_int8_tensor(name, int8_tensor):
244
- assert type(int8_tensor) == workspace.Int8Tensor
245
- kTypeNameMapper = {
246
- np.dtype("int32"): "Int8GivenIntTensorFill",
247
- np.dtype("uint8"): "Int8GivenTensorFill",
248
- }
249
-
250
- tensor = int8_tensor.data
251
- assert tensor.dtype in [np.dtype("uint8"), np.dtype("int32")]
252
- values = tensor.tobytes() if tensor.dtype == np.dtype("uint8") else tensor
253
-
254
- return core.CreateOperator(
255
- kTypeNameMapper[tensor.dtype],
256
- [],
257
- [name],
258
- values=values,
259
- shape=tensor.shape,
260
- Y_scale=int8_tensor.scale,
261
- Y_zero_point=int8_tensor.zero_point,
262
- )
263
-
264
-
265
- def create_const_fill_op(
266
- name: str,
267
- blob: Union[np.ndarray, workspace.Int8Tensor],
268
- device_option: Optional[caffe2_pb2.DeviceOption] = None,
269
- ) -> caffe2_pb2.OperatorDef:
270
- """
271
- Given a blob object, return the Caffe2 operator that creates this blob
272
- as a constant. Currently supports NumPy tensors and Caffe2 Int8Tensor.
273
- """
274
-
275
- tensor_type = type(blob)
276
- assert tensor_type in [
277
- np.ndarray,
278
- workspace.Int8Tensor,
279
- ], 'Error when creating const fill op for "{}", unsupported blob type: {}'.format(
280
- name, type(blob)
281
- )
282
-
283
- if tensor_type == np.ndarray:
284
- return _create_const_fill_op_from_numpy(name, blob, device_option)
285
- elif tensor_type == workspace.Int8Tensor:
286
- assert device_option is None
287
- return _create_const_fill_op_from_c2_int8_tensor(name, blob)
288
-
289
-
290
- def construct_init_net_from_params(
291
- params: Dict[str, Any], device_options: Optional[Dict[str, caffe2_pb2.DeviceOption]] = None
292
- ) -> caffe2_pb2.NetDef:
293
- """
294
- Construct the init_net from params dictionary
295
- """
296
- init_net = caffe2_pb2.NetDef()
297
- device_options = device_options or {}
298
- for name, blob in params.items():
299
- if isinstance(blob, str):
300
- logger.warning(
301
- (
302
- "Blob {} with type {} is not supported in generating init net,"
303
- " skipped.".format(name, type(blob))
304
- )
305
- )
306
- continue
307
- init_net.op.extend(
308
- [create_const_fill_op(name, blob, device_option=device_options.get(name, None))]
309
- )
310
- init_net.external_output.append(name)
311
- return init_net
312
-
313
-
314
- def get_producer_map(ssa):
315
- """
316
- Return dict from versioned blob to (i, j),
317
- where i is index of producer op, j is the index of output of that op.
318
- """
319
- producer_map = {}
320
- for i in range(len(ssa)):
321
- outputs = ssa[i][1]
322
- for j, outp in enumerate(outputs):
323
- producer_map[outp] = (i, j)
324
- return producer_map
325
-
326
-
327
- def get_consumer_map(ssa):
328
- """
329
- Return dict from versioned blob to list of (i, j),
330
- where i is index of consumer op, j is the index of input of that op.
331
- """
332
- consumer_map = collections.defaultdict(list)
333
- for i in range(len(ssa)):
334
- inputs = ssa[i][0]
335
- for j, inp in enumerate(inputs):
336
- consumer_map[inp].append((i, j))
337
- return consumer_map
338
-
339
-
340
- def get_params_from_init_net(
341
- init_net: caffe2_pb2.NetDef,
342
- ) -> [Dict[str, Any], Dict[str, caffe2_pb2.DeviceOption]]:
343
- """
344
- Take the output blobs from init_net by running it.
345
- Outputs:
346
- params: dict from blob name to numpy array
347
- device_options: dict from blob name to the device option of its creating op
348
- """
349
- # NOTE: this assumes that the params are determined by the producer op, with the
350
- # only exception being CopyGPUToCPU, which is a CUDA op but returns a CPU tensor.
351
- def _get_device_option(producer_op):
352
- if producer_op.type == "CopyGPUToCPU":
353
- return caffe2_pb2.DeviceOption()
354
- else:
355
- return producer_op.device_option
356
-
357
- with ScopedWS("__get_params_from_init_net__", is_reset=True, is_cleanup=True) as ws:
358
- ws.RunNetOnce(init_net)
359
- params = {b: fetch_any_blob(b) for b in init_net.external_output}
360
- ssa, versions = core.get_ssa(init_net)
361
- producer_map = get_producer_map(ssa)
362
- device_options = {
363
- b: _get_device_option(init_net.op[producer_map[(b, versions[b])][0]])
364
- for b in init_net.external_output
365
- }
366
- return params, device_options
367
-
368
-
369
- def _updater_raise(op, input_types, output_types):
370
- raise RuntimeError(
371
- "Failed to apply updater for op {} given input_types {} and"
372
- " output_types {}".format(op, input_types, output_types)
373
- )
374
-
375
-
376
- def _generic_status_identifier(
377
- predict_net: caffe2_pb2.NetDef,
378
- status_updater: Callable,
379
- known_status: Dict[Tuple[str, int], Any],
380
- ) -> Dict[Tuple[str, int], Any]:
381
- """
382
- Statically infer the status of each blob, the status can be such as device type
383
- (CPU/GPU), layout (NCHW/NHWC), data type (float32/int8), etc. "Blob" here
384
- is versioned blob (Tuple[str, int]) in the format compatible with ssa.
385
- Inputs:
386
- predict_net: the caffe2 network
387
- status_updater: a callable, given an op and the status of its input/output,
388
- it returns the updated status of input/output. `None` is used for
389
- representing unknown status.
390
- known_status: a dict containing known status, used as initialization.
391
- Outputs:
392
- A dict mapping from versioned blob to its status
393
- """
394
- ssa, versions = core.get_ssa(predict_net)
395
- versioned_ext_input = [(b, 0) for b in predict_net.external_input]
396
- versioned_ext_output = [(b, versions[b]) for b in predict_net.external_output]
397
- all_versioned_blobs = set().union(*[set(x[0] + x[1]) for x in ssa])
398
-
399
- allowed_vbs = all_versioned_blobs.union(versioned_ext_input).union(versioned_ext_output)
400
- assert all(k in allowed_vbs for k in known_status)
401
- assert all(v is not None for v in known_status.values())
402
- _known_status = copy.deepcopy(known_status)
403
-
404
- def _check_and_update(key, value):
405
- assert value is not None
406
- if key in _known_status:
407
- if not _known_status[key] == value:
408
- raise RuntimeError(
409
- "Confilict status for {}, existing status {}, new status {}".format(
410
- key, _known_status[key], value
411
- )
412
- )
413
- _known_status[key] = value
414
-
415
- def _update_i(op, ssa_i):
416
- versioned_inputs = ssa_i[0]
417
- versioned_outputs = ssa_i[1]
418
-
419
- inputs_status = [_known_status.get(b, None) for b in versioned_inputs]
420
- outputs_status = [_known_status.get(b, None) for b in versioned_outputs]
421
-
422
- new_inputs_status, new_outputs_status = status_updater(op, inputs_status, outputs_status)
423
-
424
- for versioned_blob, status in zip(
425
- versioned_inputs + versioned_outputs, new_inputs_status + new_outputs_status
426
- ):
427
- if status is not None:
428
- _check_and_update(versioned_blob, status)
429
-
430
- for op, ssa_i in zip(predict_net.op, ssa):
431
- _update_i(op, ssa_i)
432
- for op, ssa_i in zip(reversed(predict_net.op), reversed(ssa)):
433
- _update_i(op, ssa_i)
434
-
435
- # NOTE: This strictly checks that every blob from predict_net is assigned
436
- # a known status. However sometimes it's impossible (eg. having deadend op),
437
- # we may relax this constraint if
438
- for k in all_versioned_blobs:
439
- if k not in _known_status:
440
- raise NotImplementedError(
441
- "Can not infer the status for {}. Currently only support the case where"
442
- " a single forward and backward pass can identify status for all blobs.".format(k)
443
- )
444
-
445
- return _known_status
446
-
447
-
448
- def infer_device_type(
449
- predict_net: caffe2_pb2.NetDef,
450
- known_status: Dict[Tuple[str, int], Any],
451
- device_name_style: str = "caffe2",
452
- ) -> Dict[Tuple[str, int], str]:
453
- """Return the device type ("cpu" or "gpu"/"cuda") of each (versioned) blob"""
454
-
455
- assert device_name_style in ["caffe2", "pytorch"]
456
- _CPU_STR = "cpu"
457
- _GPU_STR = "gpu" if device_name_style == "caffe2" else "cuda"
458
-
459
- def _copy_cpu_to_gpu_updater(op, input_types, output_types):
460
- if input_types[0] == _GPU_STR or output_types[0] == _CPU_STR:
461
- _updater_raise(op, input_types, output_types)
462
- return ([_CPU_STR], [_GPU_STR])
463
-
464
- def _copy_gpu_to_cpu_updater(op, input_types, output_types):
465
- if input_types[0] == _CPU_STR or output_types[0] == _GPU_STR:
466
- _updater_raise(op, input_types, output_types)
467
- return ([_GPU_STR], [_CPU_STR])
468
-
469
- def _other_ops_updater(op, input_types, output_types):
470
- non_none_types = [x for x in input_types + output_types if x is not None]
471
- if len(non_none_types) > 0:
472
- the_type = non_none_types[0]
473
- if not all(x == the_type for x in non_none_types):
474
- _updater_raise(op, input_types, output_types)
475
- else:
476
- the_type = None
477
- return ([the_type for _ in op.input], [the_type for _ in op.output])
478
-
479
- def _device_updater(op, *args, **kwargs):
480
- return {
481
- "CopyCPUToGPU": _copy_cpu_to_gpu_updater,
482
- "CopyGPUToCPU": _copy_gpu_to_cpu_updater,
483
- }.get(op.type, _other_ops_updater)(op, *args, **kwargs)
484
-
485
- return _generic_status_identifier(predict_net, _device_updater, known_status)
486
-
487
-
488
- # ==== torch/utils_caffe2/vis.py ===============================================
489
-
490
-
491
- def _modify_blob_names(ops, blob_rename_f):
492
- ret = []
493
-
494
- def _replace_list(blob_list, replaced_list):
495
- del blob_list[:]
496
- blob_list.extend(replaced_list)
497
-
498
- for x in ops:
499
- cur = copy.deepcopy(x)
500
- _replace_list(cur.input, list(map(blob_rename_f, cur.input)))
501
- _replace_list(cur.output, list(map(blob_rename_f, cur.output)))
502
- ret.append(cur)
503
-
504
- return ret
505
-
506
-
507
- def _rename_blob(name, blob_sizes, blob_ranges):
508
- def _list_to_str(bsize):
509
- ret = ", ".join([str(x) for x in bsize])
510
- ret = "[" + ret + "]"
511
- return ret
512
-
513
- ret = name
514
- if blob_sizes is not None and name in blob_sizes:
515
- ret += "\n" + _list_to_str(blob_sizes[name])
516
- if blob_ranges is not None and name in blob_ranges:
517
- ret += "\n" + _list_to_str(blob_ranges[name])
518
-
519
- return ret
520
-
521
-
522
- # graph_name could not contain word 'graph'
523
- def save_graph(net, file_name, graph_name="net", op_only=True, blob_sizes=None, blob_ranges=None):
524
- blob_rename_f = functools.partial(_rename_blob, blob_sizes=blob_sizes, blob_ranges=blob_ranges)
525
- return save_graph_base(net, file_name, graph_name, op_only, blob_rename_f)
526
-
527
-
528
- def save_graph_base(net, file_name, graph_name="net", op_only=True, blob_rename_func=None):
529
- graph = None
530
- ops = net.op
531
- if blob_rename_func is not None:
532
- ops = _modify_blob_names(ops, blob_rename_func)
533
- if not op_only:
534
- graph = net_drawer.GetPydotGraph(ops, graph_name, rankdir="TB")
535
- else:
536
- graph = net_drawer.GetPydotGraphMinimal(
537
- ops, graph_name, rankdir="TB", minimal_dependency=True
538
- )
539
-
540
- try:
541
- par_dir = os.path.dirname(file_name)
542
- if not os.path.exists(par_dir):
543
- os.makedirs(par_dir)
544
-
545
- format = os.path.splitext(os.path.basename(file_name))[-1]
546
- if format == ".png":
547
- graph.write_png(file_name)
548
- elif format == ".pdf":
549
- graph.write_pdf(file_name)
550
- elif format == ".svg":
551
- graph.write_svg(file_name)
552
- else:
553
- print("Incorrect format {}".format(format))
554
- except Exception as e:
555
- print("Error when writing graph to image {}".format(e))
556
-
557
- return graph
558
-
559
-
560
- # ==== torch/utils_toffee/aten_to_caffe2.py ====================================
561
-
562
-
563
- def group_norm_replace_aten_with_caffe2(predict_net: caffe2_pb2.NetDef):
564
- """
565
- For ONNX exported model, GroupNorm will be represented as ATen op,
566
- this function does a drop-in replacement from the ATen op to GroupNorm
567
- """
568
- count = 0
569
- for op in predict_net.op:
570
- if op.type == "ATen":
571
- op_name = get_pb_arg_vals(op, "operator", None) # return byte in py3
572
- if op_name and op_name.decode() == "group_norm":
573
- op.arg.remove(get_pb_arg(op, "operator"))
574
-
575
- if get_pb_arg_vali(op, "cudnn_enabled", None):
576
- op.arg.remove(get_pb_arg(op, "cudnn_enabled"))
577
-
578
- num_groups = get_pb_arg_vali(op, "num_groups", None)
579
- if num_groups is not None:
580
- op.arg.remove(get_pb_arg(op, "num_groups"))
581
- check_set_pb_arg(op, "group", "i", num_groups)
582
-
583
- op.type = "GroupNorm"
584
- count += 1
585
- if count > 1:
586
- logger.info("Replaced {} ATen operator to GroupNormOp".format(count))
587
-
588
-
589
- # ==== torch/utils_toffee/alias.py =============================================
590
-
591
-
592
- def alias(x, name, is_backward=False):
593
- if not torch.onnx.is_in_onnx_export():
594
- return x
595
- assert isinstance(x, torch.Tensor)
596
- return torch.ops._caffe2.AliasWithName(x, name, is_backward=is_backward)
597
-
598
-
599
- def fuse_alias_placeholder(predict_net, init_net):
600
- """Remove AliasWithName placeholder and rename the input/output of it"""
601
- # First we finish all the re-naming
602
- for i, op in enumerate(predict_net.op):
603
- if op.type == "AliasWithName":
604
- assert len(op.input) == 1
605
- assert len(op.output) == 1
606
- name = get_pb_arg_vals(op, "name", None).decode()
607
- is_backward = bool(get_pb_arg_vali(op, "is_backward", 0))
608
- rename_op_input(predict_net, init_net, i, 0, name, from_producer=is_backward)
609
- rename_op_output(predict_net, i, 0, name)
610
-
611
- # Remove AliasWithName, should be very safe since it's a non-op
612
- new_ops = []
613
- for op in predict_net.op:
614
- if op.type != "AliasWithName":
615
- new_ops.append(op)
616
- else:
617
- # safety check
618
- assert op.input == op.output
619
- assert op.input[0] == op.arg[0].s.decode()
620
- del predict_net.op[:]
621
- predict_net.op.extend(new_ops)
622
-
623
-
624
- # ==== torch/utils_caffe2/graph_transform.py ===================================
625
-
626
-
627
- class IllegalGraphTransformError(ValueError):
628
- """When a graph transform function call can't be executed."""
629
-
630
-
631
- def _rename_versioned_blob_in_proto(
632
- proto: caffe2_pb2.NetDef,
633
- old_name: str,
634
- new_name: str,
635
- version: int,
636
- ssa: List[Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]],
637
- start_versions: Dict[str, int],
638
- end_versions: Dict[str, int],
639
- ):
640
- """In given proto, rename all blobs with matched version"""
641
- # Operator list
642
- for op, i_th_ssa in zip(proto.op, ssa):
643
- versioned_inputs, versioned_outputs = i_th_ssa
644
- for i in range(len(op.input)):
645
- if versioned_inputs[i] == (old_name, version):
646
- op.input[i] = new_name
647
- for i in range(len(op.output)):
648
- if versioned_outputs[i] == (old_name, version):
649
- op.output[i] = new_name
650
- # external_input
651
- if start_versions.get(old_name, 0) == version:
652
- for i in range(len(proto.external_input)):
653
- if proto.external_input[i] == old_name:
654
- proto.external_input[i] = new_name
655
- # external_output
656
- if end_versions.get(old_name, 0) == version:
657
- for i in range(len(proto.external_output)):
658
- if proto.external_output[i] == old_name:
659
- proto.external_output[i] = new_name
660
-
661
-
662
- def rename_op_input(
663
- predict_net: caffe2_pb2.NetDef,
664
- init_net: caffe2_pb2.NetDef,
665
- op_id: int,
666
- input_id: int,
667
- new_name: str,
668
- from_producer: bool = False,
669
- ):
670
- """
671
- Rename the op_id-th operator in predict_net, change its input_id-th input's
672
- name to the new_name. It also does automatic re-routing and changes
673
- external_input and init_net if necessary.
674
- - It requires the input is only consumed by this op.
675
- - This function modifies predict_net and init_net in-place.
676
- - When from_producer is enabled, this also updates other operators that consume
677
- the same input. Be cautious because this may trigger unintended behavior.
678
- """
679
- assert isinstance(predict_net, caffe2_pb2.NetDef)
680
- assert isinstance(init_net, caffe2_pb2.NetDef)
681
-
682
- init_net_ssa, init_net_versions = core.get_ssa(init_net)
683
- predict_net_ssa, predict_net_versions = core.get_ssa(
684
- predict_net, copy.deepcopy(init_net_versions)
685
- )
686
-
687
- versioned_inputs, versioned_outputs = predict_net_ssa[op_id]
688
- old_name, version = versioned_inputs[input_id]
689
-
690
- if from_producer:
691
- producer_map = get_producer_map(predict_net_ssa)
692
- if not (old_name, version) in producer_map:
693
- raise NotImplementedError(
694
- "Can't find producer, the input {} is probably from"
695
- " init_net, this is not supported yet.".format(old_name)
696
- )
697
- producer = producer_map[(old_name, version)]
698
- rename_op_output(predict_net, producer[0], producer[1], new_name)
699
- return
700
-
701
- def contain_targets(op_ssa):
702
- return (old_name, version) in op_ssa[0]
703
-
704
- is_consumer = [contain_targets(op_ssa) for op_ssa in predict_net_ssa]
705
- if sum(is_consumer) > 1:
706
- raise IllegalGraphTransformError(
707
- (
708
- "Input '{}' of operator(#{}) are consumed by other ops, please use"
709
- + " rename_op_output on the producer instead. Offending op: \n{}"
710
- ).format(old_name, op_id, predict_net.op[op_id])
711
- )
712
-
713
- # update init_net
714
- _rename_versioned_blob_in_proto(
715
- init_net, old_name, new_name, version, init_net_ssa, {}, init_net_versions
716
- )
717
- # update predict_net
718
- _rename_versioned_blob_in_proto(
719
- predict_net,
720
- old_name,
721
- new_name,
722
- version,
723
- predict_net_ssa,
724
- init_net_versions,
725
- predict_net_versions,
726
- )
727
-
728
-
729
- def rename_op_output(predict_net: caffe2_pb2.NetDef, op_id: int, output_id: int, new_name: str):
730
- """
731
- Rename the op_id-th operator in predict_net, change its output_id-th output's
732
- name to the new_name. It also does automatic re-routing and changes
733
- external_output if necessary.
734
- - It allows multiple consumers of its output.
735
- - This function modifies predict_net in-place, doesn't need init_net.
736
- """
737
- assert isinstance(predict_net, caffe2_pb2.NetDef)
738
-
739
- ssa, blob_versions = core.get_ssa(predict_net)
740
-
741
- versioned_inputs, versioned_outputs = ssa[op_id]
742
- old_name, version = versioned_outputs[output_id]
743
-
744
- # update predict_net
745
- _rename_versioned_blob_in_proto(
746
- predict_net, old_name, new_name, version, ssa, {}, blob_versions
747
- )
748
-
749
-
750
- def get_sub_graph_external_input_output(
751
- predict_net: caffe2_pb2.NetDef, sub_graph_op_indices: List[int]
752
- ) -> Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]:
753
- """
754
- Return the list of external input/output of sub-graph,
755
- each element is tuple of the name and corresponding version in predict_net.
756
-
757
- external input/output is defined the same way as caffe2 NetDef.
758
- """
759
- ssa, versions = core.get_ssa(predict_net)
760
-
761
- all_inputs = []
762
- all_outputs = []
763
- for op_id in sub_graph_op_indices:
764
- all_inputs += [inp for inp in ssa[op_id][0] if inp not in all_inputs]
765
- all_outputs += list(ssa[op_id][1]) # ssa output won't repeat
766
-
767
- # for versioned blobs, external inputs are just those blob in all_inputs
768
- # but not in all_outputs
769
- ext_inputs = [inp for inp in all_inputs if inp not in all_outputs]
770
-
771
- # external outputs are essentially outputs of this subgraph that are used
772
- # outside of this sub-graph (including predict_net.external_output)
773
- all_other_inputs = sum(
774
- (ssa[i][0] for i in range(len(ssa)) if i not in sub_graph_op_indices),
775
- [(outp, versions[outp]) for outp in predict_net.external_output],
776
- )
777
- ext_outputs = [outp for outp in all_outputs if outp in set(all_other_inputs)]
778
-
779
- return ext_inputs, ext_outputs
780
-
781
-
782
- class DiGraph:
783
- """A DAG representation of caffe2 graph, each vertice is a versioned blob."""
784
-
785
- def __init__(self):
786
- self.vertices = set()
787
- self.graph = collections.defaultdict(list)
788
-
789
- def add_edge(self, u, v):
790
- self.graph[u].append(v)
791
- self.vertices.add(u)
792
- self.vertices.add(v)
793
-
794
- # grab from https://www.geeksforgeeks.org/find-paths-given-source-destination/
795
- def get_all_paths(self, s, d):
796
- visited = {k: False for k in self.vertices}
797
- path = []
798
- all_paths = []
799
-
800
- def _get_all_paths_util(graph, u, d, visited, path):
801
- visited[u] = True
802
- path.append(u)
803
- if u == d:
804
- all_paths.append(copy.deepcopy(path))
805
- else:
806
- for i in graph[u]:
807
- if not visited[i]:
808
- _get_all_paths_util(graph, i, d, visited, path)
809
- path.pop()
810
- visited[u] = False
811
-
812
- _get_all_paths_util(self.graph, s, d, visited, path)
813
- return all_paths
814
-
815
- @staticmethod
816
- def from_ssa(ssa):
817
- graph = DiGraph()
818
- for op_id in range(len(ssa)):
819
- for inp in ssa[op_id][0]:
820
- for outp in ssa[op_id][1]:
821
- graph.add_edge(inp, outp)
822
- return graph
823
-
824
-
825
- def _get_dependency_chain(ssa, versioned_target, versioned_source):
826
- """
827
- Return the index list of relevant operator to produce target blob from source blob,
828
- if there's no dependency, return empty list.
829
- """
830
-
831
- # finding all paths between nodes can be O(N!), thus we can only search
832
- # in the subgraph using the op starting from the first consumer of source blob
833
- # to the producer of the target blob.
834
- consumer_map = get_consumer_map(ssa)
835
- producer_map = get_producer_map(ssa)
836
- start_op = min(x[0] for x in consumer_map[versioned_source]) - 15
837
- end_op = (
838
- producer_map[versioned_target][0] + 15 if versioned_target in producer_map else start_op
839
- )
840
- sub_graph_ssa = ssa[start_op : end_op + 1]
841
- if len(sub_graph_ssa) > 30:
842
- logger.warning(
843
- "Subgraph bebetween {} and {} is large (from op#{} to op#{}), it"
844
- " might take non-trival time to find all paths between them.".format(
845
- versioned_source, versioned_target, start_op, end_op
846
- )
847
- )
848
-
849
- dag = DiGraph.from_ssa(sub_graph_ssa)
850
- paths = dag.get_all_paths(versioned_source, versioned_target) # include two ends
851
- ops_in_paths = [[producer_map[blob][0] for blob in path[1:]] for path in paths]
852
- return sorted(set().union(*[set(ops) for ops in ops_in_paths]))
853
-
854
-
855
- def identify_reshape_sub_graph(predict_net: caffe2_pb2.NetDef) -> List[List[int]]:
856
- """
857
- Identify the reshape sub-graph in a protobuf.
858
- The reshape sub-graph is defined as matching the following pattern:
859
-
860
- (input_blob) -> Op_1 -> ... -> Op_N -> (new_shape) -─┐
861
- └-------------------------------------------> Reshape -> (output_blob)
862
-
863
- Return:
864
- List of sub-graphs, each sub-graph is represented as a list of indices
865
- of the relevant ops, [Op_1, Op_2, ..., Op_N, Reshape]
866
- """
867
-
868
- ssa, _ = core.get_ssa(predict_net)
869
-
870
- ret = []
871
- for i, op in enumerate(predict_net.op):
872
- if op.type == "Reshape":
873
- assert len(op.input) == 2
874
- input_ssa = ssa[i][0]
875
- data_source = input_ssa[0]
876
- shape_source = input_ssa[1]
877
- op_indices = _get_dependency_chain(ssa, shape_source, data_source)
878
- ret.append(op_indices + [i])
879
- return ret
880
-
881
-
882
- def remove_reshape_for_fc(predict_net, params):
883
- """
884
- In PyTorch nn.Linear has to take 2D tensor, this often leads to reshape
885
- a 4D tensor to 2D by calling .view(). However this (dynamic) reshaping
886
- doesn't work well with ONNX and Int8 tools, and causes extra
887
- ops (eg. ExpandDims) that might not be available on mobile.
888
- Luckily Caffe2 supports 4D tensor for FC, so we can remove those reshape
889
- after exporting ONNX model.
890
- """
891
- from caffe2.python import core
892
-
893
- # find all reshape sub-graph that can be removed, which is now all Reshape
894
- # sub-graph whose output is only consumed by FC.
895
- # TODO: to make it safer, we may need the actually value to better determine
896
- # if a Reshape before FC is removable.
897
- reshape_sub_graphs = identify_reshape_sub_graph(predict_net)
898
- sub_graphs_to_remove = []
899
- for reshape_sub_graph in reshape_sub_graphs:
900
- reshape_op_id = reshape_sub_graph[-1]
901
- assert predict_net.op[reshape_op_id].type == "Reshape"
902
- ssa, _ = core.get_ssa(predict_net)
903
- reshape_output = ssa[reshape_op_id][1][0]
904
- consumers = [i for i in range(len(ssa)) if reshape_output in ssa[i][0]]
905
- if all(predict_net.op[consumer].type == "FC" for consumer in consumers):
906
- # safety check if the sub-graph is isolated, for this reshape sub-graph,
907
- # it means it has one non-param external input and one external output.
908
- ext_inputs, ext_outputs = get_sub_graph_external_input_output(
909
- predict_net, reshape_sub_graph
910
- )
911
- non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
912
- if len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1:
913
- sub_graphs_to_remove.append(reshape_sub_graph)
914
-
915
- # perform removing subgraph by:
916
- # 1: rename the Reshape's output to its input, then the graph can be
917
- # seen as an in-place identity, meaning its external input/output are the same.
918
- # 2: simply remove those ops.
919
- remove_op_ids = []
920
- params_to_remove = []
921
- for sub_graph in sub_graphs_to_remove:
922
- logger.info(
923
- "Remove Reshape sub-graph:\n{}".format(
924
- "".join(["(#{:>4})\n{}".format(i, predict_net.op[i]) for i in sub_graph])
925
- )
926
- )
927
- reshape_op_id = sub_graph[-1]
928
- new_reshap_output = predict_net.op[reshape_op_id].input[0]
929
- rename_op_output(predict_net, reshape_op_id, 0, new_reshap_output)
930
- ext_inputs, ext_outputs = get_sub_graph_external_input_output(predict_net, sub_graph)
931
- non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
932
- params_ext_inputs = [inp for inp in ext_inputs if inp[1] == 0]
933
- assert len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1
934
- assert ext_outputs[0][0] == non_params_ext_inputs[0][0]
935
- assert ext_outputs[0][1] == non_params_ext_inputs[0][1] + 1
936
- remove_op_ids.extend(sub_graph)
937
- params_to_remove.extend(params_ext_inputs)
938
-
939
- predict_net = copy.deepcopy(predict_net)
940
- new_ops = [op for i, op in enumerate(predict_net.op) if i not in remove_op_ids]
941
- del predict_net.op[:]
942
- predict_net.op.extend(new_ops)
943
- for versioned_params in params_to_remove:
944
- name = versioned_params[0]
945
- logger.info("Remove params: {} from init_net and predict_net.external_input".format(name))
946
- del params[name]
947
- predict_net.external_input.remove(name)
948
-
949
- return predict_net, params
950
-
951
-
952
- def fuse_copy_between_cpu_and_gpu(predict_net: caffe2_pb2.NetDef):
953
- """
954
- In-place fuse extra copy ops between cpu/gpu for the following case:
955
- a -CopyAToB-> b -CopyBToA> c1 -NextOp1-> d1
956
- -CopyBToA> c2 -NextOp2-> d2
957
- The fused network will look like:
958
- a -NextOp1-> d1
959
- -NextOp2-> d2
960
- """
961
-
962
- _COPY_OPS = ["CopyCPUToGPU", "CopyGPUToCPU"]
963
-
964
- def _fuse_once(predict_net):
965
- ssa, blob_versions = core.get_ssa(predict_net)
966
- consumer_map = get_consumer_map(ssa)
967
- versioned_external_output = [
968
- (name, blob_versions[name]) for name in predict_net.external_output
969
- ]
970
-
971
- for op_id, op in enumerate(predict_net.op):
972
- if op.type in _COPY_OPS:
973
- fw_copy_versioned_output = ssa[op_id][1][0]
974
- consumer_ids = [x[0] for x in consumer_map[fw_copy_versioned_output]]
975
- reverse_op_type = _COPY_OPS[1 - _COPY_OPS.index(op.type)]
976
-
977
- is_fusable = (
978
- len(consumer_ids) > 0
979
- and fw_copy_versioned_output not in versioned_external_output
980
- and all(
981
- predict_net.op[_op_id].type == reverse_op_type
982
- and ssa[_op_id][1][0] not in versioned_external_output
983
- for _op_id in consumer_ids
984
- )
985
- )
986
-
987
- if is_fusable:
988
- for rv_copy_op_id in consumer_ids:
989
- # making each NextOp uses "a" directly and removing Copy ops
990
- rs_copy_versioned_output = ssa[rv_copy_op_id][1][0]
991
- next_op_id, inp_id = consumer_map[rs_copy_versioned_output][0]
992
- predict_net.op[next_op_id].input[inp_id] = op.input[0]
993
- # remove CopyOps
994
- new_ops = [
995
- op
996
- for i, op in enumerate(predict_net.op)
997
- if i != op_id and i not in consumer_ids
998
- ]
999
- del predict_net.op[:]
1000
- predict_net.op.extend(new_ops)
1001
- return True
1002
-
1003
- return False
1004
-
1005
- # _fuse_once returns False if nothing can be fused
1006
- while _fuse_once(predict_net):
1007
- pass
1008
-
1009
-
1010
- def remove_dead_end_ops(net_def: caffe2_pb2.NetDef):
1011
- """remove ops if its output is not used or not in external_output"""
1012
- ssa, versions = core.get_ssa(net_def)
1013
- versioned_external_output = [(name, versions[name]) for name in net_def.external_output]
1014
- consumer_map = get_consumer_map(ssa)
1015
- removed_op_ids = set()
1016
-
1017
- def _is_dead_end(versioned_blob):
1018
- return not (
1019
- versioned_blob in versioned_external_output
1020
- or (
1021
- len(consumer_map[versioned_blob]) > 0
1022
- and all(x[0] not in removed_op_ids for x in consumer_map[versioned_blob])
1023
- )
1024
- )
1025
-
1026
- for i, ssa_i in reversed(list(enumerate(ssa))):
1027
- versioned_outputs = ssa_i[1]
1028
- if all(_is_dead_end(outp) for outp in versioned_outputs):
1029
- removed_op_ids.add(i)
1030
-
1031
- # simply removing those dead-end ops should have no effect on external_output
1032
- new_ops = [op for i, op in enumerate(net_def.op) if i not in removed_op_ids]
1033
- del net_def.op[:]
1034
- net_def.op.extend(new_ops)
 
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/docs/tutorials/evaluation.md DELETED
@@ -1,68 +0,0 @@
1
-
2
- # Evaluation
3
-
4
- Evaluation is a process that takes a number of input/output pairs and aggregates them.
5
- You can always [use the model](./models.md) directly and just parse its inputs/outputs manually to perform
6
- evaluation.
7
- Alternatively, evaluation is implemented in detectron2 using the [DatasetEvaluator](../modules/evaluation.html#detectron2.evaluation.DatasetEvaluator)
8
- interface.
9
-
10
- Detectron2 includes a few `DatasetEvaluator` implementations that compute metrics using standard dataset-specific
11
- APIs (e.g., COCO, LVIS).
12
- You can also implement your own `DatasetEvaluator` that performs some other jobs
13
- using the input/output pairs.
14
- For example, to count how many instances are detected on the validation set:
15
-
16
- ```
17
- class Counter(DatasetEvaluator):
18
- def reset(self):
19
- self.count = 0
20
- def process(self, inputs, outputs):
21
- for output in outputs:
22
- self.count += len(output["instances"])
23
- def evaluate(self):
24
- # save self.count somewhere, or print it, or return it.
25
- return {"count": self.count}
26
- ```
27
-
28
- ## Use evaluators
29
-
30
- To evaluate using the methods of evaluators manually:
31
- ```
32
- def get_all_inputs_outputs():
33
- for data in data_loader:
34
- yield data, model(data)
35
-
36
- evaluator.reset()
37
- for inputs, outputs in get_all_inputs_outputs():
38
- evaluator.process(inputs, outputs)
39
- eval_results = evaluator.evaluate()
40
- ```
41
-
42
- Evaluators can also be used with [inference_on_dataset](../modules/evaluation.html#detectron2.evaluation.inference_on_dataset).
43
- For example,
44
-
45
- ```python
46
- eval_results = inference_on_dataset(
47
- model,
48
- data_loader,
49
- DatasetEvaluators([COCOEvaluator(...), Counter()]))
50
- ```
51
- This will execute `model` on all inputs from `data_loader`, and call evaluator to process them.
52
-
53
- Compared to running the evaluation manually using the model, the benefit of this function is that
54
- evaluators can be merged together using [DatasetEvaluators](../modules/evaluation.html#detectron2.evaluation.DatasetEvaluators),
55
- and all the evaluation can finish in one forward pass over the dataset.
56
- This function also provides accurate speed benchmarks for the given model and dataset.
57
-
58
- ## Evaluators for custom dataset
59
-
60
- Many evaluators in detectron2 are made for specific datasets,
61
- in order to obtain scores using each dataset's official API.
62
- In addition to that, two evaluators are able to evaluate any generic dataset
63
- that follows detectron2's [standard dataset format](./datasets.md), so they
64
- can be used to evaluate custom datasets:
65
-
66
- * [COCOEvaluator](../modules/evaluation.html#detectron2.evaluation.COCOEvaluator) is able to evaluate AP (Average Precision) for box detection,
67
- instance segmentation, keypoint detection on any custom dataset.
68
- * [SemSegEvaluator](../modules/evaluation.html#detectron2.evaluation.SemSegEvaluator) is able to evaluate semantic segmentation metrics on any custom dataset.
 
spaces/BennoKrojer/imagecode-demo/app.py DELETED
@@ -1,69 +0,0 @@
1
- from turtle import color, onclick
2
- import streamlit as st
3
- from PIL import Image, ImageOps
4
- import glob
5
- import json
6
- import requests
7
- import random
8
- import io
9
-
10
- random.seed(10)
11
-
12
- if 'show' not in st.session_state:
13
- st.session_state.show = False
14
-
15
- if 'example_idx' not in st.session_state:
16
- st.session_state.example_idx = 0
17
-
18
- st.set_page_config(layout="wide")
19
- st.markdown("**This is a demo of the *ImageCoDe* benchmark. What is the task? You are given a description and you have to pick the image it describes, out of 10 images total.**")
20
- st.markdown("**If you click the Sample button, you will get a new text and images. More details of ImageCoDe can be found in our ACL 2022 paper.**")
21
-
22
- col1, col2 = st.columns(2)
23
-
24
- prefix = 'https://raw.githubusercontent.com/BennoKrojer/imagecode-val-set/main/image-sets-val/'
25
- set2ids = json.load(open('set2ids.json', 'r'))
26
- descriptions = json.load(open('valid_list.json', 'r'))
27
-
28
- #example_idx = int(col1.number_input('Sample an example (description + corresponding images) from the validation set', value=0, min_value=0, max_value=len(descriptions)-1))
29
- if col1.button('Sample a description + 10 images from the validation set'):
30
- st.session_state.example_idx += 1
31
- # st.session_state.example_idx = random.randint(0, len(descriptions)-1)
32
-
33
- img_set, true_idx, descr = descriptions[st.session_state.example_idx]
34
- true_idx = int(true_idx)
35
- images = [prefix+'/'+img_set+'/'+i for i in set2ids[img_set]]
36
- img_urls = images.copy()
37
- index = int(col2.number_input('Image Index from 0 to 9', value=0, min_value=0, max_value=9))
38
-
39
- if col1.button('Toggle to reveal/hide the correct image, try to guess yourself before giving up!'):
40
- st.session_state.show = not st.session_state.show
41
-
42
- col1.markdown(f'**Description for {img_set}**:')
43
- col1.markdown(f'**{descr}**')
44
-
45
- big_img = images[index]
46
- img = Image.open(io.BytesIO(requests.get(images[index], stream=True).content))
47
- img_width, img_height = img.size
48
- smaller = min(img_width, img_height)
49
- images[index]= ImageOps.expand(img,border=smaller//18,fill='blue')
50
-
51
- caps = list(range(10))
52
- cap = str(index)
53
-
54
- if st.session_state.show:
55
- caps[true_idx] = f'{true_idx} (TARGET IMAGE)'
56
- img = Image.open(io.BytesIO(requests.get(img_urls[true_idx], stream=True).content))
57
- img_width, img_height = img.size
58
- smaller = min(img_width, img_height)
59
- images[true_idx] = ImageOps.expand(img,border=smaller//8,fill='green')
60
- if true_idx == index:
61
- cap = f'{true_idx} (TARGET IMAGE)'
62
- else:
63
- caps[true_idx] = f'{true_idx}'
64
- if true_idx == index:
65
- cap = f'{true_idx}'
66
-
67
- col1.image(big_img, use_column_width=True, caption=cap)
68
- col2.image(images, width=175, caption=caps)
69
- col1.markdown(f'{st.session_state.example_idx}')
 
spaces/Benson/text-generation/Examples/Backrooms Apk.md DELETED
@@ -1,40 +0,0 @@
1
-
2
- <h1>Backrooms APK: Cinco juegos que exploran la nueva sensación de Internet espeluznante</h1>
3
- <p>Las salas traseras son una leyenda urbana en línea que se originó a partir de un creepypasta publicado en un 2018 4chan hilo. Los cuartos traseros se describen como un laberinto de salas de oficina vacías que solo se pueden entrar por "noclipping" fuera de la realidad. Las habitaciones están llenas de viejas alfombras húmedas, papel pintado amarillo y luces fluorescentes que crean una sensación de temor y aislamiento. Algunas historias también incluyen criaturas malévolas que acechan en las sombras. Las salas traseras se han convertido en uno de los ejemplos más conocidos de la estética de Internet de los espacios liminales, que representan lugares generalmente ocupados como vacíos anormales. </p>
4
- <h2>backrooms apk</h2><br /><p><b><b>DOWNLOAD</b> &#9889; <a href="https://bltlly.com/2v6MVl">https://bltlly.com/2v6MVl</a></b></p><br /><br />
5
- <p>Las salas traseras han inspirado a muchos fans y creadores a expandir el concepto original creando diferentes niveles, entidades e historias que exploran el horror y el misterio de esta dimensión alternativa. Una de las formas más populares de experimentar las salas traseras es a través de los videojuegos, que permiten a los jugadores sumergirse en el entorno aterrador y tratar de sobrevivir o escapar. En este artículo, revisaremos cinco juegos que exploran el concepto de salas traseras de diferentes maneras. Estos juegos están disponibles como archivos APK para dispositivos Android, para que pueda descargarlos y reproducirlos en su teléfono o tableta. </p>
6
- <h2>Entrar en las salas traseras</h2>
7
- <p>Enter the Backrooms es un juego muy lo-fi que nos recuerda a uno de esos viejos juegos shareware de la década de 1990. El juego se mantiene fiel a la creepypasta original con solo un nivel: Nivel 0, que es la clásica sala de oficina con alfombra, papel pintado y luces. El juego no tiene armas, ni entidades, y no tiene otros objetivos que sobrevivir el mayor tiempo posible sin perder la cordura. El juego mide tu cordura revisando tu reloj cada 30 segundos. Si te olvidas de hacerlo, tu cordura caerá y empezarás a alucinar. El juego también tiene un sistema de generación de niveles aleatorios que crea más de 600 millones de millas cuadradas de habitaciones para explorar. </p>
8
-
9
- <h2>El juego Backrooms Edición GRATUITA</h2>
10
- <p>The Backrooms Game FREE Edition es un juego de Steam que cuenta con niveles infinitos y un sistema de locura. El juego se basa en la foto original de los cuartos traseros, pero también incluye otros niveles que se inspiran en diferentes espacios liminales, como áreas industriales, túneles de servicio y sótanos. El juego tiene un ser que vaga por los cuartos traseros y puede oírte si haces ruido. Tienes que evitarlo o esconderte de él si lo encuentras. El juego también tiene un sistema de locura que afecta tu visión y audición a medida que exploras más profundo en los cuartos traseros. </p>
11
- <p>The Backrooms Game FREE Edition es un juego más pulido y variado que Enter the Backrooms. Tiene mejores gráficos, efectos de sonido y mecánica de juego. También ofrece más desafío y variedad al introducir diferentes niveles y enemigos. Sin embargo, a algunos puristas puede que no les guste el hecho de que se desvíe del concepto original de backrooms añadiendo nuevos elementos. </p>
12
- <h2>BACKROOMS</h2>
13
- <h2>BACKROOMS</h2>
14
- <p>BACKROOMS es un juego de terror que combina supervivencia, rompecabezas y acción. El juego se basa en el icónico juego de terror de supervivencia con desafiantes puzzles no euclidianos y acción en primera persona. El juego también tiene un viaje narrativo convincente que se desarrolla a medida que juegas. El juego cuenta con nueve niveles de los cuartos traseros, cada uno con su propio entorno, rompecabezas y enemigos. El juego también tiene un sistema de iluminación dinámico que crea una atmósfera realista e inmersiva. </p>
15
- <p></p>
16
- <p>BACKROOMS es un juego que atrae a los fans de juegos clásicos de terror como Silent Hill, Resident Evil y Amnesia. Tiene un alto nivel de dificultad y tensión que te mantendrá en el borde de tu asiento. También tiene una historia rica e intrigante que te hará querer descubrir más sobre las trassalas y tu propia identidad. </p>
17
- <h2>Escapar de las salas traseras</h2>
18
-
19
- <p>Escape the Backrooms es un juego que se disfruta mejor con los amigos. Tiene un juego divertido y cooperativo que requiere trabajo en equipo y comunicación. También tiene mucho valor de repetición debido a la generación de mapas aleatorios y los diferentes elementos y herramientas. Sin embargo, el juego también puede ser muy frustrante y aterrador si juegas solo o con extraños. </p>
20
- <h2>Las salas traseras 1998</h2>
21
- <p>The Backrooms 1998 es un juego retro que mezcla horror y humor. El juego está inspirado en los viejos juegos de PlayStation 1 y tiene un estilo de gráficos low-poly y una actuación de voz cursi. El juego sigue las aventuras de Bob, un repartidor de pizza que accidentalmente no habla en los cuartos traseros mientras entrega una pizza. El juego tiene cuatro niveles de los cuartos traseros, cada uno con su propio tema y enemigos. El juego también tiene muchos chistes y referencias a la cultura pop y memes. </p>
22
- <p>The Backrooms 1998 es un juego que no se toma demasiado en serio. Es una parodia del concepto de backrooms y los viejos juegos de terror. Tiene mucho humor y encanto que te hará reír y sonreír. Sin embargo, también puede ser bastante aterrador a veces, especialmente si no estás familiarizado con las referencias y chistes. </p>
23
- <h2>Conclusión</h2>
24
- <p>Las salas traseras son un fenómeno interesante que ha capturado la imaginación de muchas personas en línea. Son una fuente de horror, misterio y creatividad para fans y creadores por igual. Hay muchos juegos que exploran el concepto de salas traseras de diferentes maneras, de simple a complejo, de serio a divertido, de solo a cooperativo. Cada juego tiene sus propias fortalezas y debilidades, pero todos comparten una cosa en común: son divertidos y atractivos para jugar. </p>
25
-
26
- <h2>Preguntas frecuentes</h2>
27
- <h3>¿Qué son los cuartos traseros? </h3>
28
- <p>Las salas traseras son una leyenda urbana en línea que se originó a partir de un creepypasta publicado en un 2018 4chan hilo. Los cuartos traseros son descritos como un laberinto de salas de oficina vacías que solo pueden ser ingresadas por "noclipping" fuera de la realidad. </p>
29
- <h3>¿Cómo se entra en los cuartos traseros? </h3>
30
- <p>De acuerdo con el creepypasta original, puede entrar en los cuartos traseros noclipping fuera de la realidad en áreas donde no quieren que estés. Esto significa cortar paredes o pisos en lugares que no están diseñados para el acceso humano o la ocupación. </p>
31
- <h3>¿Son reales los cuartos traseros? </h3>
32
- <p>Las trassalas no son reales en ningún sentido físico o científico. Son un concepto ficticio creado por los usuarios de Internet para fines de entretenimiento. Sin embargo, algunas personas pueden creer en ellos como parte de sus creencias o experiencias personales. </p>
33
- <h3>¿Hay entidades en los cuartos traseros? </h3>
34
- <h3>¿Cómo escapar de los cuartos traseros? </h3>
35
- <p>No hay una respuesta definitiva a cómo escapar de los cuartos traseros, ya que diferentes historias y juegos tienen diferentes reglas y mecanismos. Algunas formas posibles de escapar de los cuartos traseros son encontrar una puerta de salida, alcanzar un cierto nivel o despertar de un sueño. Sin embargo, algunas historias y juegos también implican que no hay escape de los cuartos traseros, o que escapar de los cuartos traseros conducirá a peores consecuencias. </p>
36
- <h3>¿Cuáles son algunos otros juegos que exploran el concepto de trastienda? </h3>
37
- <p>Algunos otros juegos que exploran el concepto de salas traseras son The Backrooms VR, The Backrooms Simulator, The Backrooms: Level 2 y The Backrooms: SCP-3008. Estos juegos también están disponibles como archivos APK para dispositivos Android. </p>
38
- <p>Espero que hayas disfrutado de este artículo y hayas aprendido algo nuevo sobre los backrooms y los juegos que los exploran. Si tiene alguna pregunta o comentario, no dude en dejarlos abajo. ¡Gracias por leer! </p> 64aa2da5cf<br />
39
- <br />
40
- <br />
 
spaces/Benson/text-generation/Examples/Clash Of Clans Elmas Hilesi Apk Indir.md DELETED
@@ -1,61 +0,0 @@
1
-
2
- <h1>Clash of Clans Diamond Cheat Apk Download: Enjoy the Game </h1>
3
- <p>Clash of Clans is one of the most popular mobile strategy games in the world. Millions of players play this game to build their own village, train their troops and fight other players. Resources such as diamonds, gold and potions are very important in the game. With these resources, you can develop your village, produce stronger troops and gain more victory. However, collecting these resources can take time and sometimes be inadequate. In this case, you may want to download Clash of Clans diamond trick apk. </p>
4
- <p>Clash of Clans diamond trick apk is a modified version of the game. In this version, you can have an unlimited amount of diamonds, gold and potions. Thus, you can develop your village as you wish, train the strongest troops and easily defeat your opponents. You can also access all the features of the game and participate in clan battles. In this article, we will describe how you can download Clash of Clans diamond trick apk, its advantages and disadvantages. </p>
5
- <h2>clash of clans elmas hilesi apk indir</h2><br /><p><b><b>Download File</b> &#8230;&#8230;&#8230; <a href="https://bltlly.com/2v6IXp">https://bltlly.com/2v6IXp</a></b></p><br /><br />
6
- What is <h2>Clash of Clans?</h2>
7
- <p>Clash of Clans is a mobile strategy game developed and published by Supercell in 2012. In the game, you have duties such as building your own village, collecting resources, training military troops and fighting other players. You can also set up your own clan or join existing clans. By collaborating with other players in your clan, you can fight against enemy clans and organize strategic attacks to achieve victory. </p>
8
- <h3>Clash of Clans Game Features </h3>
9
- <p>Clash of Clans offers its features as a mobile game that can also be downloaded as an apk on the Android platform. Here are some features of Clash of Clans game: </p>
10
- <ul>
11
-
12
- <li> Military Training: In the game, you can train various military units with different features and abilities. These troops include barbarians, archers, giants, wall destructors, balloons, sorcerers, dragons and more. You can also train special troops called heroes. These troops include the barbarian king, archer queen and great guardian. You can make stronger attacks by improving your military units. </li>
13
- <li> Battle Mode: In the game, you can attack other players' villages or defend your own village. While attacking, you can place your military troops as you wish and capture the enemy village's resources and trophy points. When defending, you can organize your village and place your defense structures. You can also ask or help other players in your clan. </li>
14
- <li> Clan Wars: In the game, you can set up your own clan or join existing clans. You can chat, share resources and send troops with other players on your clan. You can also fight other clans in a special mode called clan wars. In clan wars, the two clans mutually attack each other's villages and win the clan that collects the most stars. You can improve your village and troops with the awards you have won from clan wars. </li>
15
- </ul>
16
- <h3>Clash of Clans Game Tips </h3>
17
- You may need some tips to succeed in <p>Clash of Clans. Here are some tips for Clash of Clans game: </p>
18
- <ul>
19
- <li> Use your resources wisely: Your resources are limited in the game and you need to spend them to improve your village. So use your resources wisely and avoid unnecessary expenses. For example, develop your defense structures primarily because they are very important in protecting your village. </li>
20
-
21
- <li>Strategic attack: You must also be strategic when attacking the game. Find the weak points of the enemy village and place your military troops accordingly. Use different troops for different purposes. For example, direct the giants to defense structures, wall destructors demolish the walls, archers collect resources and provide sorcerers support. </li>
22
- Join your clan < >: It is very important to join your clan in the game. By joining your clan, you can chat with other players, share resources and send troops. You can also win more awards and increase the prestige of your clan by participating in clan wars. When choosing your clan, choose an active, helpful and compatible clan. </li>
23
- </ul>
24
- <h2>Clash of Clans Diamond Cheat How to Download Apk?</h2>
25
- <p>Clash of Clans diamond trick apk is a modified version of the game. In this version, you can have an unlimited amount of diamonds, gold and potions. Thus, you can develop your village as you wish, train the strongest troops and easily defeat your opponents. You can also access all the features of the game and participate in clan battles. So how to download Clash of Clans diamond trick apk? Here are the steps: </p>
26
- <ol>
27
- <li> First, remove the original version of the Clash of Clans game on your device. This is required to download the modified version of the game. </li>
28
- <li> Then find a reliable source to download the Clash of Clans diamond trick apk file. You can find this file on the internet from search engines or social media platforms. However, be careful and be sure to download a virus-free file. </li>
29
- <li>Then open your device's settings and enable the option to install applications from unknown sources. This will allow you to install the apk file on your device. </li>
30
- <li> Then click on the Clash of Clans diamond trick apk file you downloaded and start the installation process. This process can take several minutes. </li>
31
-
32
- </ol>
33
- <h3>Clash of Clans Diamond Trick Apk Advantages </h3>
34
- <p>Downloading Clash of Clans diamond trick apk has some advantages. Here are some of them: </p>
35
- <ul>
36
- <li> Unlimited resources: With the Clash of Clans diamond trick apk, you can have an unlimited amount of diamonds, gold and potions. With these resources, you can develop your village as you wish, produce stronger troops and win more battles. </li>
37
- <li> Access to all features: With Clash of Clans diamond trick apk, you can access all the features of the game. You won't miss the latest updates of the game and you can participate in clan battles. </li>
38
- <li>More fun: With Clash of Clans diamond trick apk, you can play the game in a more fun way. You can easily beat your opponents, support your clan and enjoy the game. </li>
39
- </ul> <h3>Clash of Clans Diamond Trick Apk Disadvantages </h3>
40
- <p>There are also some disadvantages to downloading Clash of Clans diamond trick apk. Here are some of them: </p>
41
- <ul>
42
- <li>Account ban: Using Clash of Clans diamond trick apk is against the rules of the game and can get your account banned. In this case, you cannot continue playing and you lose all the progress you have made. </li>
43
- <li> Virus hazard: When downloading the Clash of Clans diamond trick apk file, you run the risk of downloading a virus-containing file. In this case, your device may be damaged and your personal information may be stolen. </li>
44
- <li> Reduced enjoyment: With Clash of Clans diamond trick apk you can play the game very easily. However, this can also reduce the fun of the game, because the challenge and competition are a big part of what makes the game enjoyable. </li>
45
- </ul>
46
- <h3>Is Clash of Clans Diamond Cheat Apk Safe?</h3>
47
- <p>No, it is not entirely safe. As the disadvantages above show, using the modified apk can get your account banned, and the files shared online may contain viruses, so you use it at your own risk. </p>
48
- <h2>Conclusion</h2>
49
- <p>Downloading Clash of Clans diamond trick apk can make the game easier and more fun to play. However, along with its advantages, this method also has some disadvantages and risks. So before using Clash of Clans diamond trick apk, you need to weigh them carefully and decide. Our advice is to play the original version of the game and succeed through your own effort. That way, you both respect the rules of the game and enjoy it more. </p>
50
- <h2> Frequently Asked Questions </h2>
51
- <p>You can find frequently asked questions and answers about Clash of Clans diamond trick apk below. </p>
52
- <p></p>
53
- <ol>
54
- <li><b>Where can I download Clash of Clans diamond trick apk?</b><br>Answer: You can find the Clash of Clans diamond trick apk file through search engines or social media platforms on the internet. However, be careful and be sure to download a virus-free file. </li>
55
- <li><b>Is it legal to use Clash of Clans diamond trick apk?</b><br>Answer: No, using Clash of Clans diamond trick apk is not legal. It is against the rules of the game and can cause your account to be banned. </li>
56
- <li><b>Can you join clan battles with Clash of Clans diamond trick apk?</b><br>Answer: Yes, you can participate in clan battles with Clash of Clans diamond trick apk. However, if other players notice it, it can damage your clan's reputation and cause your account to be banned. </li>
57
- <li><b>Can I get updates of the game with Clash of Clans diamond trick apk?</b><br>Answer: No, you cannot get updates of the game with Clash of Clans diamond trick apk. Since you are using a modified version of the game, you cannot connect to the game's official servers, so you cannot take advantage of the game's new features and fixes. </li>
58
-
59
- </ol></p><br />
60
- <br />
61
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/pkg_resources/_vendor/pyparsing/unicode.py DELETED
@@ -1,352 +0,0 @@
1
- # unicode.py
2
-
3
- import sys
4
- from itertools import filterfalse
5
- from typing import List, Tuple, Union
6
-
7
-
8
- class _lazyclassproperty:
9
- def __init__(self, fn):
10
- self.fn = fn
11
- self.__doc__ = fn.__doc__
12
- self.__name__ = fn.__name__
13
-
14
- def __get__(self, obj, cls):
15
- if cls is None:
16
- cls = type(obj)
17
- if not hasattr(cls, "_intern") or any(
18
- cls._intern is getattr(superclass, "_intern", [])
19
- for superclass in cls.__mro__[1:]
20
- ):
21
- cls._intern = {}
22
- attrname = self.fn.__name__
23
- if attrname not in cls._intern:
24
- cls._intern[attrname] = self.fn(cls)
25
- return cls._intern[attrname]
26
-
27
-
28
- UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]]
29
-
30
-
31
- class unicode_set:
32
- """
33
- A set of Unicode characters, for language-specific strings for
34
- ``alphas``, ``nums``, ``alphanums``, and ``printables``.
35
- A unicode_set is defined by a list of ranges in the Unicode character
36
- set, in a class attribute ``_ranges``. Ranges can be specified using
37
- 2-tuples or a 1-tuple, such as::
38
-
39
- _ranges = [
40
- (0x0020, 0x007e),
41
- (0x00a0, 0x00ff),
42
- (0x0100,),
43
- ]
44
-
45
- Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).
46
-
47
- A unicode set can also be defined using multiple inheritance of other unicode sets::
48
-
49
- class CJK(Chinese, Japanese, Korean):
50
- pass
51
- """
52
-
53
- _ranges: UnicodeRangeList = []
54
-
55
- @_lazyclassproperty
56
- def _chars_for_ranges(cls):
57
- ret = []
58
- for cc in cls.__mro__:
59
- if cc is unicode_set:
60
- break
61
- for rr in getattr(cc, "_ranges", ()):
62
- ret.extend(range(rr[0], rr[-1] + 1))
63
- return [chr(c) for c in sorted(set(ret))]
64
-
65
- @_lazyclassproperty
66
- def printables(cls):
67
- "all non-whitespace characters in this range"
68
- return "".join(filterfalse(str.isspace, cls._chars_for_ranges))
69
-
70
- @_lazyclassproperty
71
- def alphas(cls):
72
- "all alphabetic characters in this range"
73
- return "".join(filter(str.isalpha, cls._chars_for_ranges))
74
-
75
- @_lazyclassproperty
76
- def nums(cls):
77
- "all numeric digit characters in this range"
78
- return "".join(filter(str.isdigit, cls._chars_for_ranges))
79
-
80
- @_lazyclassproperty
81
- def alphanums(cls):
82
- "all alphanumeric characters in this range"
83
- return cls.alphas + cls.nums
84
-
85
- @_lazyclassproperty
86
- def identchars(cls):
87
- "all characters in this range that are valid identifier characters, plus underscore '_'"
88
- return "".join(
89
- sorted(
90
- set(
91
- "".join(filter(str.isidentifier, cls._chars_for_ranges))
92
- + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
93
- + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
94
- + "_"
95
- )
96
- )
97
- )
98
-
99
- @_lazyclassproperty
100
- def identbodychars(cls):
101
- """
102
- all characters in this range that are valid identifier body characters,
103
- plus the digits 0-9
104
- """
105
- return "".join(
106
- sorted(
107
- set(
108
- cls.identchars
109
- + "0123456789"
110
- + "".join(
111
- [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()]
112
- )
113
- )
114
- )
115
- )
116
-
117
-
118
- class pyparsing_unicode(unicode_set):
119
- """
120
- A namespace class for defining common language unicode_sets.
121
- """
122
-
123
- # fmt: off
124
-
125
- # define ranges in language character sets
126
- _ranges: UnicodeRangeList = [
127
- (0x0020, sys.maxunicode),
128
- ]
129
-
130
- class BasicMultilingualPlane(unicode_set):
131
- "Unicode set for the Basic Multilingual Plane"
132
- _ranges: UnicodeRangeList = [
133
- (0x0020, 0xFFFF),
134
- ]
135
-
136
- class Latin1(unicode_set):
137
- "Unicode set for Latin-1 Unicode Character Range"
138
- _ranges: UnicodeRangeList = [
139
- (0x0020, 0x007E),
140
- (0x00A0, 0x00FF),
141
- ]
142
-
143
- class LatinA(unicode_set):
144
- "Unicode set for Latin-A Unicode Character Range"
145
- _ranges: UnicodeRangeList = [
146
- (0x0100, 0x017F),
147
- ]
148
-
149
- class LatinB(unicode_set):
150
- "Unicode set for Latin-B Unicode Character Range"
151
- _ranges: UnicodeRangeList = [
152
- (0x0180, 0x024F),
153
- ]
154
-
155
- class Greek(unicode_set):
156
- "Unicode set for Greek Unicode Character Ranges"
157
- _ranges: UnicodeRangeList = [
158
- (0x0342, 0x0345),
159
- (0x0370, 0x0377),
160
- (0x037A, 0x037F),
161
- (0x0384, 0x038A),
162
- (0x038C,),
163
- (0x038E, 0x03A1),
164
- (0x03A3, 0x03E1),
165
- (0x03F0, 0x03FF),
166
- (0x1D26, 0x1D2A),
167
- (0x1D5E,),
168
- (0x1D60,),
169
- (0x1D66, 0x1D6A),
170
- (0x1F00, 0x1F15),
171
- (0x1F18, 0x1F1D),
172
- (0x1F20, 0x1F45),
173
- (0x1F48, 0x1F4D),
174
- (0x1F50, 0x1F57),
175
- (0x1F59,),
176
- (0x1F5B,),
177
- (0x1F5D,),
178
- (0x1F5F, 0x1F7D),
179
- (0x1F80, 0x1FB4),
180
- (0x1FB6, 0x1FC4),
181
- (0x1FC6, 0x1FD3),
182
- (0x1FD6, 0x1FDB),
183
- (0x1FDD, 0x1FEF),
184
- (0x1FF2, 0x1FF4),
185
- (0x1FF6, 0x1FFE),
186
- (0x2129,),
187
- (0x2719, 0x271A),
188
- (0xAB65,),
189
- (0x10140, 0x1018D),
190
- (0x101A0,),
191
- (0x1D200, 0x1D245),
192
- (0x1F7A1, 0x1F7A7),
193
- ]
194
-
195
- class Cyrillic(unicode_set):
196
- "Unicode set for Cyrillic Unicode Character Range"
197
- _ranges: UnicodeRangeList = [
198
- (0x0400, 0x052F),
199
- (0x1C80, 0x1C88),
200
- (0x1D2B,),
201
- (0x1D78,),
202
- (0x2DE0, 0x2DFF),
203
- (0xA640, 0xA672),
204
- (0xA674, 0xA69F),
205
- (0xFE2E, 0xFE2F),
206
- ]
207
-
208
- class Chinese(unicode_set):
209
- "Unicode set for Chinese Unicode Character Range"
210
- _ranges: UnicodeRangeList = [
211
- (0x2E80, 0x2E99),
212
- (0x2E9B, 0x2EF3),
213
- (0x31C0, 0x31E3),
214
- (0x3400, 0x4DB5),
215
- (0x4E00, 0x9FEF),
216
- (0xA700, 0xA707),
217
- (0xF900, 0xFA6D),
218
- (0xFA70, 0xFAD9),
219
- (0x16FE2, 0x16FE3),
220
- (0x1F210, 0x1F212),
221
- (0x1F214, 0x1F23B),
222
- (0x1F240, 0x1F248),
223
- (0x20000, 0x2A6D6),
224
- (0x2A700, 0x2B734),
225
- (0x2B740, 0x2B81D),
226
- (0x2B820, 0x2CEA1),
227
- (0x2CEB0, 0x2EBE0),
228
- (0x2F800, 0x2FA1D),
229
- ]
230
-
231
- class Japanese(unicode_set):
232
- "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
233
- _ranges: UnicodeRangeList = []
234
-
235
- class Kanji(unicode_set):
236
- "Unicode set for Kanji Unicode Character Range"
237
- _ranges: UnicodeRangeList = [
238
- (0x4E00, 0x9FBF),
239
- (0x3000, 0x303F),
240
- ]
241
-
242
- class Hiragana(unicode_set):
243
- "Unicode set for Hiragana Unicode Character Range"
244
- _ranges: UnicodeRangeList = [
245
- (0x3041, 0x3096),
246
- (0x3099, 0x30A0),
247
- (0x30FC,),
248
- (0xFF70,),
249
- (0x1B001,),
250
- (0x1B150, 0x1B152),
251
- (0x1F200,),
252
- ]
253
-
254
- class Katakana(unicode_set):
255
- "Unicode set for Katakana Unicode Character Range"
256
- _ranges: UnicodeRangeList = [
257
- (0x3099, 0x309C),
258
- (0x30A0, 0x30FF),
259
- (0x31F0, 0x31FF),
260
- (0x32D0, 0x32FE),
261
- (0xFF65, 0xFF9F),
262
- (0x1B000,),
263
- (0x1B164, 0x1B167),
264
- (0x1F201, 0x1F202),
265
- (0x1F213,),
266
- ]
267
-
268
- class Hangul(unicode_set):
269
- "Unicode set for Hangul (Korean) Unicode Character Range"
270
- _ranges: UnicodeRangeList = [
271
- (0x1100, 0x11FF),
272
- (0x302E, 0x302F),
273
- (0x3131, 0x318E),
274
- (0x3200, 0x321C),
275
- (0x3260, 0x327B),
276
- (0x327E,),
277
- (0xA960, 0xA97C),
278
- (0xAC00, 0xD7A3),
279
- (0xD7B0, 0xD7C6),
280
- (0xD7CB, 0xD7FB),
281
- (0xFFA0, 0xFFBE),
282
- (0xFFC2, 0xFFC7),
283
- (0xFFCA, 0xFFCF),
284
- (0xFFD2, 0xFFD7),
285
- (0xFFDA, 0xFFDC),
286
- ]
287
-
288
- Korean = Hangul
289
-
290
- class CJK(Chinese, Japanese, Hangul):
291
- "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
292
-
293
- class Thai(unicode_set):
294
- "Unicode set for Thai Unicode Character Range"
295
- _ranges: UnicodeRangeList = [
296
- (0x0E01, 0x0E3A),
297
- (0x0E3F, 0x0E5B)
298
- ]
299
-
300
- class Arabic(unicode_set):
301
- "Unicode set for Arabic Unicode Character Range"
302
- _ranges: UnicodeRangeList = [
303
- (0x0600, 0x061B),
304
- (0x061E, 0x06FF),
305
- (0x0700, 0x077F),
306
- ]
307
-
308
- class Hebrew(unicode_set):
309
- "Unicode set for Hebrew Unicode Character Range"
310
- _ranges: UnicodeRangeList = [
311
- (0x0591, 0x05C7),
312
- (0x05D0, 0x05EA),
313
- (0x05EF, 0x05F4),
314
- (0xFB1D, 0xFB36),
315
- (0xFB38, 0xFB3C),
316
- (0xFB3E,),
317
- (0xFB40, 0xFB41),
318
- (0xFB43, 0xFB44),
319
- (0xFB46, 0xFB4F),
320
- ]
321
-
322
- class Devanagari(unicode_set):
323
- "Unicode set for Devanagari Unicode Character Range"
324
- _ranges: UnicodeRangeList = [
325
- (0x0900, 0x097F),
326
- (0xA8E0, 0xA8FF)
327
- ]
328
-
329
- # fmt: on
330
-
331
-
332
- pyparsing_unicode.Japanese._ranges = (
333
- pyparsing_unicode.Japanese.Kanji._ranges
334
- + pyparsing_unicode.Japanese.Hiragana._ranges
335
- + pyparsing_unicode.Japanese.Katakana._ranges
336
- )
337
-
338
- pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane
339
-
340
- # add language identifiers using language Unicode
341
- pyparsing_unicode.العربية = pyparsing_unicode.Arabic
342
- pyparsing_unicode.中文 = pyparsing_unicode.Chinese
343
- pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic
344
- pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek
345
- pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew
346
- pyparsing_unicode.日本語 = pyparsing_unicode.Japanese
347
- pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji
348
- pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana
349
- pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana
350
- pyparsing_unicode.한국어 = pyparsing_unicode.Korean
351
- pyparsing_unicode.ไทย = pyparsing_unicode.Thai
352
- pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari
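For orientation, the lazily built class properties defined above (`alphas`, `nums`, `printables`, `identchars`) are what downstream parsers consume. A minimal usage sketch, not part of this commit and assuming a full pyparsing installation:

```python
# Sketch only: build parsers from the language-specific character sets
# defined by the unicode_set subclasses above.
import pyparsing as pp

greek = pp.pyparsing_unicode.Greek
cyrillic = pp.pyparsing_unicode.Cyrillic

greek_word = pp.Word(greek.alphas)        # word made of Greek alphabetic characters
cyrillic_word = pp.Word(cyrillic.alphas)  # word made of Cyrillic alphabetic characters

print(greek_word.parseString("αβγ δεζ")[0])    # -> αβγ
print(cyrillic_word.parseString("привет")[0])  # -> привет
```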
spaces/BreetheRun/mitchtech-vulcan-diffusion/README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Mitchtech Vulcan Diffusion
3
- emoji: 🏃
4
- colorFrom: red
5
- colorTo: purple
6
- sdk: gradio
7
- sdk_version: 3.15.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/CVPR/LIVE/pybind11/tests/test_custom_type_casters.cpp DELETED
@@ -1,125 +0,0 @@
1
- /*
2
- tests/test_custom_type_casters.cpp -- tests type_caster<T>
3
-
4
- Copyright (c) 2016 Wenzel Jakob <[email protected]>
5
-
6
- All rights reserved. Use of this source code is governed by a
7
- BSD-style license that can be found in the LICENSE file.
8
- */
9
-
10
- #include "pybind11_tests.h"
11
- #include "constructor_stats.h"
12
-
13
-
14
- // py::arg/py::arg_v testing: these arguments just record their argument when invoked
15
- class ArgInspector1 { public: std::string arg = "(default arg inspector 1)"; };
16
- class ArgInspector2 { public: std::string arg = "(default arg inspector 2)"; };
17
- class ArgAlwaysConverts { };
18
- namespace pybind11 { namespace detail {
19
- template <> struct type_caster<ArgInspector1> {
20
- public:
21
- PYBIND11_TYPE_CASTER(ArgInspector1, _("ArgInspector1"));
22
-
23
- bool load(handle src, bool convert) {
24
- value.arg = "loading ArgInspector1 argument " +
25
- std::string(convert ? "WITH" : "WITHOUT") + " conversion allowed. "
26
- "Argument value = " + (std::string) str(src);
27
- return true;
28
- }
29
-
30
- static handle cast(const ArgInspector1 &src, return_value_policy, handle) {
31
- return str(src.arg).release();
32
- }
33
- };
34
- template <> struct type_caster<ArgInspector2> {
35
- public:
36
- PYBIND11_TYPE_CASTER(ArgInspector2, _("ArgInspector2"));
37
-
38
- bool load(handle src, bool convert) {
39
- value.arg = "loading ArgInspector2 argument " +
40
- std::string(convert ? "WITH" : "WITHOUT") + " conversion allowed. "
41
- "Argument value = " + (std::string) str(src);
42
- return true;
43
- }
44
-
45
- static handle cast(const ArgInspector2 &src, return_value_policy, handle) {
46
- return str(src.arg).release();
47
- }
48
- };
49
- template <> struct type_caster<ArgAlwaysConverts> {
50
- public:
51
- PYBIND11_TYPE_CASTER(ArgAlwaysConverts, _("ArgAlwaysConverts"));
52
-
53
- bool load(handle, bool convert) {
54
- return convert;
55
- }
56
-
57
- static handle cast(const ArgAlwaysConverts &, return_value_policy, handle) {
58
- return py::none().release();
59
- }
60
- };
61
- }}
62
-
63
- // test_custom_caster_destruction
64
- class DestructionTester {
65
- public:
66
- DestructionTester() { print_default_created(this); }
67
- ~DestructionTester() { print_destroyed(this); }
68
- DestructionTester(const DestructionTester &) { print_copy_created(this); }
69
- DestructionTester(DestructionTester &&) { print_move_created(this); }
70
- DestructionTester &operator=(const DestructionTester &) { print_copy_assigned(this); return *this; }
71
- DestructionTester &operator=(DestructionTester &&) { print_move_assigned(this); return *this; }
72
- };
73
- namespace pybind11 { namespace detail {
74
- template <> struct type_caster<DestructionTester> {
75
- PYBIND11_TYPE_CASTER(DestructionTester, _("DestructionTester"));
76
- bool load(handle, bool) { return true; }
77
-
78
- static handle cast(const DestructionTester &, return_value_policy, handle) {
79
- return py::bool_(true).release();
80
- }
81
- };
82
- }}
83
-
84
- TEST_SUBMODULE(custom_type_casters, m) {
85
- // test_custom_type_casters
86
-
87
- // test_noconvert_args
88
- //
89
- // Test converting. The ArgAlwaysConverts is just there to make the first no-conversion pass
90
- // fail so that our call always ends up happening via the second dispatch (the one that allows
91
- // some conversion).
92
- class ArgInspector {
93
- public:
94
- ArgInspector1 f(ArgInspector1 a, ArgAlwaysConverts) { return a; }
95
- std::string g(ArgInspector1 a, const ArgInspector1 &b, int c, ArgInspector2 *d, ArgAlwaysConverts) {
96
- return a.arg + "\n" + b.arg + "\n" + std::to_string(c) + "\n" + d->arg;
97
- }
98
- static ArgInspector2 h(ArgInspector2 a, ArgAlwaysConverts) { return a; }
99
- };
100
- py::class_<ArgInspector>(m, "ArgInspector")
101
- .def(py::init<>())
102
- .def("f", &ArgInspector::f, py::arg(), py::arg() = ArgAlwaysConverts())
103
- .def("g", &ArgInspector::g, "a"_a.noconvert(), "b"_a, "c"_a.noconvert()=13, "d"_a=ArgInspector2(), py::arg() = ArgAlwaysConverts())
104
- .def_static("h", &ArgInspector::h, py::arg().noconvert(), py::arg() = ArgAlwaysConverts())
105
- ;
106
- m.def("arg_inspect_func", [](ArgInspector2 a, ArgInspector1 b, ArgAlwaysConverts) { return a.arg + "\n" + b.arg; },
107
- py::arg().noconvert(false), py::arg_v(nullptr, ArgInspector1()).noconvert(true), py::arg() = ArgAlwaysConverts());
108
-
109
- m.def("floats_preferred", [](double f) { return 0.5 * f; }, py::arg("f"));
110
- m.def("floats_only", [](double f) { return 0.5 * f; }, py::arg("f").noconvert());
111
- m.def("ints_preferred", [](int i) { return i / 2; }, py::arg("i"));
112
- m.def("ints_only", [](int i) { return i / 2; }, py::arg("i").noconvert());
113
-
114
- // test_custom_caster_destruction
115
- // Test that `take_ownership` works on types with a custom type caster when given a pointer
116
-
117
- // default policy: don't take ownership:
118
- m.def("custom_caster_no_destroy", []() { static auto *dt = new DestructionTester(); return dt; });
119
-
120
- m.def("custom_caster_destroy", []() { return new DestructionTester(); },
121
- py::return_value_policy::take_ownership); // Takes ownership: destroy when finished
122
- m.def("custom_caster_destroy_const", []() -> const DestructionTester * { return new DestructionTester(); },
123
- py::return_value_policy::take_ownership); // Likewise (const doesn't inhibit destruction)
124
- m.def("destruction_tester_cstats", &ConstructorStats::get<DestructionTester>, py::return_value_policy::reference);
125
- }
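The Python side of this test (mirroring pybind11's own test_custom_type_casters.py; shown here only as an illustrative sketch that assumes the `pybind11_tests` extension above has been built) exercises the noconvert behaviour:

```python
# Sketch: int -> float conversion is allowed for floats_preferred but rejected
# by floats_only, which was bound with py::arg("f").noconvert().
import pytest
from pybind11_tests import custom_type_casters as m

assert m.floats_preferred(4) == 2.0      # converting dispatch succeeds
with pytest.raises(TypeError):
    m.floats_only(4)                     # no-convert argument rejects the int
assert m.floats_only(4.0) == 2.0
```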
spaces/CVPR/LIVE/thrust/thrust/iterator/detail/iterator_category_to_traversal.h DELETED
@@ -1,131 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/iterator/iterator_categories.h>
21
- #include <thrust/iterator/detail/iterator_traversal_tags.h>
22
- #include <thrust/iterator/detail/iterator_category_to_system.h>
23
- #include <thrust/detail/type_traits.h>
24
-
25
- namespace thrust
26
- {
27
-
28
- namespace detail
29
- {
30
-
31
- // forward declarations
32
- template <typename> struct is_iterator_system;
33
- template <typename> struct is_iterator_traversal;
34
-
35
- template <typename Category>
36
- struct host_system_category_to_traversal
37
- : eval_if<
38
- is_convertible<Category, random_access_host_iterator_tag>::value,
39
- detail::identity_<random_access_traversal_tag>,
40
- eval_if<
41
- is_convertible<Category, bidirectional_host_iterator_tag>::value,
42
- detail::identity_<bidirectional_traversal_tag>,
43
- eval_if<
44
- is_convertible<Category, forward_host_iterator_tag>::value,
45
- detail::identity_<forward_traversal_tag>,
46
- eval_if<
47
- is_convertible<Category, input_host_iterator_tag>::value,
48
- detail::identity_<single_pass_traversal_tag>,
49
- eval_if<
50
- is_convertible<Category, output_host_iterator_tag>::value,
51
- detail::identity_<incrementable_traversal_tag>,
52
- void
53
- >
54
- >
55
- >
56
- >
57
- >
58
- {
59
- }; // end host_system_category_to_traversal
60
-
61
-
62
-
63
- template <typename Category>
64
- struct device_system_category_to_traversal
65
- : eval_if<
66
- is_convertible<Category, random_access_device_iterator_tag>::value,
67
- detail::identity_<random_access_traversal_tag>,
68
- eval_if<
69
- is_convertible<Category, bidirectional_device_iterator_tag>::value,
70
- detail::identity_<bidirectional_traversal_tag>,
71
- eval_if<
72
- is_convertible<Category, forward_device_iterator_tag>::value,
73
- detail::identity_<forward_traversal_tag>,
74
- eval_if<
75
- is_convertible<Category, input_device_iterator_tag>::value,
76
- detail::identity_<single_pass_traversal_tag>,
77
- eval_if<
78
- is_convertible<Category, output_device_iterator_tag>::value,
79
- detail::identity_<incrementable_traversal_tag>,
80
- void
81
- >
82
- >
83
- >
84
- >
85
- >
86
- {
87
- }; // end device_system_category_to_traversal
88
-
89
-
90
- template<typename Category>
91
- struct category_to_traversal
92
- // check for host system
93
- : eval_if<
94
- or_<
95
- is_convertible<Category, thrust::input_host_iterator_tag>,
96
- is_convertible<Category, thrust::output_host_iterator_tag>
97
- >::value,
98
-
99
- host_system_category_to_traversal<Category>,
100
-
101
- // check for device system
102
- eval_if<
103
- or_<
104
- is_convertible<Category, thrust::input_device_iterator_tag>,
105
- is_convertible<Category, thrust::output_device_iterator_tag>
106
- >::value,
107
-
108
- device_system_category_to_traversal<Category>,
109
-
110
- // unknown category
111
- void
112
- >
113
- >
114
- {};
115
-
116
-
117
- template <typename CategoryOrTraversal>
118
- struct iterator_category_to_traversal
119
- : eval_if<
120
- is_iterator_traversal<CategoryOrTraversal>::value,
121
- detail::identity_<CategoryOrTraversal>,
122
- category_to_traversal<CategoryOrTraversal>
123
- >
124
- {
125
- }; // end iterator_category_to_traversal
126
-
127
-
128
- } // end detail
129
-
130
- } // end thrust
131
-
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/get_value.h DELETED
@@ -1,46 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
- #include <thrust/system/detail/sequential/execution_policy.h>
21
- #include <thrust/detail/raw_pointer_cast.h>
22
-
23
- namespace thrust
24
- {
25
- namespace system
26
- {
27
- namespace detail
28
- {
29
- namespace sequential
30
- {
31
-
32
-
33
- template<typename DerivedPolicy, typename Pointer>
34
- __host__ __device__
35
- typename thrust::iterator_value<Pointer>::type
36
- get_value(sequential::execution_policy<DerivedPolicy> &, Pointer ptr)
37
- {
38
- return *thrust::raw_pointer_cast(ptr);
39
- } // end get_value()
40
-
41
-
42
- } // end sequential
43
- } // end detail
44
- } // end system
45
- } // end thrust
46
-
spaces/CVPR/WALT/mmdet/models/detectors/vfnet.py DELETED
@@ -1,18 +0,0 @@
1
- from ..builder import DETECTORS
2
- from .single_stage import SingleStageDetector
3
-
4
-
5
- @DETECTORS.register_module()
6
- class VFNet(SingleStageDetector):
7
- """Implementation of `VarifocalNet
8
- (VFNet).<https://arxiv.org/abs/2008.13367>`_"""
9
-
10
- def __init__(self,
11
- backbone,
12
- neck,
13
- bbox_head,
14
- train_cfg=None,
15
- test_cfg=None,
16
- pretrained=None):
17
- super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg,
18
- test_cfg, pretrained)
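Since the class above only registers itself with the DETECTORS registry, it is normally instantiated from a config file rather than constructed directly. An illustrative sketch (not part of this commit; the config path is the one shipped with upstream mmdetection):

```python
# Sketch: build the registered VFNet detector from an mmdet config.
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/vfnet/vfnet_r50_fpn_1x_coco.py')  # upstream mmdet config
model = build_detector(cfg.model,
                       train_cfg=cfg.get('train_cfg'),
                       test_cfg=cfg.get('test_cfg'))
print(type(model).__name__)  # -> VFNet
```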
spaces/CVPR/lama-example/fetch_data/places_standard_test_val_prepare.sh DELETED
@@ -1,5 +0,0 @@
1
- mkdir -p places_standard_dataset/original/test/
2
- tar -xvf test_large.tar --transform='s/.*\///' -C places_standard_dataset/original/test/
3
-
4
- mkdir -p places_standard_dataset/original/val/
5
- tar -xvf val_large.tar --transform='s/.*\///' -C places_standard_dataset/original/val/
spaces/ConceptArtHouse/webui-gameasset/app.py DELETED
@@ -1,62 +0,0 @@
1
- import os
2
- from subprocess import getoutput
3
-
4
- gpu_info = getoutput('nvidia-smi')
5
- if("A10G" in gpu_info):
6
- os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+4c06c79.d20221205-cp38-cp38-linux_x86_64.whl")
7
- elif("T4" in gpu_info):
8
- os.system(f"pip install -q https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.15/xformers-0.0.15.dev0+1515f77.d20221130-cp38-cp38-linux_x86_64.whl")
9
-
10
- os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui /home/user/app/stable-diffusion-webui")
11
- os.chdir("/home/user/app/stable-diffusion-webui")
12
-
13
- os.system(f"wget -q https://github.com/camenduru/webui/raw/main/env_patch.py -O /home/user/app/env_patch.py")
14
- os.system(f"sed -i -e '/import image_from_url_text/r /home/user/app/env_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py")
15
- os.system(f"sed -i -e '/(modelmerger_interface, \"Checkpoint Merger\", \"modelmerger\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
16
- os.system(f"sed -i -e '/(train_interface, \"Train\", \"ti\"),/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
17
- os.system(f"sed -i -e '/extensions_interface, \"Extensions\", \"extensions\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
18
- os.system(f"sed -i -e '/settings_interface, \"Settings\", \"settings\"/d' /home/user/app/stable-diffusion-webui/modules/ui.py")
19
- os.system(f'''sed -i -e "s/document.getElementsByTagName('gradio-app')\[0\].shadowRoot/!!document.getElementsByTagName('gradio-app')[0].shadowRoot ? document.getElementsByTagName('gradio-app')[0].shadowRoot : document/g" /home/user/app/stable-diffusion-webui/script.js''')
20
- os.system(f"sed -i -e 's/ show_progress=False,/ show_progress=True,/g' /home/user/app/stable-diffusion-webui/modules/ui.py")
21
- os.system(f"sed -i -e 's/shared.demo.launch/shared.demo.queue().launch/g' /home/user/app/stable-diffusion-webui/webui.py")
22
- #os.system(f"sed -i -e 's/inputs=\[component\],/&\\n queue=False,/g' /home/user/app/stable-diffusion-webui/modules/ui.py")
23
- #os.system(f"sed -i -e 's/outputs=\[token_counter\]/outputs=[token_counter], queue=False/g' /home/user/app/stable-diffusion-webui/modules/ui.py")
24
- os.system(f"sed -i -e 's/outputs=\[/queue=False, &/g' /home/user/app/stable-diffusion-webui/modules/ui.py")
25
-
26
- # ----------------------------Please duplicate this space and delete this block if you don't want to see the extra header----------------------------
27
- os.system(f"wget -q https://github.com/camenduru/webui/raw/main/header_patch.py -O /home/user/app/header_patch.py")
28
- os.system(f"sed -i -e '/demo:/r /home/user/app/header_patch.py' /home/user/app/stable-diffusion-webui/modules/ui.py")
29
- # ---------------------------------------------------------------------------------------------------------------------------------------------------
30
-
31
- #os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.ckpt")
32
- #os.system(f"wget -q https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/Anything-V3.0.vae.pt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Anything-V3.0-pruned.vae.pt")
33
- #os.system(f"wget -q https://huggingface.co/phuson/shields-game-asset/resolve/main/model.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/model.ckpt")
34
- #os.system(f"wget -q https://huggingface.co/phuson/shield-asset-model-sd-2-1/resolve/main/model.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/model2.ckpt")
35
-
36
- if "IS_SHARED_UI" in os.environ:
37
- os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-config.json -O /home/user/app/shared-config.json")
38
- os.system(f"wget -q https://github.com/camenduru/webui/raw/main/shared-ui-config.json -O /home/user/app/shared-ui-config.json")
39
- os.system(f"python launch.py --force-enable-xformers --disable-console-progressbars --enable-console-prompts --ui-config-file /home/user/app/shared-ui-config.json --ui-settings-file /home/user/app/shared-config.json --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding")
40
- else:
41
- # Please duplicate this space and delete # character in front of the custom script you want to use or add here more custom scripts with same structure os.system(f"wget -q https://CUSTOM_SCRIPT_URL -O /home/user/app/stable-diffusion-webui/scripts/CUSTOM_SCRIPT_NAME.py")
42
- os.system(f"wget -q https://gist.github.com/camenduru/9ec5f8141db9902e375967e93250860f/raw/d0bcf01786f20107c329c03f8968584ee67be12a/run_n_times.py -O /home/user/app/stable-diffusion-webui/scripts/run_n_times.py")
43
-
44
- # Please duplicate this space and delete # character in front of the extension you want to use or add here more extensions with same structure os.system(f"git clone https://EXTENSION_GIT_URL /home/user/app/stable-diffusion-webui/extensions/EXTENSION_NAME")
45
- #os.system(f"git clone https://github.com/camenduru/stable-diffusion-webui-artists-to-study /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-artists-to-study")
46
- os.system(f"git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser /home/user/app/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser")
47
- os.system(f"git clone https://github.com/deforum-art/deforum-for-automatic1111-webui /home/user/app/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui")
48
-
49
- # Please duplicate this space and delete # character in front of the model you want to use or add here more ckpts with same structure os.system(f"wget -q https://CKPT_URL -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/CKPT_NAME.ckpt")
50
- #os.system(f"wget -q https://huggingface.co/nitrosocke/Arcane-Diffusion/resolve/main/arcane-diffusion-v3.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/arcane-diffusion-v3.ckpt")
51
- #os.system(f"wget -q https://huggingface.co/DGSpitzer/Cyberpunk-Anime-Diffusion/resolve/main/Cyberpunk-Anime-Diffusion.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/Cyberpunk-Anime-Diffusion.ckpt")
52
- #os.system(f"wget -q https://huggingface.co/prompthero/midjourney-v4-diffusion/resolve/main/mdjrny-v4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/mdjrny-v4.ckpt")
53
- #os.system(f"wget -q https://huggingface.co/nitrosocke/mo-di-diffusion/resolve/main/moDi-v1-pruned.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/moDi-v1-pruned.ckpt")
54
- #os.system(f"wget -q https://huggingface.co/Fictiverse/Stable_Diffusion_PaperCut_Model/resolve/main/PaperCut_v1.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/PaperCut_v1.ckpt")
55
- #os.system(f"wget -q https://huggingface.co/lilpotat/sa/resolve/main/samdoesarts_style.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/samdoesarts_style.ckpt")
56
- #os.system(f"wget -q https://huggingface.co/hakurei/waifu-diffusion-v1-3/resolve/main/wd-v1-3-float32.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/wd-v1-3-float32.ckpt")
57
- #os.system(f"wget -q https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-4.ckpt")
58
- #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/v1-5-pruned-emaonly.ckpt")
59
- #os.system(f"wget -q https://huggingface.co/runwayml/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/sd-v1-5-inpainting.ckpt")
60
- os.system(f"wget -q https://huggingface.co/stabilityai/stable-diffusion-2/resolve/main/768-v-ema.ckpt -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.ckpt")
61
- os.system(f"wget -q https://raw.githubusercontent.com/Stability-AI/stablediffusion/main/configs/stable-diffusion/v2-inference-v.yaml -O /home/user/app/stable-diffusion-webui/models/Stable-diffusion/768-v-ema.yaml")
62
- os.system(f"python launch.py --force-enable-xformers --ui-config-file /home/user/app/ui-config.json --ui-settings-file /home/user/app/config.json --disable-console-progressbars --enable-console-prompts --cors-allow-origins huggingface.co,hf.space --no-progressbar-hiding --api --skip-torch-cuda-test --disable-safe-unpickle")
spaces/Cong723/gpt-academic-public/request_llm/bridge_chatglm.py DELETED
@@ -1,160 +0,0 @@
1
-
2
- from transformers import AutoModel, AutoTokenizer
3
- import time
4
- import threading
5
- import importlib
6
- from toolbox import update_ui, get_conf
7
- from multiprocessing import Process, Pipe
8
-
9
- load_message = "ChatGLM尚未加载,加载需要一段时间。注意,取决于`config.py`的配置,ChatGLM消耗大量的内存(CPU)或显存(GPU),也许会导致低配计算机卡死 ……"
10
-
11
- #################################################################################
12
- class GetGLMHandle(Process):
13
- def __init__(self):
14
- super().__init__(daemon=True)
15
- self.parent, self.child = Pipe()
16
- self.chatglm_model = None
17
- self.chatglm_tokenizer = None
18
- self.info = ""
19
- self.success = True
20
- self.check_dependency()
21
- self.start()
22
- self.threadLock = threading.Lock()
23
-
24
- def check_dependency(self):
25
- try:
26
- import sentencepiece
27
- self.info = "依赖检测通过"
28
- self.success = True
29
- except:
30
- self.info = "缺少ChatGLM的依赖,如果要使用ChatGLM,除了基础的pip依赖以外,您还需要运行`pip install -r request_llm/requirements_chatglm.txt`安装ChatGLM的依赖。"
31
- self.success = False
32
-
33
- def ready(self):
34
- return self.chatglm_model is not None
35
-
36
- def run(self):
37
-         # Runs in the child process
38
-         # First run: load the model parameters
39
- retry = 0
40
- while True:
41
- try:
42
- if self.chatglm_model is None:
43
- self.chatglm_tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
44
- device, = get_conf('LOCAL_MODEL_DEVICE')
45
- if device=='cpu':
46
- self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).float()
47
- else:
48
- self.chatglm_model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda()
49
- self.chatglm_model = self.chatglm_model.eval()
50
- break
51
- else:
52
- break
53
- except:
54
- retry += 1
55
- if retry > 3:
56
- self.child.send('[Local Message] Call ChatGLM fail 不能正常加载ChatGLM的参数。')
57
- raise RuntimeError("不能正常加载ChatGLM的参数!")
58
-
59
- while True:
60
-             # Wait for the next task
61
- kwargs = self.child.recv()
62
-             # Message received, start handling the request
63
- try:
64
- for response, history in self.chatglm_model.stream_chat(self.chatglm_tokenizer, **kwargs):
65
- self.child.send(response)
66
-                 # # Optionally receive a termination command mid-stream (if any)
67
- # if self.child.poll():
68
- # command = self.child.recv()
69
- # if command == '[Terminate]': break
70
- except:
71
- self.child.send('[Local Message] Call ChatGLM fail.')
72
-             # Request finished, start the next loop
73
- self.child.send('[Finish]')
74
-
75
- def stream_chat(self, **kwargs):
76
-         # Runs in the main process
77
- self.threadLock.acquire()
78
- self.parent.send(kwargs)
79
- while True:
80
- res = self.parent.recv()
81
- if res != '[Finish]':
82
- yield res
83
- else:
84
- break
85
- self.threadLock.release()
86
-
87
- global glm_handle
88
- glm_handle = None
89
- #################################################################################
90
- def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=None, console_slience=False):
91
- """
92
-         Multi-threaded method
93
-         For a description of this function, see request_llm/bridge_all.py
94
- """
95
- global glm_handle
96
- if glm_handle is None:
97
- glm_handle = GetGLMHandle()
98
- observe_window[0] = load_message + "\n\n" + glm_handle.info
99
- if not glm_handle.success:
100
- error = glm_handle.info
101
- glm_handle = None
102
- raise RuntimeError(error)
103
-
104
-     # chatglm has no sys_prompt interface, so the prompt is folded into the history
105
- history_feedin = []
106
- history_feedin.append(["What can I do?", sys_prompt])
107
- for i in range(len(history)//2):
108
- history_feedin.append([history[2*i], history[2*i+1]] )
109
-
110
-     watch_dog_patience = 5 # watchdog patience; 5 seconds is enough
111
- response = ""
112
- for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
113
- observe_window[0] = response
114
- if len(observe_window) >= 2:
115
- if (time.time()-observe_window[1]) > watch_dog_patience:
116
- raise RuntimeError("程序终止。")
117
- return response
118
-
119
-
120
-
121
- def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream = True, additional_fn=None):
122
- """
123
-         Single-threaded method
124
-         For a description of this function, see request_llm/bridge_all.py
125
- """
126
- chatbot.append((inputs, ""))
127
-
128
- global glm_handle
129
- if glm_handle is None:
130
- glm_handle = GetGLMHandle()
131
- chatbot[-1] = (inputs, load_message + "\n\n" + glm_handle.info)
132
- yield from update_ui(chatbot=chatbot, history=[])
133
- if not glm_handle.success:
134
- glm_handle = None
135
- return
136
-
137
- if additional_fn is not None:
138
- import core_functional
139
-         importlib.reload(core_functional)    # hot-reload the prompt definitions
140
- core_functional = core_functional.get_core_functions()
141
-         if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # apply the preprocessing function (if any)
142
- inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]
143
-
144
-     # Build the chat history
145
- history_feedin = []
146
- history_feedin.append(["What can I do?", system_prompt] )
147
- for i in range(len(history)//2):
148
- history_feedin.append([history[2*i], history[2*i+1]] )
149
-
150
-     # Start receiving ChatGLM's streamed reply
151
- response = "[Local Message]: 等待ChatGLM响应中 ..."
152
- for response in glm_handle.stream_chat(query=inputs, history=history_feedin, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
153
- chatbot[-1] = (inputs, response)
154
- yield from update_ui(chatbot=chatbot, history=history)
155
-
156
-     # Finalize the output
157
- if response == "[Local Message]: 等待ChatGLM响应中 ...":
158
- response = "[Local Message]: ChatGLM响应异常 ..."
159
- history.extend([inputs, response])
160
- yield from update_ui(chatbot=chatbot, history=history)
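Both bridge functions ultimately call `stream_chat` on the THUDM/chatglm-6b model loaded in the child process. A stripped-down sketch of that underlying call (assuming the model weights are available locally; not part of the commit):

```python
# Sketch: the raw streaming call that GetGLMHandle wraps and proxies over a Pipe.
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True)
model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda().eval()

# The bridge folds the system prompt into the history as the answer to "What can I do?"
history = [["What can I do?", "You are a helpful assistant."]]
for partial, history in model.stream_chat(tokenizer, query="Hello", history=history,
                                          max_length=2048, top_p=0.7, temperature=0.95):
    print(partial)  # incrementally growing reply, mirroring what run() sends back
```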
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/h11/_abnf.py DELETED
@@ -1,132 +0,0 @@
1
- # We use native strings for all the re patterns, to take advantage of string
2
- # formatting, and then convert to bytestrings when compiling the final re
3
- # objects.
4
-
5
- # https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#whitespace
6
- # OWS = *( SP / HTAB )
7
- # ; optional whitespace
8
- OWS = r"[ \t]*"
9
-
10
- # https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.token.separators
11
- # token = 1*tchar
12
- #
13
- # tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
14
- # / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
15
- # / DIGIT / ALPHA
16
- # ; any VCHAR, except delimiters
17
- token = r"[-!#$%&'*+.^_`|~0-9a-zA-Z]+"
18
-
19
- # https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#header.fields
20
- # field-name = token
21
- field_name = token
22
-
23
- # The standard says:
24
- #
25
- # field-value = *( field-content / obs-fold )
26
- # field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
27
- # field-vchar = VCHAR / obs-text
28
- # obs-fold = CRLF 1*( SP / HTAB )
29
- # ; obsolete line folding
30
- # ; see Section 3.2.4
31
- #
32
- # https://tools.ietf.org/html/rfc5234#appendix-B.1
33
- #
34
- # VCHAR = %x21-7E
35
- # ; visible (printing) characters
36
- #
37
- # https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.quoted-string
38
- # obs-text = %x80-FF
39
- #
40
- # However, the standard definition of field-content is WRONG! It disallows
41
- # fields containing a single visible character surrounded by whitespace,
42
- # e.g. "foo a bar".
43
- #
44
- # See: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189
45
- #
46
- # So our definition of field_content attempts to fix it up...
47
- #
48
- # Also, we allow lots of control characters, because apparently people assume
49
- # that they're legal in practice (e.g., google analytics makes cookies with
50
- # \x01 in them!):
51
- # https://github.com/python-hyper/h11/issues/57
52
- # We still don't allow NUL or whitespace, because those are often treated as
53
- # meta-characters and letting them through can lead to nasty issues like SSRF.
54
- vchar = r"[\x21-\x7e]"
55
- vchar_or_obs_text = r"[^\x00\s]"
56
- field_vchar = vchar_or_obs_text
57
- field_content = r"{field_vchar}+(?:[ \t]+{field_vchar}+)*".format(**globals())
58
-
59
- # We handle obs-fold at a different level, and our fixed-up field_content
60
- # already grows to swallow the whole value, so ? instead of *
61
- field_value = r"({field_content})?".format(**globals())
62
-
63
- # header-field = field-name ":" OWS field-value OWS
64
- header_field = (
65
- r"(?P<field_name>{field_name})"
66
- r":"
67
- r"{OWS}"
68
- r"(?P<field_value>{field_value})"
69
- r"{OWS}".format(**globals())
70
- )
71
-
72
- # https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#request.line
73
- #
74
- # request-line = method SP request-target SP HTTP-version CRLF
75
- # method = token
76
- # HTTP-version = HTTP-name "/" DIGIT "." DIGIT
77
- # HTTP-name = %x48.54.54.50 ; "HTTP", case-sensitive
78
- #
79
- # request-target is complicated (see RFC 7230 sec 5.3) -- could be path, full
80
- # URL, host+port (for connect), or even "*", but in any case we are guaranteed
81
- # that it consists of the visible printing characters.
82
- method = token
83
- request_target = r"{vchar}+".format(**globals())
84
- http_version = r"HTTP/(?P<http_version>[0-9]\.[0-9])"
85
- request_line = (
86
- r"(?P<method>{method})"
87
- r" "
88
- r"(?P<target>{request_target})"
89
- r" "
90
- r"{http_version}".format(**globals())
91
- )
92
-
93
- # https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#status.line
94
- #
95
- # status-line = HTTP-version SP status-code SP reason-phrase CRLF
96
- # status-code = 3DIGIT
97
- # reason-phrase = *( HTAB / SP / VCHAR / obs-text )
98
- status_code = r"[0-9]{3}"
99
- reason_phrase = r"([ \t]|{vchar_or_obs_text})*".format(**globals())
100
- status_line = (
101
- r"{http_version}"
102
- r" "
103
- r"(?P<status_code>{status_code})"
104
- # However, there are apparently a few too many servers out there that just
105
- # leave out the reason phrase:
106
- # https://github.com/scrapy/scrapy/issues/345#issuecomment-281756036
107
- # https://github.com/seanmonstar/httparse/issues/29
108
- # so make it optional. ?: is a non-capturing group.
109
- r"(?: (?P<reason>{reason_phrase}))?".format(**globals())
110
- )
111
-
112
- HEXDIG = r"[0-9A-Fa-f]"
113
- # Actually
114
- #
115
- # chunk-size = 1*HEXDIG
116
- #
117
- # but we impose an upper-limit to avoid ridiculosity. len(str(2**64)) == 20
118
- chunk_size = r"({HEXDIG}){{1,20}}".format(**globals())
119
- # Actually
120
- #
121
- # chunk-ext = *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
122
- #
123
- # but we aren't parsing the things so we don't really care.
124
- chunk_ext = r";.*"
125
- chunk_header = (
126
- r"(?P<chunk_size>{chunk_size})"
127
- r"(?P<chunk_ext>{chunk_ext})?"
128
- r"{OWS}\r\n".format(
129
- **globals()
130
- ) # Even though the specification does not allow for extra whitespaces,
131
- # we are lenient with trailing whitespaces because some servers on the wild use it.
132
- )
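These patterns are meant to be compiled into byte-level regexes and matched against wire data, roughly as h11's reader code does. A small sketch (not part of the commit):

```python
# Sketch: compile the request-line pattern defined above and pull out its named groups.
import re
from h11._abnf import request_line

request_line_re = re.compile(request_line.encode("ascii"))

m = request_line_re.fullmatch(b"GET /index.html HTTP/1.1")
assert m is not None
print(m.group("method"), m.group("target"), m.group("http_version"))
# b'GET' b'/index.html' b'1.1'
```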
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/huggingface_hub/utils/_headers.py DELETED
@@ -1,234 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2022-present, the HuggingFace Inc. team.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """Contains utilities to handle headers to send in calls to Huggingface Hub."""
16
- from typing import Dict, Optional, Union
17
-
18
- from .. import constants
19
- from ._hf_folder import HfFolder
20
- from ._runtime import (
21
- get_fastai_version,
22
- get_fastcore_version,
23
- get_hf_hub_version,
24
- get_python_version,
25
- get_tf_version,
26
- get_torch_version,
27
- is_fastai_available,
28
- is_fastcore_available,
29
- is_tf_available,
30
- is_torch_available,
31
- )
32
- from ._validators import validate_hf_hub_args
33
-
34
-
35
- class LocalTokenNotFoundError(EnvironmentError):
36
- """Raised if local token is required but not found."""
37
-
38
-
39
- @validate_hf_hub_args
40
- def build_hf_headers(
41
- *,
42
- token: Optional[Union[bool, str]] = None,
43
- is_write_action: bool = False,
44
- library_name: Optional[str] = None,
45
- library_version: Optional[str] = None,
46
- user_agent: Union[Dict, str, None] = None,
47
- ) -> Dict[str, str]:
48
- """
49
- Build headers dictionary to send in a HF Hub call.
50
-
51
- By default, authorization token is always provided either from argument (explicit
52
- use) or retrieved from the cache (implicit use). To explicitly avoid sending the
53
- token to the Hub, set `token=False` or set the `HF_HUB_DISABLE_IMPLICIT_TOKEN`
54
- environment variable.
55
-
56
- In case of an API call that requires write access, an error is thrown if token is
57
- `None` or token is an organization token (starting with `"api_org***"`).
58
-
59
- In addition to the auth header, a user-agent is added to provide information about
60
- the installed packages (versions of python, huggingface_hub, torch, tensorflow,
61
- fastai and fastcore).
62
-
63
- Args:
64
- token (`str`, `bool`, *optional*):
65
- The token to be sent in authorization header for the Hub call:
66
- - if a string, it is used as the Hugging Face token
67
- - if `True`, the token is read from the machine (cache or env variable)
68
- - if `False`, authorization header is not set
69
- - if `None`, the token is read from the machine only except if
70
- `HF_HUB_DISABLE_IMPLICIT_TOKEN` env variable is set.
71
- is_write_action (`bool`, default to `False`):
72
- Set to True if the API call requires a write access. If `True`, the token
73
- will be validated (cannot be `None`, cannot start by `"api_org***"`).
74
- library_name (`str`, *optional*):
75
- The name of the library that is making the HTTP request. Will be added to
76
- the user-agent header.
77
- library_version (`str`, *optional*):
78
- The version of the library that is making the HTTP request. Will be added
79
- to the user-agent header.
80
- user_agent (`str`, `dict`, *optional*):
81
- The user agent info in the form of a dictionary or a single string. It will
82
- be completed with information about the installed packages.
83
-
84
- Returns:
85
- A `Dict` of headers to pass in your API call.
86
-
87
- Example:
88
- ```py
89
- >>> build_hf_headers(token="hf_***") # explicit token
90
- {"authorization": "Bearer hf_***", "user-agent": ""}
91
-
92
- >>> build_hf_headers(token=True) # explicitly use cached token
93
- {"authorization": "Bearer hf_***",...}
94
-
95
- >>> build_hf_headers(token=False) # explicitly don't use cached token
96
- {"user-agent": ...}
97
-
98
- >>> build_hf_headers() # implicit use of the cached token
99
- {"authorization": "Bearer hf_***",...}
100
-
101
- # HF_HUB_DISABLE_IMPLICIT_TOKEN=True # to set as env variable
102
- >>> build_hf_headers() # token is not sent
103
- {"user-agent": ...}
104
-
105
- >>> build_hf_headers(token="api_org_***", is_write_action=True)
106
- ValueError: You must use your personal account token for write-access methods.
107
-
108
- >>> build_hf_headers(library_name="transformers", library_version="1.2.3")
109
- {"authorization": ..., "user-agent": "transformers/1.2.3; hf_hub/0.10.2; python/3.10.4; tensorflow/1.55"}
110
- ```
111
-
112
- Raises:
113
- [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
114
- If organization token is passed and "write" access is required.
115
- [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
116
- If "write" access is required but token is not passed and not saved locally.
117
- [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
118
- If `token=True` but token is not saved locally.
119
- """
120
- # Get auth token to send
121
- token_to_send = get_token_to_send(token)
122
- _validate_token_to_send(token_to_send, is_write_action=is_write_action)
123
-
124
- # Combine headers
125
- headers = {
126
- "user-agent": _http_user_agent(
127
- library_name=library_name,
128
- library_version=library_version,
129
- user_agent=user_agent,
130
- )
131
- }
132
- if token_to_send is not None:
133
- headers["authorization"] = f"Bearer {token_to_send}"
134
- return headers
135
-
136
-
137
- def get_token_to_send(token: Optional[Union[bool, str]]) -> Optional[str]:
138
- """Select the token to send from either `token` or the cache."""
139
- # Case token is explicitly provided
140
- if isinstance(token, str):
141
- return token
142
-
143
- # Case token is explicitly forbidden
144
- if token is False:
145
- return None
146
-
147
- # Token is not provided: we get it from local cache
148
- cached_token = HfFolder().get_token()
149
-
150
- # Case token is explicitly required
151
- if token is True:
152
- if cached_token is None:
153
- raise LocalTokenNotFoundError(
154
- "Token is required (`token=True`), but no token found. You"
155
- " need to provide a token or be logged in to Hugging Face with"
156
- " `huggingface-cli login` or `huggingface_hub.login`. See"
157
- " https://huggingface.co/settings/tokens."
158
- )
159
- return cached_token
160
-
161
- # Case implicit use of the token is forbidden by env variable
162
- if constants.HF_HUB_DISABLE_IMPLICIT_TOKEN:
163
- return None
164
-
165
- # Otherwise: we use the cached token as the user has not explicitly forbidden it
166
- return cached_token
167
-
168
-
169
- def _validate_token_to_send(token: Optional[str], is_write_action: bool) -> None:
170
- if is_write_action:
171
- if token is None:
172
- raise ValueError(
173
- "Token is required (write-access action) but no token found. You need"
174
- " to provide a token or be logged in to Hugging Face with"
175
- " `huggingface-cli login` or `huggingface_hub.login`. See"
176
- " https://huggingface.co/settings/tokens."
177
- )
178
- if token.startswith("api_org"):
179
- raise ValueError(
180
- "You must use your personal account token for write-access methods. To"
181
- " generate a write-access token, go to"
182
- " https://huggingface.co/settings/tokens"
183
- )
184
-
185
-
186
- def _http_user_agent(
187
- *,
188
- library_name: Optional[str] = None,
189
- library_version: Optional[str] = None,
190
- user_agent: Union[Dict, str, None] = None,
191
- ) -> str:
192
- """Format a user-agent string containing information about the installed packages.
193
-
194
- Args:
195
- library_name (`str`, *optional*):
196
- The name of the library that is making the HTTP request.
197
- library_version (`str`, *optional*):
198
- The version of the library that is making the HTTP request.
199
- user_agent (`str`, `dict`, *optional*):
200
- The user agent info in the form of a dictionary or a single string.
201
-
202
- Returns:
203
- The formatted user-agent string.
204
- """
205
- if library_name is not None:
206
- ua = f"{library_name}/{library_version}"
207
- else:
208
- ua = "unknown/None"
209
- ua += f"; hf_hub/{get_hf_hub_version()}"
210
- ua += f"; python/{get_python_version()}"
211
-
212
- if not constants.HF_HUB_DISABLE_TELEMETRY:
213
- if is_torch_available():
214
- ua += f"; torch/{get_torch_version()}"
215
- if is_tf_available():
216
- ua += f"; tensorflow/{get_tf_version()}"
217
- if is_fastai_available():
218
- ua += f"; fastai/{get_fastai_version()}"
219
- if is_fastcore_available():
220
- ua += f"; fastcore/{get_fastcore_version()}"
221
-
222
- if isinstance(user_agent, dict):
223
- ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
224
- elif isinstance(user_agent, str):
225
- ua += "; " + user_agent
226
-
227
- return _deduplicate_user_agent(ua)
228
-
229
-
230
- def _deduplicate_user_agent(user_agent: str) -> str:
231
- """Deduplicate redundant information in the generated user-agent."""
232
- # Split around ";" > Strip whitespaces > Store as dict keys (ensure unicity) > format back as string
233
- # Order is implicitly preserved by dictionary structure (see https://stackoverflow.com/a/53657523).
234
- return "; ".join({key.strip(): None for key in user_agent.split(";")}.keys())
 
spaces/DShrimp/PoseMaker/start.py DELETED
@@ -1,3 +0,0 @@
1
- import subprocess
2
-
3
- subprocess.run("uvicorn app:app --host 0.0.0.0 --port 7860", shell=True)
 
 
 
 
spaces/Deevyankar/Deep-AD/app.py DELETED
@@ -1,464 +0,0 @@
1
- import os
2
- import ants
3
- import monai
4
- import torch
5
- import shutil
6
- import numpy as np
7
- import pandas as pd
8
- import altair as alt
9
- import nibabel as nib
10
- import streamlit as st
11
- from random import randint
12
- from itertools import chain
13
- import antspynet as antspynet
14
- from torch.utils.data import DataLoader
15
- from monai.transforms import Compose, LoadImaged
16
- from monai.networks.nets.efficientnet import EfficientNetBN
17
-
18
- import dicom2nifti
19
-
20
-
21
- st.set_option('deprecation.showPyplotGlobalUse', False)
22
- np.random.seed(0)
23
- torch.manual_seed(0)
24
-
25
- template = ants.image_read('MNI152_T1_1mm_brain.nii.gz')
26
-
27
-
28
- def pre_process(image):
29
- with st.spinner('Reading the image...'):
30
- y = ants.image_read(image)
31
- with st.spinner('Bias field correction ongoing...'):
32
- y = ants.utils.n4_bias_field_correction(y)
33
- with st.spinner('Denoising the image...'):
34
- yn = y + np.random.randn(*y.shape).astype('float32')*5
35
- y = ants.denoise_image(yn, ants.get_mask(y))
36
- with st.spinner('brain_extraction fn. running...'):
37
- x = antspynet.utilities.brain_extraction(
38
- y, modality='t1', antsxnet_cache_directory=None, verbose=True)
39
- y = y*x
40
-     with st.spinner('Registering to the template...'):
41
- y1 = ants.registration(fixed=template, moving=y,
42
- type_of_transform='AffineFast')
43
- with st.spinner('Applying transforms...'):
44
- y = ants.apply_transforms(
45
- fixed=template, moving=y, transformlist=y1['fwdtransforms'])
46
- st.success('Successfully Preprocessed the Image !')
47
- return y
48
-
49
-
50
- col1, col2, col3 = st.columns(3)
51
-
52
- with col1:
53
- st.write(' ')
54
-
55
- with col2:
56
- st.image("unilogo.png")
57
-
58
- with col3:
59
- st.write(' ')
60
-
61
- st.markdown("<h1 style='text-align: center; color: grey;'>Deep-AD: Deep Learning Model for Early Detection of Alzheimer’s</h1>", unsafe_allow_html=True)
62
- st.markdown("<h6 style='text-align: center; color: grey;'>Developed by: Deevyankar Agarwal</h6>",
63
- unsafe_allow_html=True)
64
- st.markdown("<h6 style='text-align: center; color: grey;'> Part Time Ph.D. Student, UVA, Spain</h6>",
65
- unsafe_allow_html=True)
66
- st.write('**Description**: Users can upload T1-weighted MRIs in either NIfTI or DICOM format. After preprocessing (N4 bias field correction, noise removal, brain extraction, and registration to the MNI-152 template), the model classifies the scan into one of three groups.')
67
-
68
- st.markdown('- AD : Alzheimer’s')
69
- st.markdown('- CN : Cognitively Normal')
70
- st.markdown('- SMCI : stable MCI')
71
-
72
- st.write('This application is based on ensemble learning. The output of the multi-class task AD vs. sMCI vs. CN is further validated by the binary classification models AD vs. CN and sMCI vs. AD, implemented with end-to-end learning and 3D transfer learning, respectively. This provides an extra layer of verification for more robust decisions.')
73
- st.markdown('''<br>''', unsafe_allow_html=True)
74
-
75
- element1 = st.write("""
76
- # MRI Classification :brain:
77
- """
78
- )
79
-
80
- if 'key' not in st.session_state:
81
- st.session_state.key = str( randint(1000, 100000000))
82
- file_upload = st.file_uploader("Upload the MRI scan (either a single NIfTI file or a folder containing multiple DICOM files)", type=[
83
- "nii", "gz", "dcm"], accept_multiple_files=True, key=st.session_state.key)
84
- st.set_option('deprecation.showfileUploaderEncoding', False)
85
-
86
-
87
- if file_upload == []:
88
- st.text("No file uploaded !")
89
-
90
- st.text('Note : Please clear existing files before uploading new files')
91
- if st.button('Clear Uploaded File(s)', help='Please clear existing files before uploading new files') and 'key' in st.session_state.keys():
92
- st.session_state.pop('key')
93
- st.experimental_rerun()
94
-
95
- st.write("⚠️ [**Feedback form**](https://forms.gle/xuScGN6Cmf69bsUE9) ⚠️ ")
96
-
97
-
98
- if len(file_upload) == 1:
99
-
100
- for file in file_upload:
101
- file.name = file.name
102
- with open(file.name, "wb") as f:
103
- f.write(file.getbuffer())
104
-
105
- saved_path = f"{file.name}"
106
-
107
- display_image = ants.image_read(saved_path)
108
- element2 = st.pyplot(ants.plot(display_image))
109
-
110
- processed_image = pre_process(saved_path)
111
- a = processed_image.to_nibabel()
112
- saved_preprocessed_path = 'input_image'
113
- nib.save(a, saved_preprocessed_path)
114
- element3 = st.text("Preprocessed Image")
115
- element4 = st.pyplot(ants.plot(f"{saved_preprocessed_path}.nii", cmap="seismic"))
116
-
117
- transformsv = Compose(
118
- [
119
- LoadImaged(keys=["img"])
120
- ]
121
- )
122
-
123
- test_files = [{"img": f"{saved_preprocessed_path}.nii", "label": "NA"}]
124
- test_ds = monai.data.Dataset(data=test_files, transform=transformsv)
125
- test_loader = DataLoader(test_ds, batch_size=1,
126
- pin_memory=torch.cuda.is_available())
127
-
128
- for test_data in test_loader:
129
- test_images, test_labels = test_data["img"], test_data["label"]
130
-
131
- with st.spinner('Performing Inference...'):
132
- model = EfficientNetBN(
133
- "efficientnet-b0", spatial_dims=3, in_channels=1, num_classes=3)
134
- model.load_state_dict(torch.load(
135
- 'MCEBNfold3.pth', map_location='cpu'))
136
- model.eval()
137
- prediction = model(test_images.unsqueeze(1))
138
- pred = prediction.argmax(dim=1).item()
139
- class_names = ["SMCI", "AD", "CN"]
140
- predicted_label = class_names[pred]
141
-
142
- graph_input = list(chain.from_iterable(prediction.tolist()))
143
- "Plot depicting Class Probabilities"
144
- source = pd.DataFrame({
145
- 'Model output': graph_input,
146
- 'class': ["SMCI", "AD", "CN"]
147
- })
148
-
149
- bar_chart = alt.Chart(source).mark_bar().encode(
150
- y='Model output:Q',
151
- x='class:O',
152
- )
153
-
154
- element5 = st.altair_chart(bar_chart, use_container_width=True)
155
-
156
- element6 = st.write(
157
-         f"The MRI scan belongs to the class **{predicted_label}**")
158
-
159
-
160
- if pred == 0:
161
- with st.spinner('Please wait...verifying the model output with another model'):
162
- model_verify = monai.networks.nets.DenseNet264(spatial_dims=3, in_channels=1, out_channels=2)
163
- model_verify.load_state_dict(torch.load(
164
- 'DENSENET264ADvsCNbest_metric_model_classification3d_dict.pth', map_location='cpu'))
165
- model_verify.eval()
166
- prediction_verify = model_verify(test_images.unsqueeze(1))
167
- pred_verify = prediction_verify.argmax(dim=1).item()
168
- class_names_verify = ["CN", "AD"]
169
- predicted_label_verify = class_names_verify[pred_verify]
170
-
171
- if pred_verify == 0:
172
-
173
- if predicted_label_verify == predicted_label:
174
- st.write(
175
-                     f"Successfully verified the result: both models classified the scan as **{predicted_label_verify}**")
176
- else:
177
- st.write(
178
-                     f"Verification gave a different result! **The first model predicted {predicted_label}, the other predicted {predicted_label_verify}**")
179
-
180
- if pred_verify == 1 :
181
-
182
- model_verify = EfficientNetBN(
183
- "efficientnet-b0", spatial_dims=3, in_channels=1, num_classes=2)
184
- model_verify.load_state_dict(torch.load(
185
- 'EBNfold3.pth', map_location='cpu'))
186
- model_verify.eval()
187
- prediction_verify = model_verify(test_images.unsqueeze(1))
188
- pred_verify = prediction_verify.argmax(dim=1).item()
189
- class_names_verify = ["SMCI", "AD"]
190
- predicted_label_verify = class_names_verify[pred_verify]
191
-
192
- if predicted_label_verify == predicted_label:
193
- st.write(
194
-                 f"Successfully verified the result: both models classified the scan as **{predicted_label_verify}**")
195
- else:
196
- st.write(
197
-                 f"Verification gave a different result! **The first model predicted {predicted_label}, the other predicted {predicted_label_verify}**")
198
-
199
-
200
-
201
-
202
-
203
- if pred == 1:
204
- with st.spinner('Please wait...verifying the model output with another model'):
205
- model_verify = EfficientNetBN(
206
- "efficientnet-b0", spatial_dims=3, in_channels=1, num_classes=2)
207
- model_verify.load_state_dict(torch.load(
208
- 'EBNfold3.pth', map_location='cpu'))
209
- model_verify.eval()
210
- prediction_verify = model_verify(test_images.unsqueeze(1))
211
- pred_verify = prediction_verify.argmax(dim=1).item()
212
- class_names_verify = ["SMCI", "AD"]
213
- predicted_label_verify = class_names_verify[pred_verify]
214
-
215
- if predicted_label_verify == predicted_label:
216
- st.write(
217
-                 f"Successfully verified the result: both models classified the scan as **{predicted_label_verify}**")
218
- else:
219
- st.write(
220
-                 f"Verification gave a different result! **The first model predicted {predicted_label}, the other predicted {predicted_label_verify}**")
221
-
222
-
223
-
224
- if pred == 2:
225
- with st.spinner('Please wait...verifying the model output with another model'):
226
- model_verify = EfficientNetBN(
227
- "efficientnet-b0", spatial_dims=3, in_channels=1, num_classes=2)
228
- model_verify.load_state_dict(torch.load(
229
- 'ENB0ADvsCNbest_metric_model_classification3d_dict.pth', map_location='cpu'))
230
- model_verify.eval()
231
- prediction_verify = model_verify(test_images.unsqueeze(1))
232
- pred_verify = prediction_verify.argmax(dim=1).item()
233
- class_names_verify = ["CN", "AD"]
234
- predicted_label_verify = class_names_verify[pred_verify]
235
-
236
- if predicted_label_verify == predicted_label:
237
- st.write(
238
-                 f"Successfully verified the result: both models classified the scan as **{predicted_label_verify}**")
239
- else:
240
- st.write(
241
-                 f"Verification gave a different result! **The first model predicted {predicted_label}, the other predicted {predicted_label_verify}**")
242
-
243
-
244
-
245
-
246
- graph_input_1 = list(chain.from_iterable(prediction_verify.tolist()))
247
-
248
- "Plot depicting verifying model outputs"
249
- source_1 = pd.DataFrame({
250
- 'Model output': graph_input_1,
251
- 'class': class_names_verify
252
- })
253
-
254
- bar_chart_1 = alt.Chart(source_1).mark_bar().encode(
255
- y='Model output:Q',
256
- x='class:O',
257
- )
258
-
259
- st.altair_chart(bar_chart_1, use_container_width=True)
260
-
261
-
262
- if len(file_upload) > 1:
263
-
264
- print(len(file_upload))
265
-
266
- if os.path.exists('tmp') == True:
267
- shutil.rmtree('tmp')
268
- os.makedirs('tmp')
269
-
270
- for file in file_upload:
271
- file.name = file.name
272
- with open(file.name, "wb") as f:
273
- f.write(file.getbuffer())
274
- shutil.copy(file.name, 'tmp')
275
- print(len(file_upload))
276
-
277
- display_image = st.empty()
278
- # display_image = ants.core.ants_image_io.dicom_read('tmp')
279
- saved_path = 'uploaded_image'
280
- display_image = dicom2nifti.dicom_series_to_nifti('tmp', saved_path, reorient_nifti=True)
281
- # nib.save(display_image, saved_path)
282
- display_image = ants.image_read(f"{saved_path}.nii")
283
- element2 = st.pyplot(ants.plot(display_image))
284
-
285
- # b = display_image.to_nibabel()
286
- # saved_path = 'uploaded_image'
287
- # nib.save(b, saved_path)
288
-
289
- processed_image = pre_process(f"{saved_path}.nii")
290
- a = processed_image.to_nibabel()
291
- saved_preprocessed_path = 'input_image'
292
- nib.save(a, saved_preprocessed_path)
293
- element3 = st.text("Preprocessed Image")
294
- element4 = st.pyplot(ants.plot(f"{saved_preprocessed_path}.nii", cmap="seismic"))
295
-
296
- transformsv = Compose(
297
- [
298
- LoadImaged(keys=["img"])
299
- ]
300
- )
301
-
302
- test_files = [{"img": f"{saved_preprocessed_path}.nii", "label": 1}]
303
- test_ds = monai.data.Dataset(data=test_files, transform=transformsv)
304
- test_loader = DataLoader(test_ds, batch_size=1,
305
- pin_memory=torch.cuda.is_available())
306
-
307
- for test_data in test_loader:
308
- test_images, test_labels = test_data["img"], test_data["label"]
309
- with st.spinner('Performing Inference...'):
310
- model = EfficientNetBN(
311
- "efficientnet-b0", spatial_dims=3, in_channels=1, num_classes=3)
312
- model.load_state_dict(torch.load(
313
- 'MCEBNfold3.pth', map_location='cpu'))
314
- model.eval()
315
- prediction = model(test_images.unsqueeze(1))
316
- pred = prediction.argmax(dim=1).item()
317
- class_names = ["SMCI", "AD", "CN"]
318
- predicted_label = class_names[pred]
319
-
320
- graph_input = list(chain.from_iterable(prediction.tolist()))
321
- "Plot depicting Class Probabilities"
322
- source = pd.DataFrame({
323
- 'Model output': graph_input,
324
- 'class': ["SMCI", "AD", "CN"]
325
- })
326
-
327
- bar_chart = alt.Chart(source).mark_bar().encode(
328
- y='Model output:Q',
329
- x='class:O',
330
- )
331
-
332
- element5 = st.altair_chart(bar_chart, use_container_width=True)
333
-
334
- element6 = st.write(
335
-             f"The MRI scan belongs to the class **{predicted_label}**")
336
-
337
-
338
-
339
- if pred == 0:
340
- with st.spinner('Please wait...verifying the model output with another model'):
341
- model_verify = monai.networks.nets.DenseNet264(spatial_dims=3, in_channels=1, out_channels=2)
342
- model_verify.load_state_dict(torch.load(
343
- 'DENSENET264ADvsCNbest_metric_model_classification3d_dict.pth', map_location='cpu'))
344
- model_verify.eval()
345
- prediction_verify = model_verify(test_images.unsqueeze(1))
346
- pred_verify = prediction_verify.argmax(dim=1).item()
347
- class_names_verify = ["CN", "AD"]
348
- predicted_label_verify = class_names_verify[pred_verify]
349
-
350
- if pred_verify == 0:
351
-
352
- if predicted_label_verify == predicted_label:
353
- st.write(
354
-                     f"Successfully verified the result: both models classified the scan as **{predicted_label_verify}**")
355
- else:
356
- st.write(
357
-                     f"Verification gave a different result! **The first model predicted {predicted_label}, the other predicted {predicted_label_verify}**")
358
-
359
- if pred_verify == 1 :
360
-
361
- model_verify = EfficientNetBN(
362
- "efficientnet-b0", spatial_dims=3, in_channels=1, num_classes=2)
363
- model_verify.load_state_dict(torch.load(
364
- 'EBNfold3.pth', map_location='cpu'))
365
- model_verify.eval()
366
- prediction_verify = model_verify(test_images.unsqueeze(1))
367
- pred_verify = prediction_verify.argmax(dim=1).item()
368
- class_names_verify = ["SMCI", "AD"]
369
- predicted_label_verify = class_names_verify[pred_verify]
370
-
371
- if predicted_label_verify == predicted_label:
372
- st.write(
373
-                 f"Successfully verified the result: both models classified the scan as **{predicted_label_verify}**")
374
- else:
375
- st.write(
376
-                 f"Verification gave a different result! **The first model predicted {predicted_label}, the other predicted {predicted_label_verify}**")
377
-
378
-
379
-
380
-
381
- if pred == 1:
382
- with st.spinner('Please wait...verifying the model output with another model'):
383
- model_verify = EfficientNetBN(
384
- "efficientnet-b0", spatial_dims=3, in_channels=1, num_classes=2)
385
- model_verify.load_state_dict(torch.load(
386
- 'EBNfold3.pth', map_location='cpu'))
387
- model_verify.eval()
388
- prediction_verify = model_verify(test_images.unsqueeze(1))
389
- pred_verify = prediction_verify.argmax(dim=1).item()
390
- class_names_verify = ["SMCI", "AD"]
391
- predicted_label_verify = class_names_verify[pred_verify]
392
-
393
- if predicted_label_verify == predicted_label:
394
- st.write(
395
-                 f"Successfully verified the result: both models classified the scan as **{predicted_label_verify}**")
396
- else:
397
- st.write(
398
-                 f"Verification gave a different result! **The first model predicted {predicted_label}, the other predicted {predicted_label_verify}**")
399
-
400
-
401
-
402
- if pred == 2:
403
- with st.spinner('Please wait...verifying the model output with another model'):
404
- model_verify = monai.networks.nets.DenseNet264(spatial_dims=3, in_channels=1, out_channels=2)
405
- model_verify.load_state_dict(torch.load(
406
- 'F3DENSENET264ADvsCNbest_metric_model_classification3d_dict.pth', map_location='cpu'))
407
- model_verify.eval()
408
- prediction_verify = model_verify(test_images.unsqueeze(1))
409
- pred_verify = prediction_verify.argmax(dim=1).item()
410
- class_names_verify = ["CN", "AD"]
411
- predicted_label_verify = class_names_verify[pred_verify]
412
-
413
- if predicted_label_verify == predicted_label:
414
- st.write(
415
-                 f"Successfully verified the result: both models classified the scan as **{predicted_label_verify}**")
416
- else:
417
- st.write(
418
-                 f"Verification gave a different result! **The first model predicted {predicted_label}, the other predicted {predicted_label_verify}**")
419
-
420
-
421
-
422
-
423
-
424
- graph_input_1 = list(chain.from_iterable(prediction_verify.tolist()))
425
-
426
- "Plot depicting verifying model outputs"
427
- source_1 = pd.DataFrame({
428
- 'Model output': graph_input_1,
429
- 'class': class_names_verify
430
- })
431
-
432
- bar_chart_1 = alt.Chart(source_1).mark_bar().encode(
433
- y='Model output:Q',
434
- x='class:O',
435
- )
436
-
437
- st.altair_chart(bar_chart_1, use_container_width=True)
438
-
439
-
440
- st.markdown('''<br><br>''', unsafe_allow_html=True)
441
- st.markdown('''#### Publications :book:''', unsafe_allow_html=True)
442
-
443
- st.markdown("""1. [Transfer Learning for Alzheimer’s Disease through Neuroimaging Biomarkers: A Systematic Review](https://www.mdpi.com/1424-8220/21/21/7259 ) \n
444
- <small>Q1 Sensors</small> <br><br>
445
-
446
- 2. [End-to-End Deep Learning Architectures Using 3D Neuroimaging Biomarkers for Early Alzheimer’s Diagnosis](https://www.mdpi.com/2227-7390/10/15/2575) \n
447
- <small>Q2 mathematics</small> <br><br>
448
-
449
- 3. [Automated Medical Diagnosis of Alzheimer´s Disease Using an Efficient Net Convolutional Neural Network](https://link.springer.com/article/10.1007/s10916-023-01941-4) \n
450
- <small>Q1 Springer Nature ,Journal of Medical Systems</small> <br><br>
451
- <br>""", unsafe_allow_html=True)
452
-
453
-
454
-
455
- st.markdown('''#### Contact details :mailbox:''', unsafe_allow_html=True)
456
-
457
- st.markdown('''
458
- <b>Group :busts_in_silhouette: &nbsp;</b>: &nbsp; http://www.sigte.tel.uva.es/index.php/en/homepage/
459
- <small>The eHealth and Telemedicine Group (GTe) of the University of Valladolid is a multidisciplinary international group consisting of telecommunications, informatics and medical doctors from different specialties.</small> \n
460
-
461
- <br>
462
-
463
- <b>Email :e-mail: &nbsp;</b> : &nbsp; [email protected]''', unsafe_allow_html=True)
464
-
 
spaces/Div99/Chat-with-Div/polly_utils.py DELETED
@@ -1,635 +0,0 @@
1
- # This class stores Polly voice data. Specifically, the class stores several records containing
2
- # language, lang_code, gender, voice_id and engine. The class also has a method to return the
3
- # voice_id, lang_code and engine given a language and gender.
4
-
5
- NEURAL_ENGINE = "neural"
6
- STANDARD_ENGINE = "standard"
7
-
8
-
9
- class PollyVoiceData:
10
- def get_voice(self, language, gender):
11
- for voice in self.voice_data:
12
- if voice['language'] == language and voice['gender'] == gender:
13
- if voice['neural'] == 'Yes':
14
- return voice['voice_id'], voice['lang_code'], NEURAL_ENGINE
15
- for voice in self.voice_data:
16
- if voice['language'] == language and voice['gender'] == gender:
17
- if voice['standard'] == 'Yes':
18
- return voice['voice_id'], voice['lang_code'], STANDARD_ENGINE
19
- return None, None, None
20
-
21
- def get_whisper_lang_code(self, language):
22
- for voice in self.voice_data:
23
- if voice['language'] == language:
24
- return voice['whisper_lang_code']
25
- return "en"
26
-
27
- def __init__(self):
28
- self.voice_data = [
29
- {'language': 'Arabic',
30
- 'lang_code': 'arb',
31
- 'whisper_lang_code': 'ar',
32
- 'voice_id': 'Zeina',
33
- 'gender': 'Female',
34
- 'neural': 'No',
35
- 'standard': 'Yes'},
36
- {'language': 'Arabic (Gulf)',
37
- 'lang_code': 'ar-AE',
38
- 'whisper_lang_code': 'ar',
39
- 'voice_id': 'Hala',
40
- 'gender': 'Female',
41
- 'neural': 'Yes',
42
- 'standard': 'No'},
43
- {'language': 'Catalan',
44
- 'lang_code': 'ca-ES',
45
- 'whisper_lang_code': 'ca',
46
- 'voice_id': 'Arlet',
47
- 'gender': 'Female',
48
- 'neural': 'Yes',
49
- 'standard': 'No'},
50
- {'language': 'Chinese (Cantonese)',
51
- 'lang_code': 'yue-CN',
52
- 'whisper_lang_code': 'zh',
53
- 'voice_id': 'Hiujin',
54
- 'gender': 'Female',
55
- 'neural': 'Yes',
56
- 'standard': 'No'},
57
- {'language': 'Chinese (Mandarin)',
58
- 'lang_code': 'cmn-CN',
59
- 'whisper_lang_code': 'zh',
60
- 'voice_id': 'Zhiyu',
61
- 'gender': 'Female',
62
- 'neural': 'Yes',
63
- 'standard': 'No'},
64
- {'language': 'Danish',
65
- 'lang_code': 'da-DK',
66
- 'whisper_lang_code': 'da',
67
- 'voice_id': 'Naja',
68
- 'gender': 'Female',
69
- 'neural': 'No',
70
- 'standard': 'Yes'},
71
- {'language': 'Danish',
72
- 'lang_code': 'da-DK',
73
- 'whisper_lang_code': 'da',
74
- 'voice_id': 'Mads',
75
- 'gender': 'Male',
76
- 'neural': 'No',
77
- 'standard': 'Yes'},
78
- {'language': 'Dutch',
79
- 'lang_code': 'nl-NL',
80
- 'whisper_lang_code': 'nl',
81
- 'voice_id': 'Laura',
82
- 'gender': 'Female',
83
- 'neural': 'Yes',
84
- 'standard': 'No'},
85
- {'language': 'Dutch',
86
- 'lang_code': 'nl-NL',
87
- 'whisper_lang_code': 'nl',
88
- 'voice_id': 'Lotte',
89
- 'gender': 'Female',
90
- 'neural': 'No',
91
- 'standard': 'Yes'},
92
- {'language': 'Dutch',
93
- 'lang_code': 'nl-NL',
94
- 'whisper_lang_code': 'nl',
95
- 'voice_id': 'Ruben',
96
- 'gender': 'Male',
97
- 'neural': 'No',
98
- 'standard': 'Yes'},
99
- {'language': 'English (Australian)',
100
- 'lang_code': 'en-AU',
101
- 'whisper_lang_code': 'en',
102
- 'voice_id': 'Nicole',
103
- 'gender': 'Female',
104
- 'neural': 'No',
105
- 'standard': 'Yes'},
106
- {'language': 'English (Australian)',
107
- 'lang_code': 'en-AU',
108
- 'whisper_lang_code': 'en',
109
- 'voice_id': 'Olivia',
110
- 'gender': 'Female',
111
- 'neural': 'Yes',
112
- 'standard': 'No'},
113
- {'language': 'English (Australian)',
114
- 'lang_code': 'en-AU',
115
- 'whisper_lang_code': 'en',
116
- 'voice_id': 'Russell',
117
- 'gender': 'Male',
118
- 'neural': 'No',
119
- 'standard': 'Yes'},
120
- {'language': 'English (British)',
121
- 'lang_code': 'en-GB',
122
- 'whisper_lang_code': 'en',
123
- 'voice_id': 'Amy',
124
- 'gender': 'Female',
125
- 'neural': 'Yes',
126
- 'standard': 'Yes'},
127
- {'language': 'English (British)',
128
- 'lang_code': 'en-GB',
129
- 'whisper_lang_code': 'en',
130
- 'voice_id': 'Emma',
131
- 'gender': 'Female',
132
- 'neural': 'Yes',
133
- 'standard': 'Yes'},
134
- {'language': 'English (British)',
135
- 'lang_code': 'en-GB',
136
- 'whisper_lang_code': 'en',
137
- 'voice_id': 'Brian',
138
- 'gender': 'Male',
139
- 'neural': 'Yes',
140
- 'standard': 'Yes'},
141
- {'language': 'English (British)',
142
- 'lang_code': 'en-GB',
143
- 'whisper_lang_code': 'en',
144
- 'voice_id': 'Arthur',
145
- 'gender': 'Male',
146
- 'neural': 'Yes',
147
- 'standard': 'No'},
148
- {'language': 'English (Indian)',
149
- 'lang_code': 'en-IN',
150
- 'whisper_lang_code': 'en',
151
- 'voice_id': 'Aditi',
152
- 'gender': 'Female',
153
- 'neural': 'No',
154
- 'standard': 'Yes'},
155
- {'language': 'English (Indian)',
156
- 'lang_code': 'en-IN',
157
- 'whisper_lang_code': 'en',
158
- 'voice_id': 'Raveena',
159
- 'gender': 'Female',
160
- 'neural': 'No',
161
- 'standard': 'Yes'},
162
- {'language': 'English (Indian)',
163
- 'lang_code': 'en-IN',
164
- 'whisper_lang_code': 'en',
165
- 'voice_id': 'Kajal',
166
- 'gender': 'Female',
167
- 'neural': 'Yes',
168
- 'standard': 'No'},
169
- {'language': 'English (New Zealand)',
170
- 'lang_code': 'en-NZ',
171
- 'whisper_lang_code': 'en',
172
- 'voice_id': 'Aria',
173
- 'gender': 'Female',
174
- 'neural': 'Yes',
175
- 'standard': 'No'},
176
- {'language': 'English (South African)',
177
- 'lang_code': 'en-ZA',
178
- 'whisper_lang_code': 'en',
179
- 'voice_id': 'Ayanda',
180
- 'gender': 'Female',
181
- 'neural': 'Yes',
182
- 'standard': 'No'},
183
- {'language': 'English (US)',
184
- 'lang_code': 'en-US',
185
- 'whisper_lang_code': 'en',
186
- 'voice_id': 'Ivy',
187
- 'gender': 'Female (child)',
188
- 'neural': 'Yes',
189
- 'standard': 'Yes'},
190
- {'language': 'English (US)',
191
- 'lang_code': 'en-US',
192
- 'whisper_lang_code': 'en',
193
- 'voice_id': 'Joanna',
194
- 'gender': 'Female',
195
- 'neural': 'Yes',
196
- 'standard': 'Yes'},
197
- {'language': 'English (US)',
198
- 'lang_code': 'en-US',
199
- 'whisper_lang_code': 'en',
200
- 'voice_id': 'Kendra',
201
- 'gender': 'Female',
202
- 'neural': 'Yes',
203
- 'standard': 'Yes'},
204
- {'language': 'English (US)',
205
- 'lang_code': 'en-US',
206
- 'whisper_lang_code': 'en',
207
- 'voice_id': 'Kimberly',
208
- 'gender': 'Female',
209
- 'neural': 'Yes',
210
- 'standard': 'Yes'},
211
- {'language': 'English (US)',
212
- 'lang_code': 'en-US',
213
- 'whisper_lang_code': 'en',
214
- 'voice_id': 'Salli',
215
- 'gender': 'Female',
216
- 'neural': 'Yes',
217
- 'standard': 'Yes'},
218
- {'language': 'English (US)',
219
- 'lang_code': 'en-US',
220
- 'whisper_lang_code': 'en',
221
- 'voice_id': 'Joey',
222
- 'gender': 'Male',
223
- 'neural': 'Yes',
224
- 'standard': 'Yes'},
225
- {'language': 'English (US)',
226
- 'lang_code': 'en-US',
227
- 'whisper_lang_code': 'en',
228
- 'voice_id': 'Justin',
229
- 'gender': 'Male (child)',
230
- 'neural': 'Yes',
231
- 'standard': 'Yes'},
232
- {'language': 'English (US)',
233
- 'lang_code': 'en-US',
234
- 'whisper_lang_code': 'en',
235
- 'voice_id': 'Kevin',
236
- 'gender': 'Male (child)',
237
- 'neural': 'Yes',
238
- 'standard': 'No'},
239
- {'language': 'English (US)',
240
- 'lang_code': 'en-US',
241
- 'whisper_lang_code': 'en',
242
- 'voice_id': 'Matthew',
243
- 'gender': 'Male',
244
- 'neural': 'Yes',
245
- 'standard': 'Yes'},
246
- {'language': 'English (Welsh)',
247
- 'lang_code': 'en-GB-WLS',
248
- 'whisper_lang_code': 'en',
249
- 'voice_id': 'Geraint',
250
- 'gender': 'Male',
251
- 'neural': 'No',
252
- 'standard': 'Yes'},
253
- {'language': 'Finnish',
254
- 'lang_code': 'fi-FI',
255
- 'whisper_lang_code': 'fi',
256
- 'voice_id': 'Suvi',
257
- 'gender': 'Female',
258
- 'neural': 'Yes',
259
- 'standard': 'No'},
260
- {'language': 'French',
261
- 'lang_code': 'fr-FR',
262
- 'whisper_lang_code': 'fr',
263
- 'voice_id': 'Celine',
264
- 'gender': 'Female',
265
- 'neural': 'No',
266
- 'standard': 'Yes'},
267
- {'language': 'French',
268
- 'lang_code': 'fr-FR',
269
- 'whisper_lang_code': 'fr',
270
- 'voice_id': 'Lea',
271
- 'gender': 'Female',
272
- 'neural': 'Yes',
273
- 'standard': 'Yes'},
274
- {'language': 'French',
275
- 'lang_code': 'fr-FR',
276
- 'whisper_lang_code': 'fr',
277
- 'voice_id': 'Mathieu',
278
- 'gender': 'Male',
279
- 'neural': 'No',
280
- 'standard': 'Yes'},
281
- {'language': 'French (Canadian)',
282
- 'lang_code': 'fr-CA',
283
- 'whisper_lang_code': 'fr',
284
- 'voice_id': 'Chantal',
285
- 'gender': 'Female',
286
- 'neural': 'No',
287
- 'standard': 'Yes'},
288
- {'language': 'French (Canadian)',
289
- 'lang_code': 'fr-CA',
290
- 'whisper_lang_code': 'fr',
291
- 'voice_id': 'Gabrielle',
292
- 'gender': 'Female',
293
- 'neural': 'Yes',
294
- 'standard': 'No'},
295
- {'language': 'French (Canadian)',
296
- 'lang_code': 'fr-CA',
297
- 'whisper_lang_code': 'fr',
298
- 'voice_id': 'Liam',
299
- 'gender': 'Male',
300
- 'neural': 'Yes',
301
- 'standard': 'No'},
302
- {'language': 'German',
303
- 'lang_code': 'de-DE',
304
- 'whisper_lang_code': 'de',
305
- 'voice_id': 'Marlene',
306
- 'gender': 'Female',
307
- 'neural': 'No',
308
- 'standard': 'Yes'},
309
- {'language': 'German',
310
- 'lang_code': 'de-DE',
311
- 'whisper_lang_code': 'de',
312
- 'voice_id': 'Vicki',
313
- 'gender': 'Female',
314
- 'neural': 'Yes',
315
- 'standard': 'Yes'},
316
- {'language': 'German',
317
- 'lang_code': 'de-DE',
318
- 'whisper_lang_code': 'de',
319
- 'voice_id': 'Hans',
320
- 'gender': 'Male',
321
- 'neural': 'No',
322
- 'standard': 'Yes'},
323
- {'language': 'German',
324
- 'lang_code': 'de-DE',
325
- 'whisper_lang_code': 'de',
326
- 'voice_id': 'Daniel',
327
- 'gender': 'Male',
328
- 'neural': 'Yes',
329
- 'standard': 'No'},
330
- {'language': 'German (Austrian)',
331
- 'lang_code': 'de-AT',
332
- 'whisper_lang_code': 'de',
333
- 'voice_id': 'Hannah',
334
- 'gender': 'Female',
335
- 'neural': 'Yes',
336
- 'standard': 'No'},
337
- {'language': 'Hindi',
338
- 'lang_code': 'hi-IN',
339
- 'whisper_lang_code': 'hi',
340
- 'voice_id': 'Aditi',
341
- 'gender': 'Female',
342
- 'neural': 'No',
343
- 'standard': 'Yes'},
344
- {'language': 'Hindi',
345
- 'lang_code': 'hi-IN',
346
- 'whisper_lang_code': 'hi',
347
- 'voice_id': 'Kajal',
348
- 'gender': 'Female',
349
- 'neural': 'Yes',
350
- 'standard': 'No'},
351
- {'language': 'Icelandic',
352
- 'lang_code': 'is-IS',
353
- 'whisper_lang_code': 'is',
354
- 'voice_id': 'Dora',
355
- 'gender': 'Female',
356
- 'neural': 'No',
357
- 'standard': 'Yes'},
358
- {'language': 'Icelandic',
359
- 'lang_code': 'is-IS',
360
- 'whisper_lang_code': 'is',
361
- 'voice_id': 'Karl',
362
- 'gender': 'Male',
363
- 'neural': 'No',
364
- 'standard': 'Yes'},
365
- {'language': 'Italian',
366
- 'lang_code': 'it-IT',
367
- 'whisper_lang_code': 'it',
368
- 'voice_id': 'Carla',
369
- 'gender': 'Female',
370
- 'neural': 'No',
371
- 'standard': 'Yes'},
372
- {'language': 'Italian',
373
- 'lang_code': 'it-IT',
374
- 'whisper_lang_code': 'it',
375
- 'voice_id': 'Bianca',
376
- 'gender': 'Female',
377
- 'neural': 'Yes',
378
- 'standard': 'Yes'},
379
- {'language': 'Japanese',
380
- 'lang_code': 'ja-JP',
381
- 'whisper_lang_code': 'ja',
382
- 'voice_id': 'Mizuki',
383
- 'gender': 'Female',
384
- 'neural': 'No',
385
- 'standard': 'Yes'},
386
- {'language': 'Japanese',
387
- 'lang_code': 'ja-JP',
388
- 'whisper_lang_code': 'ja',
389
- 'voice_id': 'Takumi',
390
- 'gender': 'Male',
391
- 'neural': 'Yes',
392
- 'standard': 'Yes'},
393
- {'language': 'Korean',
394
- 'lang_code': 'ko-KR',
395
- 'whisper_lang_code': 'ko',
396
- 'voice_id': 'Seoyeon',
397
- 'gender': 'Female',
398
- 'neural': 'Yes',
399
- 'standard': 'Yes'},
400
- {'language': 'Norwegian',
401
- 'lang_code': 'nb-NO',
402
- 'whisper_lang_code': 'no',
403
- 'voice_id': 'Liv',
404
- 'gender': 'Female',
405
- 'neural': 'No',
406
- 'standard': 'Yes'},
407
- {'language': 'Norwegian',
408
- 'lang_code': 'nb-NO',
409
- 'whisper_lang_code': 'no',
410
- 'voice_id': 'Ida',
411
- 'gender': 'Female',
412
- 'neural': 'Yes',
413
- 'standard': 'No'},
414
- {'language': 'Polish',
415
- 'lang_code': 'pl-PL',
416
- 'whisper_lang_code': 'pl',
417
- 'voice_id': 'Ewa',
418
- 'gender': 'Female',
419
- 'neural': 'No',
420
- 'standard': 'Yes'},
421
- {'language': 'Polish',
422
- 'lang_code': 'pl-PL',
423
- 'whisper_lang_code': 'pl',
424
- 'voice_id': 'Maja',
425
- 'gender': 'Female',
426
- 'neural': 'No',
427
- 'standard': 'Yes'},
428
- {'language': 'Polish',
429
- 'lang_code': 'pl-PL',
430
- 'whisper_lang_code': 'pl',
431
- 'voice_id': 'Jacek',
432
- 'gender': 'Male',
433
- 'neural': 'No',
434
- 'standard': 'Yes'},
435
- {'language': 'Polish',
436
- 'lang_code': 'pl-PL',
437
- 'whisper_lang_code': 'pl',
438
- 'voice_id': 'Jan',
439
- 'gender': 'Male',
440
- 'neural': 'No',
441
- 'standard': 'Yes'},
442
- {'language': 'Polish',
443
- 'lang_code': 'pl-PL',
444
- 'whisper_lang_code': 'pl',
445
- 'voice_id': 'Ola',
446
- 'gender': 'Female',
447
- 'neural': 'Yes',
448
- 'standard': 'No'},
449
- {'language': 'Portuguese (Brazilian)',
450
- 'lang_code': 'pt-BR',
451
- 'whisper_lang_code': 'pt',
452
- 'voice_id': 'Camila',
453
- 'gender': 'Female',
454
- 'neural': 'Yes',
455
- 'standard': 'Yes'},
456
- {'language': 'Portuguese (Brazilian)',
457
- 'lang_code': 'pt-BR',
458
- 'whisper_lang_code': 'pt',
459
- 'voice_id': 'Vitoria',
460
- 'gender': 'Female',
461
- 'neural': 'Yes',
462
- 'standard': 'Yes'},
463
- {'language': 'Portuguese (Brazilian)',
464
- 'lang_code': 'pt-BR',
465
- 'whisper_lang_code': 'pt',
466
- 'voice_id': 'Ricardo',
467
- 'gender': 'Male',
468
- 'neural': 'No',
469
- 'standard': 'Yes'},
470
- {'language': 'Portuguese (European)',
471
- 'lang_code': 'pt-PT',
472
- 'whisper_lang_code': 'pt',
473
- 'voice_id': 'Ines',
474
- 'gender': 'Female',
475
- 'neural': 'Yes',
476
- 'standard': 'Yes'},
477
- {'language': 'Portuguese (European)',
478
- 'lang_code': 'pt-PT',
479
- 'whisper_lang_code': 'pt',
480
- 'voice_id': 'Cristiano',
481
- 'gender': 'Male',
482
- 'neural': 'No',
483
- 'standard': 'Yes'},
484
- {'language': 'Romanian',
485
- 'lang_code': 'ro-RO',
486
- 'whisper_lang_code': 'ro',
487
- 'voice_id': 'Carmen',
488
- 'gender': 'Female',
489
- 'neural': 'No',
490
- 'standard': 'Yes'},
491
- {'language': 'Russian',
492
- 'lang_code': 'ru-RU',
493
- 'whisper_lang_code': 'ru',
494
- 'voice_id': 'Tatyana',
495
- 'gender': 'Female',
496
- 'neural': 'No',
497
- 'standard': 'Yes'},
498
- {'language': 'Russian',
499
- 'lang_code': 'ru-RU',
500
- 'whisper_lang_code': 'ru',
501
- 'voice_id': 'Maxim',
502
- 'gender': 'Male',
503
- 'neural': 'No',
504
- 'standard': 'Yes'},
505
- {'language': 'Spanish (European)',
506
- 'lang_code': 'es-ES',
507
- 'whisper_lang_code': 'es',
508
- 'voice_id': 'Conchita',
509
- 'gender': 'Female',
510
- 'neural': 'No',
511
- 'standard': 'Yes'},
512
- {'language': 'Spanish (European)',
513
- 'lang_code': 'es-ES',
514
- 'whisper_lang_code': 'es',
515
- 'voice_id': 'Lucia',
516
- 'gender': 'Female',
517
- 'neural': 'Yes',
518
- 'standard': 'Yes'},
519
- {'language': 'Spanish (European)',
520
- 'lang_code': 'es-ES',
521
- 'whisper_lang_code': 'es',
522
- 'voice_id': 'Enrique',
523
- 'gender': 'Male',
524
- 'neural': 'No',
525
- 'standard': 'Yes'},
526
- {'language': 'Spanish (Mexican)',
527
- 'lang_code': 'es-MX',
528
- 'whisper_lang_code': 'es',
529
- 'voice_id': 'Mia',
530
- 'gender': 'Female',
531
- 'neural': 'Yes',
532
- 'standard': 'Yes'},
533
- {'language': 'Spanish (US)',
534
- 'lang_code': 'es-US',
535
- 'whisper_lang_code': 'es',
536
- 'voice_id': 'Lupe',
537
- 'gender': 'Female',
538
- 'neural': 'Yes',
539
- 'standard': 'Yes'},
540
- {'language': 'Spanish (US)',
541
- 'lang_code': 'es-US',
542
- 'whisper_lang_code': 'es',
543
- 'voice_id': 'Penelope',
544
- 'gender': 'Female',
545
- 'neural': 'No',
546
- 'standard': 'Yes'},
547
- {'language': 'Spanish (US)',
548
- 'lang_code': 'es-US',
549
- 'whisper_lang_code': 'es',
550
- 'voice_id': 'Miguel',
551
- 'gender': 'Male',
552
- 'neural': 'No',
553
- 'standard': 'Yes'},
554
- {'language': 'Spanish (US)',
555
- 'lang_code': 'es-US',
556
- 'whisper_lang_code': 'es',
557
- 'voice_id': 'Pedro',
558
- 'gender': 'Male',
559
- 'neural': 'Yes',
560
- 'standard': 'No'},
561
- {'language': 'Swedish',
562
- 'lang_code': 'sv-SE',
563
- 'whisper_lang_code': 'sv',
564
- 'voice_id': 'Astrid',
565
- 'gender': 'Female',
566
- 'neural': 'No',
567
- 'standard': 'Yes'},
568
- {'language': 'Swedish',
569
- 'lang_code': 'sv-SE',
570
- 'whisper_lang_code': 'sv',
571
- 'voice_id': 'Elin',
572
- 'gender': 'Female',
573
- 'neural': 'Yes',
574
- 'standard': 'No'},
575
- {'language': 'Turkish',
576
- 'lang_code': 'tr-TR',
577
- 'whisper_lang_code': 'tr',
578
- 'voice_id': 'Filiz',
579
- 'gender': 'Female',
580
- 'neural': 'No',
581
- 'standard': 'Yes'},
582
- {'language': 'Welsh',
583
- 'lang_code': 'cy-GB',
584
- 'whisper_lang_code': 'cy',
585
- 'voice_id': 'Gwyneth',
586
- 'gender': 'Female',
587
- 'neural': 'No',
588
- 'standard': 'Yes'}
589
- ]
590
-
591
-
592
- # Run from the command-line
593
- if __name__ == '__main__':
594
- polly_voice_data = PollyVoiceData()
595
-
596
- voice_id, language_code, engine = polly_voice_data.get_voice('English (US)', 'Male')
597
- print('English (US)', 'Male', voice_id, language_code, engine)
598
-
599
- voice_id, language_code, engine = polly_voice_data.get_voice('English (US)', 'Female')
600
- print('English (US)', 'Female', voice_id, language_code, engine)
601
-
602
- voice_id, language_code, engine = polly_voice_data.get_voice('French', 'Female')
603
- print('French', 'Female', voice_id, language_code, engine)
604
-
605
- voice_id, language_code, engine = polly_voice_data.get_voice('French', 'Male')
606
- print('French', 'Male', voice_id, language_code, engine)
607
-
608
- voice_id, language_code, engine = polly_voice_data.get_voice('Japanese', 'Female')
609
- print('Japanese', 'Female', voice_id, language_code, engine)
610
-
611
- voice_id, language_code, engine = polly_voice_data.get_voice('Japanese', 'Male')
612
- print('Japanese', 'Male', voice_id, language_code, engine)
613
-
614
- voice_id, language_code, engine = polly_voice_data.get_voice('Hindi', 'Female')
615
- print('Hindi', 'Female', voice_id, language_code, engine)
616
-
617
- voice_id, language_code, engine = polly_voice_data.get_voice('Hindi', 'Male')
618
- print('Hindi', 'Male', voice_id, language_code, engine)
619
-
620
- whisper_lang_code = polly_voice_data.get_whisper_lang_code('English (US)')
621
- print('English (US) whisper_lang_code:', whisper_lang_code)
622
-
623
- whisper_lang_code = polly_voice_data.get_whisper_lang_code('Chinese (Mandarin)')
624
- print('Chinese (Mandarin) whisper_lang_code:', whisper_lang_code)
625
-
626
- whisper_lang_code = polly_voice_data.get_whisper_lang_code('Norwegian')
627
- print('Norwegian whisper_lang_code:', whisper_lang_code)
628
-
629
- whisper_lang_code = polly_voice_data.get_whisper_lang_code('Dutch')
630
- print('Dutch whisper_lang_code:', whisper_lang_code)
631
-
632
- whisper_lang_code = polly_voice_data.get_whisper_lang_code('Foo')
633
- print('Foo whisper_lang_code:', whisper_lang_code)
634
-
635
-
 
spaces/DragGan/DragGan-Inversion/stylegan_human/openpose/src/model.py DELETED
@@ -1,218 +0,0 @@
1
- import torch
2
- from collections import OrderedDict
3
-
4
- import torch
5
- import torch.nn as nn
6
-
7
-
8
- def make_layers(block, no_relu_layers):
9
- layers = []
10
- for layer_name, v in block.items():
11
- if 'pool' in layer_name:
12
- layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1],
13
- padding=v[2])
14
- layers.append((layer_name, layer))
15
- else:
16
- conv2d = nn.Conv2d(in_channels=v[0], out_channels=v[1],
17
- kernel_size=v[2], stride=v[3],
18
- padding=v[4])
19
- layers.append((layer_name, conv2d))
20
- if layer_name not in no_relu_layers:
21
- layers.append(('relu_'+layer_name, nn.ReLU(inplace=True)))
22
-
23
- return nn.Sequential(OrderedDict(layers))
24
-
25
-
26
- class bodypose_model(nn.Module):
27
- def __init__(self):
28
- super(bodypose_model, self).__init__()
29
-
30
- # these layers have no relu layer
31
- no_relu_layers = ['conv5_5_CPM_L1', 'conv5_5_CPM_L2', 'Mconv7_stage2_L1',
32
- 'Mconv7_stage2_L2', 'Mconv7_stage3_L1', 'Mconv7_stage3_L2',
33
- 'Mconv7_stage4_L1', 'Mconv7_stage4_L2', 'Mconv7_stage5_L1',
34
- 'Mconv7_stage5_L2', 'Mconv7_stage6_L1', 'Mconv7_stage6_L1']
35
- blocks = {}
36
- block0 = OrderedDict([
37
- ('conv1_1', [3, 64, 3, 1, 1]),
38
- ('conv1_2', [64, 64, 3, 1, 1]),
39
- ('pool1_stage1', [2, 2, 0]),
40
- ('conv2_1', [64, 128, 3, 1, 1]),
41
- ('conv2_2', [128, 128, 3, 1, 1]),
42
- ('pool2_stage1', [2, 2, 0]),
43
- ('conv3_1', [128, 256, 3, 1, 1]),
44
- ('conv3_2', [256, 256, 3, 1, 1]),
45
- ('conv3_3', [256, 256, 3, 1, 1]),
46
- ('conv3_4', [256, 256, 3, 1, 1]),
47
- ('pool3_stage1', [2, 2, 0]),
48
- ('conv4_1', [256, 512, 3, 1, 1]),
49
- ('conv4_2', [512, 512, 3, 1, 1]),
50
- ('conv4_3_CPM', [512, 256, 3, 1, 1]),
51
- ('conv4_4_CPM', [256, 128, 3, 1, 1])
52
- ])
53
-
54
- # Stage 1
55
- block1_1 = OrderedDict([
56
- ('conv5_1_CPM_L1', [128, 128, 3, 1, 1]),
57
- ('conv5_2_CPM_L1', [128, 128, 3, 1, 1]),
58
- ('conv5_3_CPM_L1', [128, 128, 3, 1, 1]),
59
- ('conv5_4_CPM_L1', [128, 512, 1, 1, 0]),
60
- ('conv5_5_CPM_L1', [512, 38, 1, 1, 0])
61
- ])
62
-
63
- block1_2 = OrderedDict([
64
- ('conv5_1_CPM_L2', [128, 128, 3, 1, 1]),
65
- ('conv5_2_CPM_L2', [128, 128, 3, 1, 1]),
66
- ('conv5_3_CPM_L2', [128, 128, 3, 1, 1]),
67
- ('conv5_4_CPM_L2', [128, 512, 1, 1, 0]),
68
- ('conv5_5_CPM_L2', [512, 19, 1, 1, 0])
69
- ])
70
- blocks['block1_1'] = block1_1
71
- blocks['block1_2'] = block1_2
72
-
73
- self.model0 = make_layers(block0, no_relu_layers)
74
-
75
- # Stages 2 - 6
76
- for i in range(2, 7):
77
- blocks['block%d_1' % i] = OrderedDict([
78
- ('Mconv1_stage%d_L1' % i, [185, 128, 7, 1, 3]),
79
- ('Mconv2_stage%d_L1' % i, [128, 128, 7, 1, 3]),
80
- ('Mconv3_stage%d_L1' % i, [128, 128, 7, 1, 3]),
81
- ('Mconv4_stage%d_L1' % i, [128, 128, 7, 1, 3]),
82
- ('Mconv5_stage%d_L1' % i, [128, 128, 7, 1, 3]),
83
- ('Mconv6_stage%d_L1' % i, [128, 128, 1, 1, 0]),
84
- ('Mconv7_stage%d_L1' % i, [128, 38, 1, 1, 0])
85
- ])
86
-
87
- blocks['block%d_2' % i] = OrderedDict([
88
- ('Mconv1_stage%d_L2' % i, [185, 128, 7, 1, 3]),
89
- ('Mconv2_stage%d_L2' % i, [128, 128, 7, 1, 3]),
90
- ('Mconv3_stage%d_L2' % i, [128, 128, 7, 1, 3]),
91
- ('Mconv4_stage%d_L2' % i, [128, 128, 7, 1, 3]),
92
- ('Mconv5_stage%d_L2' % i, [128, 128, 7, 1, 3]),
93
- ('Mconv6_stage%d_L2' % i, [128, 128, 1, 1, 0]),
94
- ('Mconv7_stage%d_L2' % i, [128, 19, 1, 1, 0])
95
- ])
96
-
97
- for k in blocks.keys():
98
- blocks[k] = make_layers(blocks[k], no_relu_layers)
99
-
100
- self.model1_1 = blocks['block1_1']
101
- self.model2_1 = blocks['block2_1']
102
- self.model3_1 = blocks['block3_1']
103
- self.model4_1 = blocks['block4_1']
104
- self.model5_1 = blocks['block5_1']
105
- self.model6_1 = blocks['block6_1']
106
-
107
- self.model1_2 = blocks['block1_2']
108
- self.model2_2 = blocks['block2_2']
109
- self.model3_2 = blocks['block3_2']
110
- self.model4_2 = blocks['block4_2']
111
- self.model5_2 = blocks['block5_2']
112
- self.model6_2 = blocks['block6_2']
113
-
114
- def forward(self, x):
115
-
116
- out1 = self.model0(x)
117
-
118
- out1_1 = self.model1_1(out1)
119
- out1_2 = self.model1_2(out1)
120
- out2 = torch.cat([out1_1, out1_2, out1], 1)
121
-
122
- out2_1 = self.model2_1(out2)
123
- out2_2 = self.model2_2(out2)
124
- out3 = torch.cat([out2_1, out2_2, out1], 1)
125
-
126
- out3_1 = self.model3_1(out3)
127
- out3_2 = self.model3_2(out3)
128
- out4 = torch.cat([out3_1, out3_2, out1], 1)
129
-
130
- out4_1 = self.model4_1(out4)
131
- out4_2 = self.model4_2(out4)
132
- out5 = torch.cat([out4_1, out4_2, out1], 1)
133
-
134
- out5_1 = self.model5_1(out5)
135
- out5_2 = self.model5_2(out5)
136
- out6 = torch.cat([out5_1, out5_2, out1], 1)
137
-
138
- out6_1 = self.model6_1(out6)
139
- out6_2 = self.model6_2(out6)
140
-
141
- return out6_1, out6_2
142
-
143
-
144
- class handpose_model(nn.Module):
145
- def __init__(self):
146
- super(handpose_model, self).__init__()
147
-
148
- # these layers have no relu layer
149
- no_relu_layers = ['conv6_2_CPM', 'Mconv7_stage2', 'Mconv7_stage3',
150
- 'Mconv7_stage4', 'Mconv7_stage5', 'Mconv7_stage6']
151
- # stage 1
152
- block1_0 = OrderedDict([
153
- ('conv1_1', [3, 64, 3, 1, 1]),
154
- ('conv1_2', [64, 64, 3, 1, 1]),
155
- ('pool1_stage1', [2, 2, 0]),
156
- ('conv2_1', [64, 128, 3, 1, 1]),
157
- ('conv2_2', [128, 128, 3, 1, 1]),
158
- ('pool2_stage1', [2, 2, 0]),
159
- ('conv3_1', [128, 256, 3, 1, 1]),
160
- ('conv3_2', [256, 256, 3, 1, 1]),
161
- ('conv3_3', [256, 256, 3, 1, 1]),
162
- ('conv3_4', [256, 256, 3, 1, 1]),
163
- ('pool3_stage1', [2, 2, 0]),
164
- ('conv4_1', [256, 512, 3, 1, 1]),
165
- ('conv4_2', [512, 512, 3, 1, 1]),
166
- ('conv4_3', [512, 512, 3, 1, 1]),
167
- ('conv4_4', [512, 512, 3, 1, 1]),
168
- ('conv5_1', [512, 512, 3, 1, 1]),
169
- ('conv5_2', [512, 512, 3, 1, 1]),
170
- ('conv5_3_CPM', [512, 128, 3, 1, 1])
171
- ])
172
-
173
- block1_1 = OrderedDict([
174
- ('conv6_1_CPM', [128, 512, 1, 1, 0]),
175
- ('conv6_2_CPM', [512, 22, 1, 1, 0])
176
- ])
177
-
178
- blocks = {}
179
- blocks['block1_0'] = block1_0
180
- blocks['block1_1'] = block1_1
181
-
182
- # stage 2-6
183
- for i in range(2, 7):
184
- blocks['block%d' % i] = OrderedDict([
185
- ('Mconv1_stage%d' % i, [150, 128, 7, 1, 3]),
186
- ('Mconv2_stage%d' % i, [128, 128, 7, 1, 3]),
187
- ('Mconv3_stage%d' % i, [128, 128, 7, 1, 3]),
188
- ('Mconv4_stage%d' % i, [128, 128, 7, 1, 3]),
189
- ('Mconv5_stage%d' % i, [128, 128, 7, 1, 3]),
190
- ('Mconv6_stage%d' % i, [128, 128, 1, 1, 0]),
191
- ('Mconv7_stage%d' % i, [128, 22, 1, 1, 0])
192
- ])
193
-
194
- for k in blocks.keys():
195
- blocks[k] = make_layers(blocks[k], no_relu_layers)
196
-
197
- self.model1_0 = blocks['block1_0']
198
- self.model1_1 = blocks['block1_1']
199
- self.model2 = blocks['block2']
200
- self.model3 = blocks['block3']
201
- self.model4 = blocks['block4']
202
- self.model5 = blocks['block5']
203
- self.model6 = blocks['block6']
204
-
205
- def forward(self, x):
206
- out1_0 = self.model1_0(x)
207
- out1_1 = self.model1_1(out1_0)
208
- concat_stage2 = torch.cat([out1_1, out1_0], 1)
209
- out_stage2 = self.model2(concat_stage2)
210
- concat_stage3 = torch.cat([out_stage2, out1_0], 1)
211
- out_stage3 = self.model3(concat_stage3)
212
- concat_stage4 = torch.cat([out_stage3, out1_0], 1)
213
- out_stage4 = self.model4(concat_stage4)
214
- concat_stage5 = torch.cat([out_stage4, out1_0], 1)
215
- out_stage5 = self.model5(concat_stage5)
216
- concat_stage6 = torch.cat([out_stage5, out1_0], 1)
217
- out_stage6 = self.model6(concat_stage6)
218
- return out_stage6
 
spaces/DrishtiSharma/Whisper-Serbian-Transcriber/README.md DELETED
@@ -1,15 +0,0 @@
1
- ---
2
- title: Whisper Serbian Transcriber
3
- emoji: 🤫🇷🇸
4
- colorFrom: indigo
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 3.9.1
8
- app_file: app.py
9
- pinned: false
10
- tags:
11
- - whisper-event
12
- duplicated_from: whisper-event/whisper-demo
13
- ---
14
-
15
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/ECCV2022/bytetrack/tutorials/motr/motr.py DELETED
@@ -1,676 +0,0 @@
1
- # ------------------------------------------------------------------------
2
- # Copyright (c) 2021 megvii-model. All Rights Reserved.
3
- # ------------------------------------------------------------------------
4
- # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
5
- # Copyright (c) 2020 SenseTime. All Rights Reserved.
6
- # ------------------------------------------------------------------------
7
- # Modified from DETR (https://github.com/facebookresearch/detr)
8
- # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
9
- # ------------------------------------------------------------------------
10
-
11
- """
12
- DETR model and criterion classes.
13
- """
14
- import copy
15
- import math
16
- import numpy as np
17
- import torch
18
- import torch.nn.functional as F
19
- from torch import nn, Tensor
20
- from typing import List
21
-
22
- from util import box_ops
23
- from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
24
- accuracy, get_world_size, interpolate, get_rank,
25
- is_dist_avail_and_initialized, inverse_sigmoid)
26
-
27
- from models.structures import Instances, Boxes, pairwise_iou, matched_boxlist_iou
28
-
29
- from .backbone import build_backbone
30
- from .matcher import build_matcher
31
- from .deformable_transformer_plus import build_deforamble_transformer
32
- from .qim import build as build_query_interaction_layer
33
- from .memory_bank import build_memory_bank
34
- from .deformable_detr import SetCriterion, MLP
35
- from .segmentation import sigmoid_focal_loss
36
-
37
-
38
- class ClipMatcher(SetCriterion):
39
- def __init__(self, num_classes,
40
- matcher,
41
- weight_dict,
42
- losses):
43
- """ Create the criterion.
44
- Parameters:
45
- num_classes: number of object categories, omitting the special no-object category
46
- matcher: module able to compute a matching between targets and proposals
47
- weight_dict: dict containing as key the names of the losses and as values their relative weight.
48
- eos_coef: relative classification weight applied to the no-object category
49
- losses: list of all the losses to be applied. See get_loss for list of available losses.
50
- """
51
- super().__init__(num_classes, matcher, weight_dict, losses)
52
- self.num_classes = num_classes
53
- self.matcher = matcher
54
- self.weight_dict = weight_dict
55
- self.losses = losses
56
- self.focal_loss = True
57
- self.losses_dict = {}
58
- self._current_frame_idx = 0
59
-
60
- def initialize_for_single_clip(self, gt_instances: List[Instances]):
61
- self.gt_instances = gt_instances
62
- self.num_samples = 0
63
- self.sample_device = None
64
- self._current_frame_idx = 0
65
- self.losses_dict = {}
66
-
67
- def _step(self):
68
- self._current_frame_idx += 1
69
-
70
- def calc_loss_for_track_scores(self, track_instances: Instances):
71
- frame_id = self._current_frame_idx - 1
72
- gt_instances = self.gt_instances[frame_id]
73
- outputs = {
74
- 'pred_logits': track_instances.track_scores[None],
75
- }
76
- device = track_instances.track_scores.device
77
-
78
- num_tracks = len(track_instances)
79
- src_idx = torch.arange(num_tracks, dtype=torch.long, device=device)
80
- tgt_idx = track_instances.matched_gt_idxes # -1 for FP tracks and disappeared tracks
81
-
82
- track_losses = self.get_loss('labels',
83
- outputs=outputs,
84
- gt_instances=[gt_instances],
85
- indices=[(src_idx, tgt_idx)],
86
- num_boxes=1)
87
- self.losses_dict.update(
88
- {'frame_{}_track_{}'.format(frame_id, key): value for key, value in
89
- track_losses.items()})
90
-
91
- def get_num_boxes(self, num_samples):
92
- num_boxes = torch.as_tensor(num_samples, dtype=torch.float, device=self.sample_device)
93
- if is_dist_avail_and_initialized():
94
- torch.distributed.all_reduce(num_boxes)
95
- num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()
96
- return num_boxes
97
-
98
- def get_loss(self, loss, outputs, gt_instances, indices, num_boxes, **kwargs):
99
- loss_map = {
100
- 'labels': self.loss_labels,
101
- 'cardinality': self.loss_cardinality,
102
- 'boxes': self.loss_boxes,
103
- }
104
- assert loss in loss_map, f'do you really want to compute {loss} loss?'
105
- return loss_map[loss](outputs, gt_instances, indices, num_boxes, **kwargs)
106
-
107
- def loss_boxes(self, outputs, gt_instances: List[Instances], indices: List[tuple], num_boxes):
108
- """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
109
- targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
110
- The target boxes are expected in format (center_x, center_y, h, w), normalized by the image size.
111
- """
112
- # We ignore the regression loss of the track-disappear slots.
113
- #TODO: Make this filter process more elegant.
114
- filtered_idx = []
115
- for src_per_img, tgt_per_img in indices:
116
- keep = tgt_per_img != -1
117
- filtered_idx.append((src_per_img[keep], tgt_per_img[keep]))
118
- indices = filtered_idx
119
- idx = self._get_src_permutation_idx(indices)
120
- src_boxes = outputs['pred_boxes'][idx]
121
- target_boxes = torch.cat([gt_per_img.boxes[i] for gt_per_img, (_, i) in zip(gt_instances, indices)], dim=0)
122
-
123
- # for pad target, don't calculate regression loss, judged by whether obj_id=-1
124
- target_obj_ids = torch.cat([gt_per_img.obj_ids[i] for gt_per_img, (_, i) in zip(gt_instances, indices)], dim=0) # size(16)
125
- mask = (target_obj_ids != -1)
126
-
127
- loss_bbox = F.l1_loss(src_boxes[mask], target_boxes[mask], reduction='none')
128
- loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(
129
- box_ops.box_cxcywh_to_xyxy(src_boxes[mask]),
130
- box_ops.box_cxcywh_to_xyxy(target_boxes[mask])))
131
-
132
- losses = {}
133
- losses['loss_bbox'] = loss_bbox.sum() / num_boxes
134
- losses['loss_giou'] = loss_giou.sum() / num_boxes
135
-
136
- return losses
137
-
138
- def loss_labels(self, outputs, gt_instances: List[Instances], indices, num_boxes, log=False):
139
- """Classification loss (NLL)
140
- targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
141
- """
142
- src_logits = outputs['pred_logits']
143
- idx = self._get_src_permutation_idx(indices)
144
- target_classes = torch.full(src_logits.shape[:2], self.num_classes,
145
- dtype=torch.int64, device=src_logits.device)
146
- # The matched gt for a disappeared track query is set to -1.
147
- labels = []
148
- for gt_per_img, (_, J) in zip(gt_instances, indices):
149
- labels_per_img = torch.ones_like(J)
150
- # set labels of track-appear slots to 0.
151
- if len(gt_per_img) > 0:
152
- labels_per_img[J != -1] = gt_per_img.labels[J[J != -1]]
153
- labels.append(labels_per_img)
154
- target_classes_o = torch.cat(labels)
155
- target_classes[idx] = target_classes_o
156
- if self.focal_loss:
157
- gt_labels_target = F.one_hot(target_classes, num_classes=self.num_classes + 1)[:, :, :-1] # no loss for the last (background) class
158
- gt_labels_target = gt_labels_target.to(src_logits)
159
- loss_ce = sigmoid_focal_loss(src_logits.flatten(1),
160
- gt_labels_target.flatten(1),
161
- alpha=0.25,
162
- gamma=2,
163
- num_boxes=num_boxes, mean_in_dim1=False)
164
- loss_ce = loss_ce.sum()
165
- else:
166
- loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
167
- losses = {'loss_ce': loss_ce}
168
-
169
- if log:
170
- # TODO this should probably be a separate loss, not hacked in this one here
171
- losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]
172
-
173
- return losses
174
-
175
- def match_for_single_frame(self, outputs: dict):
176
- outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
177
-
178
- gt_instances_i = self.gt_instances[self._current_frame_idx] # gt instances of i-th image.
179
- track_instances: Instances = outputs_without_aux['track_instances']
180
- pred_logits_i = track_instances.pred_logits # predicted logits of i-th image.
181
- pred_boxes_i = track_instances.pred_boxes # predicted boxes of i-th image.
182
-
183
- obj_idxes = gt_instances_i.obj_ids
184
- obj_idxes_list = obj_idxes.detach().cpu().numpy().tolist()
185
- obj_idx_to_gt_idx = {obj_idx: gt_idx for gt_idx, obj_idx in enumerate(obj_idxes_list)}
186
- outputs_i = {
187
- 'pred_logits': pred_logits_i.unsqueeze(0),
188
- 'pred_boxes': pred_boxes_i.unsqueeze(0),
189
- }
190
-
191
- # step1. inherit and update the previous tracks.
192
- num_disappear_track = 0
193
- for j in range(len(track_instances)):
194
- obj_id = track_instances.obj_idxes[j].item()
195
- # set new target idx.
196
- if obj_id >= 0:
197
- if obj_id in obj_idx_to_gt_idx:
198
- track_instances.matched_gt_idxes[j] = obj_idx_to_gt_idx[obj_id]
199
- else:
200
- num_disappear_track += 1
201
- track_instances.matched_gt_idxes[j] = -1 # track-disappear case.
202
- else:
203
- track_instances.matched_gt_idxes[j] = -1
204
-
205
- full_track_idxes = torch.arange(len(track_instances), dtype=torch.long).to(pred_logits_i.device)
206
- matched_track_idxes = (track_instances.obj_idxes >= 0) # occupied slots
207
- prev_matched_indices = torch.stack(
208
- [full_track_idxes[matched_track_idxes], track_instances.matched_gt_idxes[matched_track_idxes]], dim=1).to(
209
- pred_logits_i.device)
210
-
211
- # step2. select the unmatched slots.
212
- # note that the FP tracks whose obj_idxes are -2 will not be selected here.
213
- unmatched_track_idxes = full_track_idxes[track_instances.obj_idxes == -1]
214
-
215
- # step3. select the untracked gt instances (new tracks).
216
- tgt_indexes = track_instances.matched_gt_idxes
217
- tgt_indexes = tgt_indexes[tgt_indexes != -1]
218
-
219
- tgt_state = torch.zeros(len(gt_instances_i)).to(pred_logits_i.device)
220
- tgt_state[tgt_indexes] = 1
221
- untracked_tgt_indexes = torch.arange(len(gt_instances_i)).to(pred_logits_i.device)[tgt_state == 0]
222
- # untracked_tgt_indexes = select_unmatched_indexes(tgt_indexes, len(gt_instances_i))
223
- untracked_gt_instances = gt_instances_i[untracked_tgt_indexes]
224
-
225
- def match_for_single_decoder_layer(unmatched_outputs, matcher):
226
- new_track_indices = matcher(unmatched_outputs,
227
- [untracked_gt_instances]) # list[tuple(src_idx, tgt_idx)]
228
-
229
- src_idx = new_track_indices[0][0]
230
- tgt_idx = new_track_indices[0][1]
231
- # concat src and tgt.
232
- new_matched_indices = torch.stack([unmatched_track_idxes[src_idx], untracked_tgt_indexes[tgt_idx]],
233
- dim=1).to(pred_logits_i.device)
234
- return new_matched_indices
235
-
236
- # step4. do matching between the unmatched slots and GTs.
237
- unmatched_outputs = {
238
- 'pred_logits': track_instances.pred_logits[unmatched_track_idxes].unsqueeze(0),
239
- 'pred_boxes': track_instances.pred_boxes[unmatched_track_idxes].unsqueeze(0),
240
- }
241
- new_matched_indices = match_for_single_decoder_layer(unmatched_outputs, self.matcher)
242
-
243
- # step5. update obj_idxes according to the new matching result.
244
- track_instances.obj_idxes[new_matched_indices[:, 0]] = gt_instances_i.obj_ids[new_matched_indices[:, 1]].long()
245
- track_instances.matched_gt_idxes[new_matched_indices[:, 0]] = new_matched_indices[:, 1]
246
-
247
- # step6. calculate iou.
248
- active_idxes = (track_instances.obj_idxes >= 0) & (track_instances.matched_gt_idxes >= 0)
249
- active_track_boxes = track_instances.pred_boxes[active_idxes]
250
- if len(active_track_boxes) > 0:
251
- gt_boxes = gt_instances_i.boxes[track_instances.matched_gt_idxes[active_idxes]]
252
- active_track_boxes = box_ops.box_cxcywh_to_xyxy(active_track_boxes)
253
- gt_boxes = box_ops.box_cxcywh_to_xyxy(gt_boxes)
254
- track_instances.iou[active_idxes] = matched_boxlist_iou(Boxes(active_track_boxes), Boxes(gt_boxes))
255
-
256
- # step7. merge the unmatched pairs and the matched pairs.
257
- matched_indices = torch.cat([new_matched_indices, prev_matched_indices], dim=0)
258
-
259
- # step8. calculate losses.
260
- self.num_samples += len(gt_instances_i) + num_disappear_track
261
- self.sample_device = pred_logits_i.device
262
- for loss in self.losses:
263
- new_track_loss = self.get_loss(loss,
264
- outputs=outputs_i,
265
- gt_instances=[gt_instances_i],
266
- indices=[(matched_indices[:, 0], matched_indices[:, 1])],
267
- num_boxes=1)
268
- self.losses_dict.update(
269
- {'frame_{}_{}'.format(self._current_frame_idx, key): value for key, value in new_track_loss.items()})
270
-
271
- if 'aux_outputs' in outputs:
272
- for i, aux_outputs in enumerate(outputs['aux_outputs']):
273
- unmatched_outputs_layer = {
274
- 'pred_logits': aux_outputs['pred_logits'][0, unmatched_track_idxes].unsqueeze(0),
275
- 'pred_boxes': aux_outputs['pred_boxes'][0, unmatched_track_idxes].unsqueeze(0),
276
- }
277
- new_matched_indices_layer = match_for_single_decoder_layer(unmatched_outputs_layer, self.matcher)
278
- matched_indices_layer = torch.cat([new_matched_indices_layer, prev_matched_indices], dim=0)
279
- for loss in self.losses:
280
- if loss == 'masks':
281
- # Intermediate masks losses are too costly to compute, we ignore them.
282
- continue
283
- l_dict = self.get_loss(loss,
284
- aux_outputs,
285
- gt_instances=[gt_instances_i],
286
- indices=[(matched_indices_layer[:, 0], matched_indices_layer[:, 1])],
287
- num_boxes=1, )
288
- self.losses_dict.update(
289
- {'frame_{}_aux{}_{}'.format(self._current_frame_idx, i, key): value for key, value in
290
- l_dict.items()})
291
- self._step()
292
- return track_instances
293
-
294
- def forward(self, outputs, input_data: dict):
295
- # losses of each frame are calculated during the model's forwarding and are output by the model as outputs['losses_dict'].
296
- losses = outputs.pop("losses_dict")
297
- num_samples = self.get_num_boxes(self.num_samples)
298
- for loss_name, loss in losses.items():
299
- losses[loss_name] /= num_samples
300
- return losses
301
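When `focal_loss` is enabled, `loss_labels` above delegates to a `sigmoid_focal_loss` helper imported from elsewhere in the repository, which is not shown in this diff. As a minimal sketch (an assumption about its behaviour based on the standard alpha/gamma formulation, not the repository's exact implementation), it can be written as:

```python
# Hedged sketch of a sigmoid focal loss with the alpha/gamma weighting assumed
# by loss_labels above; the repo's sigmoid_focal_loss may differ in details.
import torch
import torch.nn.functional as F

def sigmoid_focal_loss_sketch(inputs, targets, alpha=0.25, gamma=2.0, num_boxes=1):
    prob = inputs.sigmoid()
    ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
    p_t = prob * targets + (1 - prob) * (1 - targets)   # probability of the true class
    loss = ce_loss * ((1 - p_t) ** gamma)               # down-weight easy examples
    if alpha >= 0:
        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
        loss = alpha_t * loss
    return loss.sum() / num_boxes
```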
-
302
-
303
- class RuntimeTrackerBase(object):
304
- def __init__(self, score_thresh=0.8, filter_score_thresh=0.6, miss_tolerance=5):
305
- self.score_thresh = score_thresh
306
- self.filter_score_thresh = filter_score_thresh
307
- self.miss_tolerance = miss_tolerance
308
- self.max_obj_id = 0
309
-
310
- def clear(self):
311
- self.max_obj_id = 0
312
-
313
- def update(self, track_instances: Instances):
314
- track_instances.disappear_time[track_instances.scores >= self.score_thresh] = 0
315
- for i in range(len(track_instances)):
316
- if track_instances.obj_idxes[i] == -1 and track_instances.scores[i] >= self.score_thresh:
317
- # print("track {} has score {}, assign obj_id {}".format(i, track_instances.scores[i], self.max_obj_id))
318
- track_instances.obj_idxes[i] = self.max_obj_id
319
- self.max_obj_id += 1
320
- elif track_instances.obj_idxes[i] >= 0 and track_instances.scores[i] < self.filter_score_thresh:
321
- track_instances.disappear_time[i] += 1
322
- if track_instances.disappear_time[i] >= self.miss_tolerance:
323
- # Set the obj_id to -1.
324
- # Then this track will be removed by TrackEmbeddingLayer.
325
- track_instances.obj_idxes[i] = -1
326
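The thresholds above drive a simple per-slot lifecycle: a free slot (obj_idx == -1) receives a new id once its score exceeds score_thresh, and an occupied slot is dropped after miss_tolerance consecutive frames below filter_score_thresh. A hedged walkthrough with plain tensors (names are illustrative; the real code operates on the Instances container used above):

```python
# Sketch of the id-assignment / miss-tolerance logic with plain tensors.
import torch

score_thresh, filter_score_thresh, miss_tolerance = 0.8, 0.6, 5
obj_idxes = torch.full((3,), -1, dtype=torch.long)   # all slots start free
disappear_time = torch.zeros(3, dtype=torch.long)
max_obj_id = 0

frames = [torch.tensor([0.9, 0.3, 0.1])] + [torch.tensor([0.2, 0.3, 0.1])] * 6
for frame, scores in enumerate(frames):
    disappear_time[scores >= score_thresh] = 0
    for i in range(len(scores)):
        if obj_idxes[i] == -1 and scores[i] >= score_thresh:
            obj_idxes[i] = max_obj_id          # a new track is born
            max_obj_id += 1
        elif obj_idxes[i] >= 0 and scores[i] < filter_score_thresh:
            disappear_time[i] += 1
            if disappear_time[i] >= miss_tolerance:
                obj_idxes[i] = -1              # killed after 5 consecutive misses
    print(frame, obj_idxes.tolist())           # slot 0 gets id 0, then dies at frame 5
```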
-
327
-
328
- class TrackerPostProcess(nn.Module):
329
- """ This module converts the model's output into the format expected by the coco api"""
330
- def __init__(self):
331
- super().__init__()
332
-
333
- @torch.no_grad()
334
- def forward(self, track_instances: Instances, target_size) -> Instances:
335
- """ Perform the computation
336
- Parameters:
337
- track_instances: Instances holding the model's raw predictions (pred_logits, pred_boxes) for the current frame
338
- target_size: (height, width) of the image
339
- For evaluation, this must be the original image size (before any data augmentation)
340
- For visualization, this should be the image size after data augment, but before padding
341
- """
342
- out_logits = track_instances.pred_logits
343
- out_bbox = track_instances.pred_boxes
344
-
345
- prob = out_logits.sigmoid()
346
- # prob = out_logits[...,:1].sigmoid()
347
- scores, labels = prob.max(-1)
348
-
349
- # convert to [x0, y0, x1, y1] format
350
- boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
351
- # and from relative [0, 1] to absolute [0, height] coordinates
352
- img_h, img_w = target_size
353
- scale_fct = torch.Tensor([img_w, img_h, img_w, img_h]).to(boxes)
354
- boxes = boxes * scale_fct[None, :]
355
-
356
- track_instances.boxes = boxes
357
- track_instances.scores = scores
358
- track_instances.labels = labels
359
- # track_instances.remove('pred_logits')
360
- # track_instances.remove('pred_boxes')
361
- return track_instances
362
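The post-processing above relies on `box_ops.box_cxcywh_to_xyxy`, which is imported from elsewhere in the repository. Assuming the standard DETR-style definition, the conversion plus rescaling to absolute pixel coordinates can be sketched as:

```python
# Hedged sketch of the conversion used by TrackerPostProcess.forward,
# assuming the usual DETR definition of box_cxcywh_to_xyxy.
import torch

def box_cxcywh_to_xyxy(x):
    cx, cy, w, h = x.unbind(-1)
    return torch.stack([cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h], dim=-1)

pred_boxes = torch.tensor([[0.5, 0.5, 0.2, 0.4]])     # normalized (cx, cy, w, h)
img_h, img_w = 720, 1280
boxes = box_cxcywh_to_xyxy(pred_boxes)
boxes = boxes * torch.tensor([img_w, img_h, img_w, img_h], dtype=boxes.dtype)
print(boxes)                                          # tensor([[512., 216., 768., 504.]])
```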
-
363
-
364
- def _get_clones(module, N):
365
- return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
366
-
367
-
368
- class MOTR(nn.Module):
369
- def __init__(self, backbone, transformer, num_classes, num_queries, num_feature_levels, criterion, track_embed,
370
- aux_loss=True, with_box_refine=False, two_stage=False, memory_bank=None):
371
- """ Initializes the model.
372
- Parameters:
373
- backbone: torch module of the backbone to be used. See backbone.py
374
- transformer: torch module of the transformer architecture. See transformer.py
375
- num_classes: number of object classes
376
- num_queries: number of object queries, i.e. detection slots. This is the maximal number of objects
377
- DETR can detect in a single image. For COCO, we recommend 100 queries.
378
- aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
379
- with_box_refine: iterative bounding box refinement
380
- two_stage: two-stage Deformable DETR
381
- """
382
- super().__init__()
383
- self.num_queries = num_queries
384
- self.track_embed = track_embed
385
- self.transformer = transformer
386
- hidden_dim = transformer.d_model
387
- self.num_classes = num_classes
388
- self.class_embed = nn.Linear(hidden_dim, num_classes)
389
- self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
390
- self.num_feature_levels = num_feature_levels
391
- if not two_stage:
392
- self.query_embed = nn.Embedding(num_queries, hidden_dim * 2)
393
- if num_feature_levels > 1:
394
- num_backbone_outs = len(backbone.strides)
395
- input_proj_list = []
396
- for _ in range(num_backbone_outs):
397
- in_channels = backbone.num_channels[_]
398
- input_proj_list.append(nn.Sequential(
399
- nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
400
- nn.GroupNorm(32, hidden_dim),
401
- ))
402
- for _ in range(num_feature_levels - num_backbone_outs):
403
- input_proj_list.append(nn.Sequential(
404
- nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
405
- nn.GroupNorm(32, hidden_dim),
406
- ))
407
- in_channels = hidden_dim
408
- self.input_proj = nn.ModuleList(input_proj_list)
409
- else:
410
- self.input_proj = nn.ModuleList([
411
- nn.Sequential(
412
- nn.Conv2d(backbone.num_channels[0], hidden_dim, kernel_size=1),
413
- nn.GroupNorm(32, hidden_dim),
414
- )])
415
- self.backbone = backbone
416
- self.aux_loss = aux_loss
417
- self.with_box_refine = with_box_refine
418
- self.two_stage = two_stage
419
-
420
- prior_prob = 0.01
421
- bias_value = -math.log((1 - prior_prob) / prior_prob)
422
- self.class_embed.bias.data = torch.ones(num_classes) * bias_value
423
- nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)
424
- nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)
425
- for proj in self.input_proj:
426
- nn.init.xavier_uniform_(proj[0].weight, gain=1)
427
- nn.init.constant_(proj[0].bias, 0)
428
-
429
- # if two-stage, the last class_embed and bbox_embed is for region proposal generation
430
- num_pred = (transformer.decoder.num_layers + 1) if two_stage else transformer.decoder.num_layers
431
- if with_box_refine:
432
- self.class_embed = _get_clones(self.class_embed, num_pred)
433
- self.bbox_embed = _get_clones(self.bbox_embed, num_pred)
434
- nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)
435
- # hack implementation for iterative bounding box refinement
436
- self.transformer.decoder.bbox_embed = self.bbox_embed
437
- else:
438
- nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)
439
- self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])
440
- self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])
441
- self.transformer.decoder.bbox_embed = None
442
- if two_stage:
443
- # hack implementation for two-stage
444
- self.transformer.decoder.class_embed = self.class_embed
445
- for box_embed in self.bbox_embed:
446
- nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)
447
- self.post_process = TrackerPostProcess()
448
- self.track_base = RuntimeTrackerBase()
449
- self.criterion = criterion
450
- self.memory_bank = memory_bank
451
- self.mem_bank_len = 0 if memory_bank is None else memory_bank.max_his_length
452
-
453
- def _generate_empty_tracks(self):
454
- track_instances = Instances((1, 1))
455
- num_queries, dim = self.query_embed.weight.shape # (300, 512)
456
- device = self.query_embed.weight.device
457
- track_instances.ref_pts = self.transformer.reference_points(self.query_embed.weight[:, :dim // 2])
458
- track_instances.query_pos = self.query_embed.weight
459
- track_instances.output_embedding = torch.zeros((num_queries, dim >> 1), device=device)
460
- track_instances.obj_idxes = torch.full((len(track_instances),), -1, dtype=torch.long, device=device)
461
- track_instances.matched_gt_idxes = torch.full((len(track_instances),), -1, dtype=torch.long, device=device)
462
- track_instances.disappear_time = torch.zeros((len(track_instances), ), dtype=torch.long, device=device)
463
- track_instances.iou = torch.zeros((len(track_instances),), dtype=torch.float, device=device)
464
- track_instances.scores = torch.zeros((len(track_instances),), dtype=torch.float, device=device)
465
- track_instances.track_scores = torch.zeros((len(track_instances),), dtype=torch.float, device=device)
466
- track_instances.pred_boxes = torch.zeros((len(track_instances), 4), dtype=torch.float, device=device)
467
- track_instances.pred_logits = torch.zeros((len(track_instances), self.num_classes), dtype=torch.float, device=device)
468
-
469
- mem_bank_len = self.mem_bank_len
470
- track_instances.mem_bank = torch.zeros((len(track_instances), mem_bank_len, dim // 2), dtype=torch.float32, device=device)
471
- track_instances.mem_padding_mask = torch.ones((len(track_instances), mem_bank_len), dtype=torch.bool, device=device)
472
- track_instances.save_period = torch.zeros((len(track_instances), ), dtype=torch.float32, device=device)
473
-
474
- return track_instances.to(self.query_embed.weight.device)
475
-
476
- def clear(self):
477
- self.track_base.clear()
478
-
479
- @torch.jit.unused
480
- def _set_aux_loss(self, outputs_class, outputs_coord):
481
- # this is a workaround to make torchscript happy, as torchscript
482
- # doesn't support dictionary with non-homogeneous values, such
483
- # as a dict having both a Tensor and a list.
484
- return [{'pred_logits': a, 'pred_boxes': b, }
485
- for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]
486
-
487
- def _forward_single_image(self, samples, track_instances: Instances):
488
- features, pos = self.backbone(samples)
489
- src, mask = features[-1].decompose()
490
- assert mask is not None
491
-
492
- srcs = []
493
- masks = []
494
- for l, feat in enumerate(features):
495
- src, mask = feat.decompose()
496
- srcs.append(self.input_proj[l](src))
497
- masks.append(mask)
498
- assert mask is not None
499
-
500
- if self.num_feature_levels > len(srcs):
501
- _len_srcs = len(srcs)
502
- for l in range(_len_srcs, self.num_feature_levels):
503
- if l == _len_srcs:
504
- src = self.input_proj[l](features[-1].tensors)
505
- else:
506
- src = self.input_proj[l](srcs[-1])
507
- m = samples.mask
508
- mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0]
509
- pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype)
510
- srcs.append(src)
511
- masks.append(mask)
512
- pos.append(pos_l)
513
-
514
- hs, init_reference, inter_references, enc_outputs_class, enc_outputs_coord_unact = self.transformer(srcs, masks, pos, track_instances.query_pos, ref_pts=track_instances.ref_pts)
515
-
516
- outputs_classes = []
517
- outputs_coords = []
518
- for lvl in range(hs.shape[0]):
519
- if lvl == 0:
520
- reference = init_reference
521
- else:
522
- reference = inter_references[lvl - 1]
523
- reference = inverse_sigmoid(reference)
524
- outputs_class = self.class_embed[lvl](hs[lvl])
525
- tmp = self.bbox_embed[lvl](hs[lvl])
526
- if reference.shape[-1] == 4:
527
- tmp += reference
528
- else:
529
- assert reference.shape[-1] == 2
530
- tmp[..., :2] += reference
531
- outputs_coord = tmp.sigmoid()
532
- outputs_classes.append(outputs_class)
533
- outputs_coords.append(outputs_coord)
534
- outputs_class = torch.stack(outputs_classes)
535
- outputs_coord = torch.stack(outputs_coords)
536
-
537
- ref_pts_all = torch.cat([init_reference[None], inter_references[:, :, :, :2]], dim=0)
538
- out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1], 'ref_pts': ref_pts_all[5]}
539
- if self.aux_loss:
540
- out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord)
541
-
542
- with torch.no_grad():
543
- if self.training:
544
- track_scores = outputs_class[-1, 0, :].sigmoid().max(dim=-1).values
545
- else:
546
- track_scores = outputs_class[-1, 0, :, 0].sigmoid()
547
-
548
- track_instances.scores = track_scores
549
- track_instances.pred_logits = outputs_class[-1, 0]
550
- track_instances.pred_boxes = outputs_coord[-1, 0]
551
- track_instances.output_embedding = hs[-1, 0]
552
- if self.training:
553
- # the track id will be assigned by the matcher.
554
- out['track_instances'] = track_instances
555
- track_instances = self.criterion.match_for_single_frame(out)
556
- else:
557
- # each track will be assigned a unique global id by the track base.
558
- self.track_base.update(track_instances)
559
- if self.memory_bank is not None:
560
- track_instances = self.memory_bank(track_instances)
561
- # track_instances.track_scores = track_instances.track_scores[..., 0]
562
- # track_instances.scores = track_instances.track_scores.sigmoid()
563
- if self.training:
564
- self.criterion.calc_loss_for_track_scores(track_instances)
565
- tmp = {}
566
- tmp['init_track_instances'] = self._generate_empty_tracks()
567
- tmp['track_instances'] = track_instances
568
- out_track_instances = self.track_embed(tmp)
569
- out['track_instances'] = out_track_instances
570
- return out
571
-
572
- @torch.no_grad()
573
- def inference_single_image(self, img, ori_img_size, track_instances=None):
574
- if not isinstance(img, NestedTensor):
575
- img = nested_tensor_from_tensor_list(img)
576
- if track_instances is None:
577
- track_instances = self._generate_empty_tracks()
578
-
579
- res = self._forward_single_image(img, track_instances=track_instances)
580
-
581
- track_instances = res['track_instances']
582
- track_instances = self.post_process(track_instances, ori_img_size)
583
- ret = {'track_instances': track_instances}
584
- if 'ref_pts' in res:
585
- ref_pts = res['ref_pts']
586
- img_h, img_w = ori_img_size
587
- scale_fct = torch.Tensor([img_w, img_h]).to(ref_pts)
588
- ref_pts = ref_pts * scale_fct[None]
589
- ret['ref_pts'] = ref_pts
590
- return ret
591
-
592
- def forward(self, data: dict):
593
- if self.training:
594
- self.criterion.initialize_for_single_clip(data['gt_instances'])
595
- frames = data['imgs'] # list of Tensor.
596
- outputs = {
597
- 'pred_logits': [],
598
- 'pred_boxes': [],
599
- }
600
-
601
- track_instances = self._generate_empty_tracks()
602
- for frame in frames:
603
- if not isinstance(frame, NestedTensor):
604
- frame = nested_tensor_from_tensor_list([frame])
605
- frame_res = self._forward_single_image(frame, track_instances)
606
- track_instances = frame_res['track_instances']
607
- outputs['pred_logits'].append(frame_res['pred_logits'])
608
- outputs['pred_boxes'].append(frame_res['pred_boxes'])
609
-
610
- if not self.training:
611
- outputs['track_instances'] = track_instances
612
- else:
613
- outputs['losses_dict'] = self.criterion.losses_dict
614
- return outputs
615
-
616
-
617
- def build(args):
618
- dataset_to_num_classes = {
619
- 'coco': 91,
620
- 'coco_panoptic': 250,
621
- 'e2e_mot': 1,
622
- 'e2e_joint': 1,
623
- 'e2e_static_mot': 1
624
- }
625
- assert args.dataset_file in dataset_to_num_classes
626
- num_classes = dataset_to_num_classes[args.dataset_file]
627
- device = torch.device(args.device)
628
-
629
- backbone = build_backbone(args)
630
-
631
- transformer = build_deforamble_transformer(args)
632
- d_model = transformer.d_model
633
- hidden_dim = args.dim_feedforward
634
- query_interaction_layer = build_query_interaction_layer(args, args.query_interaction_layer, d_model, hidden_dim, d_model*2)
635
-
636
- img_matcher = build_matcher(args)
637
- num_frames_per_batch = max(args.sampler_lengths)
638
- weight_dict = {}
639
- for i in range(num_frames_per_batch):
640
- weight_dict.update({"frame_{}_loss_ce".format(i): args.cls_loss_coef,
641
- 'frame_{}_loss_bbox'.format(i): args.bbox_loss_coef,
642
- 'frame_{}_loss_giou'.format(i): args.giou_loss_coef,
643
- })
644
-
645
- # TODO this is a hack
646
- if args.aux_loss:
647
- for i in range(num_frames_per_batch):
648
- for j in range(args.dec_layers - 1):
649
- weight_dict.update({"frame_{}_aux{}_loss_ce".format(i, j): args.cls_loss_coef,
650
- 'frame_{}_aux{}_loss_bbox'.format(i, j): args.bbox_loss_coef,
651
- 'frame_{}_aux{}_loss_giou'.format(i, j): args.giou_loss_coef,
652
- })
653
- if args.memory_bank_type is not None and len(args.memory_bank_type) > 0:
654
- memory_bank = build_memory_bank(args, d_model, hidden_dim, d_model * 2)
655
- for i in range(num_frames_per_batch):
656
- weight_dict.update({"frame_{}_track_loss_ce".format(i): args.cls_loss_coef})
657
- else:
658
- memory_bank = None
659
- losses = ['labels', 'boxes']
660
- criterion = ClipMatcher(num_classes, matcher=img_matcher, weight_dict=weight_dict, losses=losses)
661
- criterion.to(device)
662
- postprocessors = {}
663
- model = MOTR(
664
- backbone,
665
- transformer,
666
- track_embed=query_interaction_layer,
667
- num_feature_levels=args.num_feature_levels,
668
- num_classes=num_classes,
669
- num_queries=args.num_queries,
670
- aux_loss=args.aux_loss,
671
- criterion=criterion,
672
- with_box_refine=args.with_box_refine,
673
- two_stage=args.two_stage,
674
- memory_bank=memory_bank,
675
- )
676
- return model, criterion, postprocessors
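A note on the loss bookkeeping in this file: ClipMatcher stores per-frame losses under keys such as `frame_{i}_loss_ce` and `frame_{i}_aux{j}_loss_bbox`, and `build()` constructs a `weight_dict` keyed the same way. The weighted reduction into a single scalar happens in the training engine, which is not part of this file; a hedged sketch of that reduction:

```python
# Sketch only: how a per-frame loss dict and the weight_dict built above are
# typically combined; the actual reduction lives outside this file.
def weighted_total_loss(loss_dict, weight_dict):
    return sum(loss_dict[k] * weight_dict[k] for k in loss_dict if k in weight_dict)

# Example with dummy values:
loss_dict = {"frame_0_loss_ce": 0.7, "frame_0_loss_bbox": 0.2, "frame_0_loss_giou": 0.4}
weight_dict = {"frame_0_loss_ce": 2.0, "frame_0_loss_bbox": 5.0, "frame_0_loss_giou": 2.0}
print(weighted_total_loss(loss_dict, weight_dict))  # 0.7*2 + 0.2*5 + 0.4*2 = 3.2
```
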
spaces/EleutherAI/magma/train.py DELETED
@@ -1,192 +0,0 @@
1
- import torch
2
- import os
3
- import deepspeed
4
- import wandb
5
- from torch.utils.data import random_split, ConcatDataset
6
- from torch.optim import AdamW
7
- from tqdm import tqdm
8
- from functools import partial
9
- from magma.datasets import (
10
- collate_fn,
11
- ImgCptDataset,
12
- )
13
- from magma.magma import (
14
- Magma,
15
- )
16
- from magma.utils import (
17
- is_main,
18
- cycle,
19
- parse_args,
20
- wandb_log,
21
- wandb_init,
22
- save_model,
23
- load_model,
24
- print_main,
25
- configure_param_groups,
26
- )
27
- from magma.train_loop import (
28
- eval_step,
29
- inference_step,
30
- train_step,
31
- )
32
-
33
-
34
- def _load_img_cpt_datasets(dataset_dir, tokenizer, transforms):
35
- if isinstance(dataset_dir, (list, tuple)):
36
- return ConcatDataset(
37
- [_load_img_cpt_datasets(d, tokenizer, transforms) for d in dataset_dir]
38
- )
39
- elif isinstance(dataset_dir, str):
40
- return ImgCptDataset(dataset_dir, tokenizer=tokenizer, transforms=transforms)
41
- else:
42
- raise TypeError("dataset dir wrong type")
43
-
44
-
45
- def get_pretraining_datasets(config, tokenizer, transforms):
46
- # if config.train_dataset_dir is a list, load all datasets + join together
47
- train_dataset = _load_img_cpt_datasets(
48
- config.train_dataset_dir, tokenizer, transforms
49
- )
50
- # if no dedicated eval sets are given, use a percentage of the train dataset
51
- if config.eval_dataset_dir is None:
52
- eval_len = int(len(train_dataset) * config.eval_dataset_pct)
53
- train_len = len(train_dataset) - eval_len
54
- print(
55
- f"Randomly splitting train_dataset into two datasets of length {train_len} and {eval_len}"
56
- )
57
- train_dataset, eval_dataset = random_split(train_dataset, [train_len, eval_len])
58
- else:
59
- eval_dataset = _load_img_cpt_datasets(
60
- config.eval_dataset_dir, tokenizer, transforms
61
- )
62
-
63
- print_main(f"Loaded train dataset with {len(train_dataset)} samples")
64
- print_main(f"Loaded eval dataset with {len(eval_dataset)} samples")
65
-
66
- return train_dataset, eval_dataset
67
-
68
-
69
- # tell tokenizers not to do parallelism
70
- os.environ["TOKENIZERS_PARALLELISM"] = "false"
71
-
72
- if __name__ == "__main__":
73
-
74
- # parse command line arguments:
75
- args = parse_args()
76
- deepspeed.init_distributed()
77
-
78
- # load model + tokenizer:
79
- model = Magma(
80
- args.config
81
- ) # for finetuning one might want to load the model via Magma.from_checkpoint(...) here
82
- tokenizer, config, transforms = model.tokenizer, model.config, model.transforms
83
-
84
- # filter frozen from trainable parameters:
85
- trainable_parameters = configure_param_groups(model, config)
86
-
87
- # load data:
88
- train_dataset, eval_dataset = get_pretraining_datasets(
89
- config, tokenizer, transforms
90
- )
91
-
92
- print_main(f"Loaded train dataset with {len(train_dataset)} samples")
93
- print_main(f"Loaded eval dataset with {len(eval_dataset)} samples")
94
-
95
- opt = AdamW(
96
- trainable_parameters,
97
- config.lr,
98
- betas=(0.9, 0.95),
99
- weight_decay=config.weight_decay,
100
- )
101
-
102
- model_engine, opt, train_loader, lr_scheduler = deepspeed.initialize(
103
- args=args,
104
- model=model,
105
- optimizer=opt,
106
- model_parameters=trainable_parameters,
107
- training_data=train_dataset,
108
- collate_fn=partial(collate_fn, seq_len=model.seq_len),
109
- config_params=config.deepspeed_config_params,
110
- )
111
- eval_loader = cycle(model_engine.deepspeed_io(eval_dataset))
112
- train_loader = cycle(train_loader)
113
-
114
- # initialize training
115
- global_step = 0
116
- if config.load:
117
- # loads a deepspeed checkpoint if provided. For finetuning, set load_optimizer to false
118
- previous_global_step = load_model(
119
- model_engine,
120
- config.load,
121
- load_optimizer_states=config.load_optimizer,
122
- load_lr_scheduler_states=config.load_optimizer,
123
- )
124
-
125
- if config.load_optimizer:
126
- global_step = previous_global_step
127
-
128
- pbar = tqdm(
129
- range(0, config.train_steps),
130
- desc="training...",
131
- initial=global_step,
132
- total=config.train_steps,
133
- disable=not is_main(),
134
- )
135
- wandb_init(
136
- project=config.wandb_project,
137
- name=config.name or wandb.util.generate_id(),
138
- config=config,
139
- )
140
-
141
- # training loop
142
- for i in pbar:
143
- if global_step >= config.train_steps:
144
- break
145
-
146
- ##### train step
147
- loss = train_step(config, train_loader, model_engine)
148
-
149
- global_step += 1
150
-
151
- if global_step % config.log_every == 0:
152
- pbar.set_description(f"training... Step: {global_step} Loss: {loss}")
153
- current_lr = (
154
- [lr for lr in lr_scheduler.get_lr()]
155
- if lr_scheduler is not None
156
- else config.lr
157
- )
158
- to_log = {"train/loss": loss, "train/lr": current_lr}
159
- wandb_log(to_log, step=global_step)
160
-
161
- ##### Evaluation phase
162
- if global_step % config.eval_every == 0:
163
- model_engine.eval()
164
- with torch.no_grad():
165
-
166
- ##### eval step:
167
- eval_loss = eval_step(config, eval_loader, model_engine)
168
-
169
- wandb_log({"eval/loss": eval_loss}, step=global_step)
170
- pbar.set_description(
171
- f"evaluating... Step: {global_step} Eval Loss: {eval_loss}"
172
- )
173
-
174
- ##### inference:
175
- image_grid, caption = inference_step(config, eval_loader, model_engine)
176
- wandb_log(
177
- {"inference/image": wandb.Image(image_grid, caption=caption)},
178
- step=global_step,
179
- )
180
-
181
- model_engine.train()
182
-
183
- ##### Save model
184
- if global_step % config.save_every == 0:
185
- if config.save is not None:
186
- save_model(model_engine, config.save, global_step)
187
- print_main(f"saving model at step {global_step}")
188
-
189
- ##### Save model after training is finished
190
- if config.save is not None:
191
- save_model(model_engine, config.save, global_step)
192
- print_main(f"saving model at end of training (step {global_step})")
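The script wraps both loaders with `cycle()` from `magma.utils` so the step-based loop can draw batches indefinitely. That helper is not shown in this diff; a common implementation (an assumption, not necessarily the one in `magma.utils`) is:

```python
# Hedged sketch of a cycle() helper as typically paired with step-based training loops.
def cycle(loader):
    while True:
        for batch in loader:
            yield batch

# Usage: next(train_loader) inside train_step() always returns a batch,
# restarting the DataLoader (and reshuffling, if shuffle=True) at each epoch boundary.
```
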
spaces/EronSamez/RVC_HFmeu/infer/lib/infer_pack/modules/F0Predictor/__init__.py DELETED
File without changes
spaces/EuroPython2022/Fin-Eng-ASR-autosubtitles/README.md DELETED
@@ -1,45 +0,0 @@
1
- ---
2
- title: Fin Eng ASR Autosubtitles
3
- emoji: 🌍
4
- colorFrom: indigo
5
- colorTo: yellow
6
- sdk: gradio
7
- sdk_version: 3.0.24
8
- app_file: app.py
9
- pinned: false
10
- license: mit
11
- ---
12
-
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
14
-
15
- We use Opus-MT models in the code. Here are the citations:
16
- ```
17
- @inproceedings{tiedemann-thottingal-2020-opus,
18
- title = "{OPUS}-{MT} {--} Building open translation services for the World",
19
- author = {Tiedemann, J{\"o}rg and Thottingal, Santhosh},
20
- booktitle = "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation",
21
- month = nov,
22
- year = "2020",
23
- address = "Lisboa, Portugal",
24
- publisher = "European Association for Machine Translation",
25
- url = "https://aclanthology.org/2020.eamt-1.61",
26
- pages = "479--480",
27
- }
28
- @inproceedings{tiedemann-2020-tatoeba,
29
- title = "The Tatoeba Translation Challenge {--} Realistic Data Sets for Low Resource and Multilingual {MT}",
30
- author = {Tiedemann, J{\"o}rg},
31
- booktitle = "Proceedings of the Fifth Conference on Machine Translation",
32
- month = nov,
33
- year = "2020",
34
- address = "Online",
35
- publisher = "Association for Computational Linguistics",
36
- url = "https://aclanthology.org/2020.wmt-1.139",
37
- pages = "1174--1182",
38
- }
39
-
40
- Wav2vec2:
41
- BAEVSKI, Alexei, et al. wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in Neural Information Processing Systems, 2020, 33: 12449-12460.
42
-
43
- T5:
44
- RAFFEL, Colin, et al. Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 2020, 21.140: 1-67.
45
- ```
spaces/EuroPython2022/Warehouse_Apparel_Detection/metadata/predictor_yolo_detector/utils/general.py DELETED
@@ -1,1299 +0,0 @@
1
- import glob
2
- import logging
3
- import os
4
- import platform
5
- import random
6
- import re
7
- import shutil
8
- import subprocess
9
- import time
10
- from contextlib import contextmanager
11
- from copy import copy
12
- from pathlib import Path
13
-
14
- import cv2
15
- import math
16
- import matplotlib
17
- import matplotlib.pyplot as plt
18
- import numpy as np
19
- import torch
20
- import torch.nn as nn
21
- import yaml
22
- from PIL import Image
23
- from scipy.cluster.vq import kmeans
24
- from scipy.signal import butter, filtfilt
25
- from tqdm import tqdm
26
-
27
- from metadata.predictor_yolo_detector.utils.google_utils import gsutil_getsize
28
- from metadata.predictor_yolo_detector.utils.torch_utils import is_parallel, init_torch_seeds
29
-
30
- # Set printoptions
31
- torch.set_printoptions(linewidth=320, precision=5, profile='long')
32
- np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5
33
- matplotlib.rc('font', **{'size': 11})
34
-
35
- # Prevent OpenCV from multithreading (to use PyTorch DataLoader)
36
- cv2.setNumThreads(0)
37
-
38
-
39
- @contextmanager
40
- def torch_distributed_zero_first(local_rank: int):
41
- """
42
- Context manager that makes all processes in distributed training wait for the local master to do something.
43
- """
44
- if local_rank not in [-1, 0]:
45
- torch.distributed.barrier()
46
- yield
47
- if local_rank == 0:
48
- torch.distributed.barrier()
49
-
50
-
51
- def set_logging(rank=-1):
52
- logging.basicConfig(
53
- format="%(message)s",
54
- level=logging.INFO if rank in [-1, 0] else logging.WARN)
55
-
56
-
57
- def init_seeds(seed=0):
58
- random.seed(seed)
59
- np.random.seed(seed)
60
- init_torch_seeds(seed)
61
-
62
-
63
- def get_latest_run(search_dir='./runs'):
64
- # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
65
- last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
66
- return max(last_list, key=os.path.getctime) if last_list else ''
67
-
68
-
69
- def check_git_status():
70
- # Suggest 'git pull' if repo is out of date
71
- if platform.system() in ['Linux', 'Darwin'] and not os.path.isfile('/.dockerenv'):
72
- s = subprocess.check_output('if [ -d .git ]; then git fetch && git status -uno; fi', shell=True).decode('utf-8')
73
- if 'Your branch is behind' in s:
74
- print(s[s.find('Your branch is behind'):s.find('\n\n')] + '\n')
75
-
76
-
77
- def check_img_size(img_size, s=32):
78
- # Verify img_size is a multiple of stride s
79
- new_size = make_divisible(img_size, int(s)) # ceil gs-multiple
80
- if new_size != img_size:
81
- print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
82
- return new_size
83
-
84
-
85
- def check_anchors(dataset, model, thr=4.0, imgsz=640):
86
- # Check anchor fit to data, recompute if necessary
87
- print('\nAnalyzing anchors... ', end='')
88
- m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()
89
- shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)
90
- scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale
91
- wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh
92
-
93
- def metric(k): # compute metric
94
- r = wh[:, None] / k[None]
95
- x = torch.min(r, 1. / r).min(2)[0] # ratio metric
96
- best = x.max(1)[0] # best_x
97
- aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold
98
- bpr = (best > 1. / thr).float().mean() # best possible recall
99
- return bpr, aat
100
-
101
- bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2))
102
- print('anchors/target = %.2f, Best Possible Recall (BPR) = %.4f' % (aat, bpr), end='')
103
- if bpr < 0.98: # threshold to recompute
104
- print('. Attempting to generate improved anchors, please wait...')
105
- na = m.anchor_grid.numel() // 2 # number of anchors
106
- new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)
107
- new_bpr = metric(new_anchors.reshape(-1, 2))[0]
108
- if new_bpr > bpr: # replace anchors
109
- new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors)
110
- m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid) # for inference
111
- m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss
112
- check_anchor_order(m)
113
- print('New anchors saved to model. Update model *.yaml to use these anchors in the future.')
114
- else:
115
- print('Original anchors better than new anchors. Proceeding with original anchors.')
116
- print('') # newline
117
-
118
-
119
- def check_anchor_order(m):
120
- # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary
121
- a = m.anchor_grid.prod(-1).view(-1) # anchor area
122
- da = a[-1] - a[0] # delta a
123
- ds = m.stride[-1] - m.stride[0] # delta s
124
- if da.sign() != ds.sign(): # anchor area order does not match stride order
125
- print('Reversing anchor order')
126
- m.anchors[:] = m.anchors.flip(0)
127
- m.anchor_grid[:] = m.anchor_grid.flip(0)
128
-
129
-
130
- def check_file(file):
131
- # Search for file if not found
132
- if os.path.isfile(file) or file == '':
133
- return file
134
- else:
135
- files = glob.glob('./**/' + file, recursive=True) # find file
136
- assert len(files), 'File Not Found: %s' % file # assert file was found
137
- assert len(files) == 1, "Multiple files match '%s', specify exact path: %s" % (file, files) # assert unique
138
- return files[0] # return file
139
-
140
-
141
- def check_dataset(dict):
142
- # Download dataset if not found
143
- val, s = dict.get('val'), dict.get('download')
144
- if val and len(val):
145
- val = [os.path.abspath(x) for x in (val if isinstance(val, list) else [val])] # val path
146
- if not all(os.path.exists(x) for x in val):
147
- print('\nWARNING: Dataset not found, nonexistent paths: %s' % [*val])
148
- if s and len(s): # download script
149
- print('Downloading %s ...' % s)
150
- if s.startswith('http') and s.endswith('.zip'): # URL
151
- f = Path(s).name # filename
152
- torch.hub.download_url_to_file(s, f)
153
- r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip
154
- else: # bash script
155
- r = os.system(s)
156
- print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value
157
- else:
158
- raise Exception('Dataset not found.')
159
-
160
-
161
- def make_divisible(x, divisor):
162
- # Returns x evenly divisible by divisor
163
- return math.ceil(x / divisor) * divisor
164
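`check_img_size` above uses `make_divisible` to round an arbitrary `--img-size` up to the nearest multiple of the model stride. A quick illustration (the helper is restated here so the snippet runs standalone):

```python
# Illustration of the stride rounding performed by check_img_size/make_divisible.
import math

def make_divisible(x, divisor):
    return math.ceil(x / divisor) * divisor

print(make_divisible(641, 32))  # 672 -> a 641 px request is bumped to the next multiple of 32
print(make_divisible(640, 32))  # 640 -> already a multiple, unchanged
```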
-
165
-
166
- def labels_to_class_weights(labels, nc=80):
167
- # Get class weights (inverse frequency) from training labels
168
- if labels[0] is None: # no labels loaded
169
- return torch.Tensor()
170
-
171
- labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO
172
- classes = labels[:, 0].astype(np.int) # labels = [class xywh]
173
- weights = np.bincount(classes, minlength=nc) # occurrences per class
174
-
175
- # Prepend gridpoint count (for uCE training)
176
- # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image
177
- # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start
178
-
179
- weights[weights == 0] = 1 # replace empty bins with 1
180
- weights = 1 / weights # number of targets per class
181
- weights /= weights.sum() # normalize
182
- return torch.from_numpy(weights)
183
-
184
-
185
- def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
186
- # Produces image weights based on class mAPs
187
- n = len(labels)
188
- class_counts = np.array([np.bincount(labels[i][:, 0].astype(np.int), minlength=nc) for i in range(n)])
189
- image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
190
- # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample
191
- return image_weights
192
-
193
-
194
- def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)
195
- # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
196
- # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
197
- # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
198
- # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
199
- # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
200
- x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
201
- 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
202
- 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
203
- return x
204
-
205
-
206
- def xyxy2xywh(x):
207
- # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
208
- y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
209
- y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center
210
- y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center
211
- y[:, 2] = x[:, 2] - x[:, 0] # width
212
- y[:, 3] = x[:, 3] - x[:, 1] # height
213
- return y
214
-
215
-
216
- def xywh2xyxy(x):
217
- # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
218
- y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)
219
- y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x
220
- y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y
221
- y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x
222
- y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y
223
- return y
224
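These two helpers are exact inverses of each other (up to floating point), which is easy to verify when run inside this module:

```python
# Round-trip check for xyxy2xywh / xywh2xyxy as defined above (run inside this module).
import torch

boxes_xyxy = torch.tensor([[10., 20., 50., 80.]])   # x1, y1, x2, y2
boxes_xywh = xyxy2xywh(boxes_xyxy)                   # -> [[30., 50., 40., 60.]] (cx, cy, w, h)
assert torch.allclose(xywh2xyxy(boxes_xywh), boxes_xyxy)
```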
-
225
-
226
- def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
227
- # Rescale coords (xyxy) from img1_shape to img0_shape
228
- if ratio_pad is None: # calculate from img0_shape
229
- gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new
230
- pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding
231
- else:
232
- gain = ratio_pad[0][0]
233
- pad = ratio_pad[1]
234
-
235
- coords[:, [0, 2]] -= pad[0] # x padding
236
- coords[:, [1, 3]] -= pad[1] # y padding
237
- coords[:, :4] /= gain
238
- clip_coords(coords, img0_shape)
239
- return coords
240
-
241
-
242
- def clip_coords(boxes, img_shape):
243
- # Clip bounding xyxy bounding boxes to image shape (height, width)
244
- boxes[:, 0].clamp_(0, img_shape[1]) # x1
245
- boxes[:, 1].clamp_(0, img_shape[0]) # y1
246
- boxes[:, 2].clamp_(0, img_shape[1]) # x2
247
- boxes[:, 3].clamp_(0, img_shape[0]) # y2
248
-
249
-
250
- def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, fname='precision-recall_curve.png'):
251
- """ Compute the average precision, given the recall and precision curves.
252
- Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
253
- # Arguments
254
- tp: True positives (nparray, nx1 or nx10).
255
- conf: Objectness value from 0-1 (nparray).
256
- pred_cls: Predicted object classes (nparray).
257
- target_cls: True object classes (nparray).
258
- plot: Plot precision-recall curve at mAP@0.5
259
- fname: Plot filename
260
- # Returns
261
- The average precision as computed in py-faster-rcnn.
262
- """
263
-
264
- # Sort by objectness
265
- i = np.argsort(-conf)
266
- tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
267
-
268
- # Find unique classes
269
- unique_classes = np.unique(target_cls)
270
-
271
- # Create Precision-Recall curve and compute AP for each class
272
- px, py = np.linspace(0, 1, 1000), [] # for plotting
273
- pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
274
- s = [unique_classes.shape[0], tp.shape[1]] # number of classes, number of iou thresholds (i.e. 10 for mAP0.5...0.95)
275
- ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
276
- for ci, c in enumerate(unique_classes):
277
- i = pred_cls == c
278
- n_gt = (target_cls == c).sum() # Number of ground truth objects
279
- n_p = i.sum() # Number of predicted objects
280
-
281
- if n_p == 0 or n_gt == 0:
282
- continue
283
- else:
284
- # Accumulate FPs and TPs
285
- fpc = (1 - tp[i]).cumsum(0)
286
- tpc = tp[i].cumsum(0)
287
-
288
- # Recall
289
- recall = tpc / (n_gt + 1e-16) # recall curve
290
- r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0]) # r at pr_score, negative x, xp because xp decreases
291
-
292
- # Precision
293
- precision = tpc / (tpc + fpc) # precision curve
294
- p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0]) # p at pr_score
295
-
296
- # AP from recall-precision curve
297
- for j in range(tp.shape[1]):
298
- ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
299
- if j == 0:
300
- py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5
301
-
302
- # Compute F1 score (harmonic mean of precision and recall)
303
- f1 = 2 * p * r / (p + r + 1e-16)
304
-
305
- if plot:
306
- py = np.stack(py, axis=1)
307
- fig, ax = plt.subplots(1, 1, figsize=(5, 5))
308
- ax.plot(px, py, linewidth=0.5, color='grey') # plot(recall, precision)
309
- ax.plot(px, py.mean(1), linewidth=2, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean())
310
- ax.set_xlabel('Recall')
311
- ax.set_ylabel('Precision')
312
- ax.set_xlim(0, 1)
313
- ax.set_ylim(0, 1)
314
- plt.legend()
315
- fig.tight_layout()
316
- fig.savefig(fname, dpi=200)
317
-
318
- return p, r, ap, f1, unique_classes.astype('int32')
319
-
320
-
321
- def compute_ap(recall, precision):
322
- """ Compute the average precision, given the recall and precision curves.
323
- Source: https://github.com/rbgirshick/py-faster-rcnn.
324
- # Arguments
325
- recall: The recall curve (list).
326
- precision: The precision curve (list).
327
- # Returns
328
- The average precision as computed in py-faster-rcnn.
329
- """
330
-
331
- # Append sentinel values to beginning and end
332
- mrec = recall # np.concatenate(([0.], recall, [recall[-1] + 1E-3]))
333
- mpre = precision # np.concatenate(([0.], precision, [0.]))
334
-
335
- # Compute the precision envelope
336
- mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))
337
-
338
- # Integrate area under curve
339
- method = 'interp' # methods: 'continuous', 'interp'
340
- if method == 'interp':
341
- x = np.linspace(0, 1, 101) # 101-point interp (COCO)
342
- ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate
343
- else: # 'continuous'
344
- i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes
345
- ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve
346
-
347
- return ap, mpre, mrec
348
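With the 101-point interpolation used above, a detector whose precision stays at 1.0 over the whole recall range gives AP = 1.0, while one whose precision falls linearly to 0 gives roughly 0.5. A toy sanity check (run inside this module, since it calls `compute_ap` as defined above):

```python
# Toy sanity check for compute_ap as defined above.
import numpy as np

recall = np.linspace(0, 1, 11)
ap_perfect, _, _ = compute_ap(recall, np.ones_like(recall))  # precision stays at 1.0
ap_linear, _, _ = compute_ap(recall, 1.0 - recall)           # precision falls to 0
print(round(ap_perfect, 3), round(ap_linear, 3))             # ~1.0 and ~0.5
```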
-
349
-
350
- def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-9):
351
- # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
352
- box2 = box2.T
353
-
354
- # Get the coordinates of bounding boxes
355
- if x1y1x2y2: # x1, y1, x2, y2 = box1
356
- b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
357
- b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
358
- else: # transform from xywh to xyxy
359
- b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
360
- b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
361
- b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
362
- b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2
363
-
364
- # Intersection area
365
- inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
366
- (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
367
-
368
- # Union Area
369
- w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
370
- w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
371
- union = w1 * h1 + w2 * h2 - inter + eps
372
-
373
- iou = inter / union
374
- if GIoU or DIoU or CIoU:
375
- cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width
376
- ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height
377
- if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
378
- c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared
379
- rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
380
- (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared
381
- if DIoU:
382
- return iou - rho2 / c2 # DIoU
383
- elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
384
- v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
385
- with torch.no_grad():
386
- alpha = v / ((1 + eps) - iou + v)
387
- return iou - (rho2 / c2 + v * alpha) # CIoU
388
- else: # GIoU https://arxiv.org/pdf/1902.09630.pdf
389
- c_area = cw * ch + eps # convex area
390
- return iou - (c_area - union) / c_area # GIoU
391
- else:
392
- return iou # IoU
393
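A quick sanity check of the GIoU branch: for two disjoint unit squares one box-width apart, plain IoU is 0 while GIoU is negative because of the enclosing-box penalty (run inside this module, since it calls `bbox_iou` as defined above):

```python
# Usage sketch for bbox_iou as defined above: GIoU penalizes the empty space
# inside the smallest enclosing box, so disjoint boxes score below zero.
import torch

box1 = torch.tensor([0., 0., 1., 1.])       # a single box, xyxy
box2 = torch.tensor([[2., 0., 3., 1.]])     # n x 4 boxes, xyxy
print(bbox_iou(box1, box2))                  # tensor([0.])      plain IoU
print(bbox_iou(box1, box2, GIoU=True))       # ~tensor([-0.3333])
```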
-
394
-
395
- def box_iou(box1, box2):
396
- # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
397
- """
398
- Return intersection-over-union (Jaccard index) of boxes.
399
- Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
400
- Arguments:
401
- box1 (Tensor[N, 4])
402
- box2 (Tensor[M, 4])
403
- Returns:
404
- iou (Tensor[N, M]): the NxM matrix containing the pairwise
405
- IoU values for every element in boxes1 and boxes2
406
- """
407
-
408
- def box_area(box):
409
- # box = 4xn
410
- return (box[2] - box[0]) * (box[3] - box[1])
411
-
412
- area1 = box_area(box1.T)
413
- area2 = box_area(box2.T)
414
-
415
- # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
416
- inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
417
- return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)
418
-
419
-
420
- def wh_iou(wh1, wh2):
421
- # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
422
- wh1 = wh1[:, None] # [N,1,2]
423
- wh2 = wh2[None] # [1,M,2]
424
- inter = torch.min(wh1, wh2).prod(2) # [N,M]
425
- return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter)
426
-
427
-
428
- class FocalLoss(nn.Module):
429
- # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
430
- def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
431
- super(FocalLoss, self).__init__()
432
- self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss()
433
- self.gamma = gamma
434
- self.alpha = alpha
435
- self.reduction = loss_fcn.reduction
436
- self.loss_fcn.reduction = 'none' # required to apply FL to each element
437
-
438
- def forward(self, pred, true):
439
- loss = self.loss_fcn(pred, true)
440
- # p_t = torch.exp(-loss)
441
- # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability
442
-
443
- # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
444
- pred_prob = torch.sigmoid(pred) # prob from logits
445
- p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
446
- alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
447
- modulating_factor = (1.0 - p_t) ** self.gamma
448
- loss *= alpha_factor * modulating_factor
449
-
450
- if self.reduction == 'mean':
451
- return loss.mean()
452
- elif self.reduction == 'sum':
453
- return loss.sum()
454
- else: # 'none'
455
- return loss
456
-
457
-
458
- def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
459
- # return positive, negative label smoothing BCE targets
460
- return 1.0 - 0.5 * eps, 0.5 * eps
461
-
462
-
463
- class BCEBlurWithLogitsLoss(nn.Module):
464
- # BCEwithLogitLoss() with reduced missing label effects.
465
- def __init__(self, alpha=0.05):
466
- super(BCEBlurWithLogitsLoss, self).__init__()
467
- self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss()
468
- self.alpha = alpha
469
-
470
- def forward(self, pred, true):
471
- loss = self.loss_fcn(pred, true)
472
- pred = torch.sigmoid(pred) # prob from logits
473
- dx = pred - true # reduce only missing label effects
474
- # dx = (pred - true).abs() # reduce missing label and false label effects
475
- alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
476
- loss *= alpha_factor
477
- return loss.mean()
478
-
479
-
480
- def compute_loss(p, targets, model): # predictions, targets, model
481
- device = targets.device
482
- lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
483
- tcls, tbox, indices, anchors = build_targets(p, targets, model) # targets
484
- h = model.hyp # hyperparameters
485
-
486
- # Define criteria
487
- BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([h['cls_pw']])).to(device)
488
- BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([h['obj_pw']])).to(device)
489
-
490
- # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
491
- cp, cn = smooth_BCE(eps=0.0)
492
-
493
- # Focal loss
494
- g = h['fl_gamma'] # focal loss gamma
495
- if g > 0:
496
- BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
497
-
498
- # Losses
499
- nt = 0 # number of targets
500
- np = len(p) # number of output layers (note: this local rebinds the name np inside compute_loss)
501
- balance = [4.0, 1.0, 0.4] if np == 3 else [4.0, 1.0, 0.4, 0.1] # P3-5 or P3-6
502
- for i, pi in enumerate(p): # layer index, layer predictions
503
- b, a, gj, gi = indices[i] # image, anchor, gridy, gridx
504
- tobj = torch.zeros_like(pi[..., 0], device=device) # target obj
505
-
506
- n = b.shape[0] # number of targets
507
- if n:
508
- nt += n # cumulative targets
509
- ps = pi[b, a, gj, gi] # prediction subset corresponding to targets
510
-
511
- # Regression
512
- pxy = ps[:, :2].sigmoid() * 2. - 0.5
513
- pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
514
- pbox = torch.cat((pxy, pwh), 1).to(device) # predicted box
515
- iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target)
516
- lbox += (1.0 - iou).mean() # iou loss
517
-
518
- # Objectness
519
- tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio
520
-
521
- # Classification
522
- if model.nc > 1: # cls loss (only if multiple classes)
523
- t = torch.full_like(ps[:, 5:], cn, device=device) # targets
524
- t[range(n), tcls[i]] = cp
525
- lcls += BCEcls(ps[:, 5:], t) # BCE
526
-
527
- # Append targets to text file
528
- # with open('targets.txt', 'a') as file:
529
- # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]
530
-
531
- lobj += BCEobj(pi[..., 4], tobj) * balance[i] # obj loss
532
-
533
- s = 3 / np # output count scaling
534
- lbox *= h['box'] * s
535
- lobj *= h['obj'] * s * (1.4 if np == 4 else 1.)
536
- lcls *= h['cls'] * s
537
- bs = tobj.shape[0] # batch size
538
-
539
- loss = lbox + lobj + lcls
540
- return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
541
-
542
-
543
- def build_targets(p, targets, model):
544
- # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
545
- det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module
546
- na, nt = det.na, targets.shape[0] # number of anchors, targets
547
- tcls, tbox, indices, anch = [], [], [], []
548
- gain = torch.ones(7, device=targets.device) # normalized to gridspace gain
549
- ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)
550
- targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices
551
-
552
- g = 0.5 # bias
553
- off = torch.tensor([[0, 0],
554
- [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m
555
- # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm
556
- ], device=targets.device).float() * g # offsets
557
-
558
- for i in range(det.nl):
559
- anchors = det.anchors[i]
560
- gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain
561
-
562
- # Match targets to anchors
563
- t = targets * gain
564
- if nt:
565
- # Matches
566
- r = t[:, :, 4:6] / anchors[:, None] # wh ratio
567
- j = torch.max(r, 1. / r).max(2)[0] < model.hyp['anchor_t'] # compare
568
- # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
569
- t = t[j] # filter
570
-
571
- # Offsets
572
- gxy = t[:, 2:4] # grid xy
573
- gxi = gain[[2, 3]] - gxy # inverse
574
- j, k = ((gxy % 1. < g) & (gxy > 1.)).T
575
- l, m = ((gxi % 1. < g) & (gxi > 1.)).T
576
- j = torch.stack((torch.ones_like(j), j, k, l, m))
577
- t = t.repeat((5, 1, 1))[j]
578
- offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
579
- else:
580
- t = targets[0]
581
- offsets = 0
582
-
583
- # Define
584
- b, c = t[:, :2].long().T # image, class
585
- gxy = t[:, 2:4] # grid xy
586
- gwh = t[:, 4:6] # grid wh
587
- gij = (gxy - offsets).long()
588
- gi, gj = gij.T # grid xy indices
589
-
590
- # Append
591
- a = t[:, 6].long() # anchor indices
592
- indices.append((b, a, gj, gi)) # image, anchor, grid indices
593
- tbox.append(torch.cat((gxy - gij, gwh), 1)) # box
594
- anch.append(anchors[a]) # anchors
595
- tcls.append(c) # class
596
-
597
- return tcls, tbox, indices, anch
598
-
599
-
600
- def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, merge=False, classes=None, agnostic=False):
601
- """Performs Non-Maximum Suppression (NMS) on inference results
602
-
603
- Returns:
604
- detections with shape: nx6 (x1, y1, x2, y2, conf, cls)
605
- """
606
-
607
- nc = prediction[0].shape[1] - 5 # number of classes
608
- xc = prediction[..., 4] > conf_thres # candidates
609
-
610
- # Settings
611
- min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height
612
- max_det = 300 # maximum number of detections per image
613
- time_limit = 10.0 # seconds to quit after
614
- redundant = True # require redundant detections
615
- multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)
616
-
617
- t = time.time()
618
- output = [None] * prediction.shape[0]
619
- for xi, x in enumerate(prediction): # image index, image inference
620
- # Apply constraints
621
- # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height
622
- x = x[xc[xi]] # confidence
623
-
624
- # If none remain process next image
625
- if not x.shape[0]:
626
- continue
627
-
628
- # Compute conf
629
- x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf
630
-
631
- # Box (center x, center y, width, height) to (x1, y1, x2, y2)
632
- box = xywh2xyxy(x[:, :4])
633
-
634
- # Detections matrix nx6 (xyxy, conf, cls)
635
- if multi_label:
636
- i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
637
- x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
638
- else: # best class only
639
- conf, j = x[:, 5:].max(1, keepdim=True)
640
- x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
641
-
642
- # Filter by class
643
- if classes:
644
- x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
645
-
646
- # Apply finite constraint
647
- # if not torch.isfinite(x).all():
648
- # x = x[torch.isfinite(x).all(1)]
649
-
650
- # If none remain process next image
651
- n = x.shape[0] # number of boxes
652
- if not n:
653
- continue
654
-
655
- # Sort by confidence
656
- # x = x[x[:, 4].argsort(descending=True)]
657
-
658
- # Batched NMS
659
- c = x[:, 5:6] * (0 if agnostic else max_wh) # classes
660
- boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores
661
- i = torch.ops.torchvision.nms(boxes, scores, iou_thres)
662
- if i.shape[0] > max_det: # limit detections
663
- i = i[:max_det]
664
- if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)
665
- try: # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
666
- iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix
667
- weights = iou * scores[None] # box weights
668
- x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes
669
- if redundant:
670
- i = i[iou.sum(1) > 1] # require redundancy
671
- except: # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
672
- print(x, i, x.shape, i.shape)
673
- pass
674
-
675
- output[xi] = x[i]
676
- if (time.time() - t) > time_limit:
677
- break # time limit exceeded
678
-
679
- return output
680
-
681
-
682
- def strip_optimizer(f='weights/best.pt', s=''): # from utils.general import *; strip_optimizer()
683
- # Strip optimizer from 'f' to finalize training, optionally save as 's'
684
- x = torch.load(f, map_location=torch.device('cpu'))
685
- x['optimizer'] = None
686
- x['training_results'] = None
687
- x['epoch'] = -1
688
- x['model'].half() # to FP16
689
- for p in x['model'].parameters():
690
- p.requires_grad = False
691
- torch.save(x, s or f)
692
- mb = os.path.getsize(s or f) / 1E6 # filesize
693
- print('Optimizer stripped from %s,%s %.1fMB' % (f, (' saved as %s,' % s) if s else '', mb))
694
-
695
-
696
- def coco_class_count(path='../coco/labels/train2014/'):
697
- # Histogram of occurrences per class
698
- nc = 80 # number classes
699
- x = np.zeros(nc, dtype='int32')
700
- files = sorted(glob.glob('%s/*.*' % path))
701
- for i, file in enumerate(files):
702
- labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
703
- x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
704
- print(i, len(files))
705
-
706
-
707
- def coco_only_people(path='../coco/labels/train2017/'): # from utils.general import *; coco_only_people()
708
- # Find images with only people
709
- files = sorted(glob.glob('%s/*.*' % path))
710
- for i, file in enumerate(files):
711
- labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
712
- if all(labels[:, 0] == 0):
713
- print(labels.shape[0], file)
714
-
715
-
716
- def crop_images_random(path='../images/', scale=0.50): # from utils.general import *; crop_images_random()
717
- # crops images into random squares up to scale fraction
718
- # WARNING: overwrites images!
719
- for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
720
- img = cv2.imread(file) # BGR
721
- if img is not None:
722
- h, w = img.shape[:2]
723
-
724
- # create random mask
725
- a = 30 # minimum size (pixels)
726
- mask_h = random.randint(a, int(max(a, h * scale))) # mask height
727
- mask_w = mask_h # mask width
728
-
729
- # box
730
- xmin = max(0, random.randint(0, w) - mask_w // 2)
731
- ymin = max(0, random.randint(0, h) - mask_h // 2)
732
- xmax = min(w, xmin + mask_w)
733
- ymax = min(h, ymin + mask_h)
734
-
735
- # apply random color mask
736
- cv2.imwrite(file, img[ymin:ymax, xmin:xmax])
737
-
738
-
739
- def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
740
- # Makes single-class coco datasets. from utils.general import *; coco_single_class_labels()
741
- if os.path.exists('new/'):
742
- shutil.rmtree('new/') # delete output folder
743
- os.makedirs('new/') # make new output folder
744
- os.makedirs('new/labels/')
745
- os.makedirs('new/images/')
746
- for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
747
- with open(file, 'r') as f:
748
- labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
749
- i = labels[:, 0] == label_class
750
- if any(i):
751
- img_file = file.replace('labels', 'images').replace('txt', 'jpg')
752
- labels[:, 0] = 0 # reset class to 0
753
- with open('new/images.txt', 'a') as f: # add image to dataset list
754
- f.write(img_file + '\n')
755
- with open('new/labels/' + Path(file).name, 'a') as f: # write label
756
- for l in labels[i]:
757
- f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
758
- shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg')) # copy images
759
-
760
-
761
- def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
762
- """ Creates kmeans-evolved anchors from training dataset
763
-
764
- Arguments:
765
- path: path to dataset *.yaml, or a loaded dataset
766
- n: number of anchors
767
- img_size: image size used for training
768
- thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
769
- gen: generations to evolve anchors using genetic algorithm
770
-
771
- Return:
772
- k: kmeans evolved anchors
773
-
774
- Usage:
775
- from utils.general import *; _ = kmean_anchors()
776
- """
777
- thr = 1. / thr
778
-
779
- def metric(k, wh): # compute metrics
780
- r = wh[:, None] / k[None]
781
- x = torch.min(r, 1. / r).min(2)[0] # ratio metric
782
- # x = wh_iou(wh, torch.tensor(k)) # iou metric
783
- return x, x.max(1)[0] # x, best_x
784
-
785
- def fitness(k): # mutation fitness
786
- _, best = metric(torch.tensor(k, dtype=torch.float32), wh)
787
- return (best * (best > thr).float()).mean() # fitness
788
-
789
- def print_results(k):
790
- k = k[np.argsort(k.prod(1))] # sort small to large
791
- x, best = metric(k, wh0)
792
- bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr
793
- print('thr=%.2f: %.4f best possible recall, %.2f anchors past thr' % (thr, bpr, aat))
794
- print('n=%g, img_size=%s, metric_all=%.3f/%.3f-mean/best, past_thr=%.3f-mean: ' %
795
- (n, img_size, x.mean(), best.mean(), x[x > thr].mean()), end='')
796
- for i, x in enumerate(k):
797
- print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
798
- return k
799
-
800
- if isinstance(path, str): # *.yaml file
801
- with open(path) as f:
802
- data_dict = yaml.load(f, Loader=yaml.FullLoader) # model dict
803
- from metadata.predictor_yolo_detector.utils.datasets import LoadImagesAndLabels
804
- dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
805
- else:
806
- dataset = path # dataset
807
-
808
- # Get label wh
809
- shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
810
- wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh
811
-
812
- # Filter
813
- i = (wh0 < 3.0).any(1).sum()
814
- if i:
815
- print('WARNING: Extremely small objects found. '
816
- '%g of %g labels are < 3 pixels in width or height.' % (i, len(wh0)))
817
- wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels
818
-
819
- # Kmeans calculation
820
- print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
821
- s = wh.std(0) # sigmas for whitening
822
- k, dist = kmeans(wh / s, n, iter=30) # points, mean distance
823
- k *= s
824
- wh = torch.tensor(wh, dtype=torch.float32) # filtered
825
- wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered
826
- k = print_results(k)
827
-
828
- # Plot
829
- # k, d = [None] * 20, [None] * 20
830
- # for i in tqdm(range(1, 21)):
831
- # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance
832
- # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
833
- # ax = ax.ravel()
834
- # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
835
- # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh
836
- # ax[0].hist(wh[wh[:, 0]<100, 0],400)
837
- # ax[1].hist(wh[wh[:, 1]<100, 1],400)
838
- # fig.tight_layout()
839
- # fig.savefig('wh.png', dpi=200)
840
-
841
- # Evolve
842
- npr = np.random
843
- f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma
844
- pbar = tqdm(range(gen), desc='Evolving anchors with Genetic Algorithm') # progress bar
845
- for _ in pbar:
846
- v = np.ones(sh)
847
- while (v == 1).all(): # mutate until a change occurs (prevent duplicates)
848
- v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)
849
- kg = (k.copy() * v).clip(min=2.0)
850
- fg = fitness(kg)
851
- if fg > f:
852
- f, k = fg, kg.copy()
853
- pbar.desc = 'Evolving anchors with Genetic Algorithm: fitness = %.4f' % f
854
- if verbose:
855
- print_results(k)
856
-
857
- return print_results(k)
858
-
859
-
860
- def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
861
- # Print mutation results to evolve.txt (for use with train.py --evolve)
862
- a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys
863
- b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values
864
- c = '%10.4g' * len(results) % results # results (P, R, [email protected], [email protected]:0.95, val_losses x 3)
865
- print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
866
-
867
- if bucket:
868
- url = 'gs://%s/evolve.txt' % bucket
869
- if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
870
- os.system('gsutil cp %s .' % url) # download evolve.txt if larger than local
871
-
872
- with open('evolve.txt', 'a') as f: # append result
873
- f.write(c + b + '\n')
874
- x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows
875
- x = x[np.argsort(-fitness(x))] # sort
876
- np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness
877
-
878
- # Save yaml
879
- for i, k in enumerate(hyp.keys()):
880
- hyp[k] = float(x[0, i + 7])
881
- with open(yaml_file, 'w') as f:
882
- results = tuple(x[0, :7])
883
- c = '%10.4g' * len(results) % results # results (P, R, [email protected], [email protected]:0.95, val_losses x 3)
884
- f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
885
- yaml.dump(hyp, f, sort_keys=False)
886
-
887
- if bucket:
888
- os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload
889
-
890
-
891
- def apply_classifier(x, model, img, im0):
892
- # applies a second stage classifier to yolo outputs
893
- im0 = [im0] if isinstance(im0, np.ndarray) else im0
894
- for i, d in enumerate(x): # per image
895
- if d is not None and len(d):
896
- d = d.clone()
897
-
898
- # Reshape and pad cutouts
899
- b = xyxy2xywh(d[:, :4]) # boxes
900
- b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square
901
- b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad
902
- d[:, :4] = xywh2xyxy(b).long()
903
-
904
- # Rescale boxes from img_size to im0 size
905
- scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
906
-
907
- # Classes
908
- pred_cls1 = d[:, 5].long()
909
- ims = []
910
- for j, a in enumerate(d): # per item
911
- cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
912
- im = cv2.resize(cutout, (224, 224)) # BGR
913
- # cv2.imwrite('test%i.jpg' % j, cutout)
914
-
915
- im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
916
- im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32
917
- im /= 255.0 # 0 - 255 to 0.0 - 1.0
918
- ims.append(im)
919
-
920
- pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction
921
- x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections
922
-
923
- return x
924
-
925
-
926
- def fitness(x):
927
- # Returns fitness (for use with results.txt or evolve.txt)
928
- w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, [email protected], [email protected]:0.95]
929
- return (x[:, :4] * w).sum(1)
930
-
931
-
932
- def output_to_target(output, width, height):
933
- # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]
934
- if isinstance(output, torch.Tensor):
935
- output = output.cpu().numpy()
936
-
937
- targets = []
938
- for i, o in enumerate(output):
939
- if o is not None:
940
- for pred in o:
941
- box = pred[:4]
942
- w = (box[2] - box[0]) / width
943
- h = (box[3] - box[1]) / height
944
- x = box[0] / width + w / 2
945
- y = box[1] / height + h / 2
946
- conf = pred[4]
947
- cls = int(pred[5])
948
-
949
- targets.append([i, cls, x, y, w, h, conf])
950
-
951
- return np.array(targets)
952
-
953
-
954
- def increment_dir(dir, comment=''):
955
- # Increments a directory runs/exp1 --> runs/exp2_comment
956
- n = 0 # number
957
- dir = str(Path(dir)) # os-agnostic
958
- dirs = sorted(glob.glob(dir + '*')) # directories
959
- if dirs:
960
- matches = [re.search(r"exp(\d+)", d) for d in dirs]
961
- idxs = [int(m.groups()[0]) for m in matches if m]
962
- if idxs:
963
- n = max(idxs) + 1 # increment
964
- return dir + str(n) + ('_' + comment if comment else '')
965
-
966
-
967
- # Plotting functions ---------------------------------------------------------------------------------------------------
968
- def hist2d(x, y, n=100):
969
- # 2d histogram used in labels.png and evolve.png
970
- xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n)
971
- hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges))
972
- xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1)
973
- yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1)
974
- return np.log(hist[xidx, yidx])
975
-
976
-
977
- def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):
978
- # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy
979
- def butter_lowpass(cutoff, fs, order):
980
- nyq = 0.5 * fs
981
- normal_cutoff = cutoff / nyq
982
- b, a = butter(order, normal_cutoff, btype='low', analog=False)
983
- return b, a
984
-
985
- b, a = butter_lowpass(cutoff, fs, order=order)
986
- return filtfilt(b, a, data) # forward-backward filter
987
-
988
-
989
- def plot_one_box(x, img, color=None, label=None, line_thickness=None):
990
- # Plots one bounding box on image img
991
- tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
992
- color = color or [random.randint(0, 255) for _ in range(3)]
993
- c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
994
- cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
995
- if label:
996
- tf = max(tl - 1, 1) # font thickness
997
- t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
998
- c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
999
- cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
1000
- cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
1001
-
1002
-
1003
- def plot_wh_methods(): # from utils.general import *; plot_wh_methods()
1004
- # Compares the two methods for width-height anchor multiplication
1005
- # https://github.com/ultralytics/yolov3/issues/168
1006
- x = np.arange(-4.0, 4.0, .1)
1007
- ya = np.exp(x)
1008
- yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
1009
-
1010
- fig = plt.figure(figsize=(6, 3), dpi=150)
1011
- plt.plot(x, ya, '.-', label='YOLOv3')
1012
- plt.plot(x, yb ** 2, '.-', label='YOLOv5 ^2')
1013
- plt.plot(x, yb ** 1.6, '.-', label='YOLOv5 ^1.6')
1014
- plt.xlim(left=-4, right=4)
1015
- plt.ylim(bottom=0, top=6)
1016
- plt.xlabel('input')
1017
- plt.ylabel('output')
1018
- plt.grid()
1019
- plt.legend()
1020
- fig.tight_layout()
1021
- fig.savefig('comparison.png', dpi=200)
1022
-
1023
-
1024
- def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
1025
- tl = 3 # line thickness
1026
- tf = max(tl - 1, 1) # font thickness
1027
-
1028
- if isinstance(images, torch.Tensor):
1029
- images = images.cpu().float().numpy()
1030
-
1031
- if isinstance(targets, torch.Tensor):
1032
- targets = targets.cpu().numpy()
1033
-
1034
- # un-normalise
1035
- if np.max(images[0]) <= 1:
1036
- images *= 255
1037
-
1038
- bs, _, h, w = images.shape # batch size, _, height, width
1039
- bs = min(bs, max_subplots) # limit plot images
1040
- ns = np.ceil(bs ** 0.5) # number of subplots (square)
1041
-
1042
- # Check if we should resize
1043
- scale_factor = max_size / max(h, w)
1044
- if scale_factor < 1:
1045
- h = math.ceil(scale_factor * h)
1046
- w = math.ceil(scale_factor * w)
1047
-
1048
- # Empty array for output
1049
- mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)
1050
-
1051
- # Fix class - colour map
1052
- prop_cycle = plt.rcParams['axes.prop_cycle']
1053
- # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
1054
- hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
1055
- color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]
1056
-
1057
- for i, img in enumerate(images):
1058
- if i == max_subplots: # if last batch has fewer images than we expect
1059
- break
1060
-
1061
- block_x = int(w * (i // ns))
1062
- block_y = int(h * (i % ns))
1063
-
1064
- img = img.transpose(1, 2, 0)
1065
- if scale_factor < 1:
1066
- img = cv2.resize(img, (w, h))
1067
-
1068
- mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
1069
- if len(targets) > 0:
1070
- image_targets = targets[targets[:, 0] == i]
1071
- boxes = xywh2xyxy(image_targets[:, 2:6]).T
1072
- classes = image_targets[:, 1].astype('int')
1073
- gt = image_targets.shape[1] == 6 # ground truth if no conf column
1074
- conf = None if gt else image_targets[:, 6] # check for confidence presence (gt vs pred)
1075
-
1076
- boxes[[0, 2]] *= w
1077
- boxes[[0, 2]] += block_x
1078
- boxes[[1, 3]] *= h
1079
- boxes[[1, 3]] += block_y
1080
- for j, box in enumerate(boxes.T):
1081
- cls = int(classes[j])
1082
- color = color_lut[cls % len(color_lut)]
1083
- cls = names[cls] if names else cls
1084
- if gt or conf[j] > 0.3: # 0.3 conf thresh
1085
- label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
1086
- plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
1087
-
1088
- # Draw image filename labels
1089
- if paths is not None:
1090
- label = os.path.basename(paths[i])[:40] # trim to 40 char
1091
- t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
1092
- cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
1093
- lineType=cv2.LINE_AA)
1094
-
1095
- # Image border
1096
- cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
1097
-
1098
- if fname is not None:
1099
- mosaic = cv2.resize(mosaic, (int(ns * w * 0.5), int(ns * h * 0.5)), interpolation=cv2.INTER_AREA)
1100
- # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save
1101
- Image.fromarray(mosaic).save(fname) # PIL save
1102
- return mosaic
1103
-
1104
-
1105
- def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):
1106
- # Plot LR simulating training for full epochs
1107
- optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals
1108
- y = []
1109
- for _ in range(epochs):
1110
- scheduler.step()
1111
- y.append(optimizer.param_groups[0]['lr'])
1112
- plt.plot(y, '.-', label='LR')
1113
- plt.xlabel('epoch')
1114
- plt.ylabel('LR')
1115
- plt.grid()
1116
- plt.xlim(0, epochs)
1117
- plt.ylim(0)
1118
- plt.tight_layout()
1119
- plt.savefig(Path(save_dir) / 'LR.png', dpi=200)
1120
-
1121
-
1122
- def plot_test_txt(): # from utils.general import *; plot_test()
1123
- # Plot test.txt histograms
1124
- x = np.loadtxt('test.txt', dtype=np.float32)
1125
- box = xyxy2xywh(x[:, :4])
1126
- cx, cy = box[:, 0], box[:, 1]
1127
-
1128
- fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True)
1129
- ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
1130
- ax.set_aspect('equal')
1131
- plt.savefig('hist2d.png', dpi=300)
1132
-
1133
- fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True)
1134
- ax[0].hist(cx, bins=600)
1135
- ax[1].hist(cy, bins=600)
1136
- plt.savefig('hist1d.png', dpi=200)
1137
-
1138
-
1139
- def plot_targets_txt(): # from utils.general import *; plot_targets_txt()
1140
- # Plot targets.txt histograms
1141
- x = np.loadtxt('targets.txt', dtype=np.float32).T
1142
- s = ['x targets', 'y targets', 'width targets', 'height targets']
1143
- fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
1144
- ax = ax.ravel()
1145
- for i in range(4):
1146
- ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
1147
- ax[i].legend()
1148
- ax[i].set_title(s[i])
1149
- plt.savefig('targets.jpg', dpi=200)
1150
-
1151
-
1152
- def plot_study_txt(f='study.txt', x=None): # from utils.general import *; plot_study_txt()
1153
- # Plot study.txt generated by test.py
1154
- fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)
1155
- ax = ax.ravel()
1156
-
1157
- fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)
1158
- for f in ['study/study_coco_yolov5%s.txt' % x for x in ['s', 'm', 'l', 'x']]:
1159
- y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T
1160
- x = np.arange(y.shape[1]) if x is None else np.array(x)
1161
- s = ['P', 'R', '[email protected]', '[email protected]:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)']
1162
- for i in range(7):
1163
- ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)
1164
- ax[i].set_title(s[i])
1165
-
1166
- j = y[3].argmax() + 1
1167
- ax2.plot(y[6, :j], y[3, :j] * 1E2, '.-', linewidth=2, markersize=8,
1168
- label=Path(f).stem.replace('study_coco_', '').replace('yolo', 'YOLO'))
1169
-
1170
- ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],
1171
- 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet')
1172
-
1173
- ax2.grid()
1174
- ax2.set_xlim(0, 30)
1175
- ax2.set_ylim(28, 50)
1176
- ax2.set_yticks(np.arange(30, 55, 5))
1177
- ax2.set_xlabel('GPU Speed (ms/img)')
1178
- ax2.set_ylabel('COCO AP val')
1179
- ax2.legend(loc='lower right')
1180
- plt.savefig('study_mAP_latency.png', dpi=300)
1181
- plt.savefig(f.replace('.txt', '.png'), dpi=300)
1182
-
1183
-
1184
- def plot_labels(labels, save_dir=''):
1185
- # plot dataset labels
1186
- c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes
1187
- nc = int(c.max() + 1) # number of classes
1188
-
1189
- fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)
1190
- ax = ax.ravel()
1191
- ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)
1192
- ax[0].set_xlabel('classes')
1193
- ax[1].scatter(b[0], b[1], c=hist2d(b[0], b[1], 90), cmap='jet')
1194
- ax[1].set_xlabel('x')
1195
- ax[1].set_ylabel('y')
1196
- ax[2].scatter(b[2], b[3], c=hist2d(b[2], b[3], 90), cmap='jet')
1197
- ax[2].set_xlabel('width')
1198
- ax[2].set_ylabel('height')
1199
- plt.savefig(Path(save_dir) / 'labels.png', dpi=200)
1200
- plt.close()
1201
-
1202
- # seaborn correlogram
1203
- try:
1204
- import seaborn as sns
1205
- import pandas as pd
1206
- x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])
1207
- sns.pairplot(x, corner=True, diag_kind='hist', kind='scatter', markers='o',
1208
- plot_kws=dict(s=3, edgecolor=None, linewidth=1, alpha=0.02),
1209
- diag_kws=dict(bins=50))
1210
- plt.savefig(Path(save_dir) / 'labels_correlogram.png', dpi=200)
1211
- plt.close()
1212
- except Exception as e:
1213
- pass
1214
-
1215
-
1216
- def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.general import *; plot_evolution()
1217
- # Plot hyperparameter evolution results in evolve.txt
1218
- with open(yaml_file) as f:
1219
- hyp = yaml.load(f, Loader=yaml.FullLoader)
1220
- x = np.loadtxt('evolve.txt', ndmin=2)
1221
- f = fitness(x)
1222
- # weights = (f - f.min()) ** 2 # for weighted results
1223
- plt.figure(figsize=(10, 12), tight_layout=True)
1224
- matplotlib.rc('font', **{'size': 8})
1225
- for i, (k, v) in enumerate(hyp.items()):
1226
- y = x[:, i + 7]
1227
- # mu = (y * weights).sum() / weights.sum() # best weighted result
1228
- mu = y[f.argmax()] # best single result
1229
- plt.subplot(6, 5, i + 1)
1230
- plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')
1231
- plt.plot(mu, f.max(), 'k+', markersize=15)
1232
- plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters
1233
- if i % 5 != 0:
1234
- plt.yticks([])
1235
- print('%15s: %.3g' % (k, mu))
1236
- plt.savefig('evolve.png', dpi=200)
1237
- print('\nPlot saved as evolve.png')
1238
-
1239
-
1240
- def plot_results_overlay(start=0, stop=0): # from utils.general import *; plot_results_overlay()
1241
- # Plot training 'results*.txt', overlaying train and val losses
1242
- s = ['train', 'train', 'train', 'Precision', '[email protected]', 'val', 'val', 'val', 'Recall', '[email protected]:0.95'] # legends
1243
- t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles
1244
- for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
1245
- results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
1246
- n = results.shape[1] # number of rows
1247
- x = range(start, min(stop, n) if stop else n)
1248
- fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True)
1249
- ax = ax.ravel()
1250
- for i in range(5):
1251
- for j in [i, i + 5]:
1252
- y = results[j, x]
1253
- ax[i].plot(x, y, marker='.', label=s[j])
1254
- # y_smooth = butter_lowpass_filtfilt(y)
1255
- # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j])
1256
-
1257
- ax[i].set_title(t[i])
1258
- ax[i].legend()
1259
- ax[i].set_ylabel(f) if i == 0 else None # add filename
1260
- fig.savefig(f.replace('.txt', '.png'), dpi=200)
1261
-
1262
-
1263
- def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):
1264
- # from utils.general import *; plot_results(save_dir='runs/exp0')
1265
- # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov5#reproduce-our-training
1266
- fig, ax = plt.subplots(2, 5, figsize=(12, 6))
1267
- ax = ax.ravel()
1268
- s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',
1269
- 'val Box', 'val Objectness', 'val Classification', '[email protected]', '[email protected]:0.95']
1270
- if bucket:
1271
- # os.system('rm -rf storage.googleapis.com')
1272
- # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
1273
- files = ['results%g.txt' % x for x in id]
1274
- c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)
1275
- os.system(c)
1276
- else:
1277
- files = glob.glob(str(Path(save_dir) / 'results*.txt')) + glob.glob('../../Downloads/results*.txt')
1278
- assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)
1279
- for fi, f in enumerate(files):
1280
- try:
1281
- results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
1282
- n = results.shape[1] # number of rows
1283
- x = range(start, min(stop, n) if stop else n)
1284
- for i in range(10):
1285
- y = results[i, x]
1286
- if i in [0, 1, 2, 5, 6, 7]:
1287
- y[y == 0] = np.nan # don't show zero loss values
1288
- # y /= y[0] # normalize
1289
- label = labels[fi] if len(labels) else Path(f).stem
1290
- ax[i].plot(x, y, marker='.', label=label, linewidth=1, markersize=6)
1291
- ax[i].set_title(s[i])
1292
- # if i in [5, 6, 7]: # share train and val loss y axes
1293
- # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
1294
- except Exception as e:
1295
- print('Warning: Plotting error for %s; %s' % (f, e))
1296
-
1297
- fig.tight_layout()
1298
- ax[1].legend()
1299
- fig.savefig(Path(save_dir) / 'results.png', dpi=200)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Fcjs/stablediffusionapi-edge-of-realism/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Stablediffusionapi Edge Of Realism
- emoji: 😻
- colorFrom: green
- colorTo: pink
- sdk: gradio
- sdk_version: 3.50.2
- app_file: app.py
- pinned: false
- license: gpl-3.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Felladrin/MiniSearch/src/modules/loadBar.ts DELETED
@@ -1,7 +0,0 @@
- import LoadBar from "loadbar";
-
- export const loadBar = new LoadBar({
-   height: "4px",
-   backgroundColor: "var(--focus)",
-   startPoint: 1,
- });
 
 
 
 
 
 
 
 
spaces/Fernando22/freegpt-webui/g4f/Provider/Providers/helpers/theb.py DELETED
@@ -1,48 +0,0 @@
- import json
- import sys
- from re import findall
- from curl_cffi import requests
-
- config = json.loads(sys.argv[1])
- prompt = config['messages'][-1]['content']
-
- headers = {
-     'authority': 'chatbot.theb.ai',
-     'accept': 'application/json, text/plain, */*',
-     'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-     'content-type': 'application/json',
-     'origin': 'https://chatbot.theb.ai',
-     'referer': 'https://chatbot.theb.ai/',
-     'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
-     'sec-ch-ua-mobile': '?0',
-     'sec-ch-ua-platform': '"macOS"',
-     'sec-fetch-dest': 'empty',
-     'sec-fetch-mode': 'cors',
-     'sec-fetch-site': 'same-origin',
-     'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
- }
-
- json_data = {
-     'prompt': prompt,
-     'options': {}
- }
-
- def format(chunk):
-     try:
-         completion_chunk = findall(r'content":"(.*)"},"fin', chunk.decode())[0]
-         print(completion_chunk, flush=True, end='')
-
-     except Exception as e:
-         print(f'[ERROR] an error occured, retrying... | [[{chunk.decode()}]]', flush=True)
-         return
-
- while True:
-     try:
-         response = requests.post('https://chatbot.theb.ai/api/chat-process',
-             headers=headers, json=json_data, content_callback=format, impersonate='chrome110')
-
-         exit(0)
-
-     except Exception as e:
-         print('[ERROR] an error occured, retrying... |', e, flush=True)
-         continue
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/FrankZxShen/vits-fast-finetuning-umamusume/transforms.py DELETED
@@ -1,193 +0,0 @@
1
- import torch
2
- from torch.nn import functional as F
3
-
4
- import numpy as np
5
-
6
-
7
- DEFAULT_MIN_BIN_WIDTH = 1e-3
8
- DEFAULT_MIN_BIN_HEIGHT = 1e-3
9
- DEFAULT_MIN_DERIVATIVE = 1e-3
10
-
11
-
12
- def piecewise_rational_quadratic_transform(inputs,
13
- unnormalized_widths,
14
- unnormalized_heights,
15
- unnormalized_derivatives,
16
- inverse=False,
17
- tails=None,
18
- tail_bound=1.,
19
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
20
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
21
- min_derivative=DEFAULT_MIN_DERIVATIVE):
22
-
23
- if tails is None:
24
- spline_fn = rational_quadratic_spline
25
- spline_kwargs = {}
26
- else:
27
- spline_fn = unconstrained_rational_quadratic_spline
28
- spline_kwargs = {
29
- 'tails': tails,
30
- 'tail_bound': tail_bound
31
- }
32
-
33
- outputs, logabsdet = spline_fn(
34
- inputs=inputs,
35
- unnormalized_widths=unnormalized_widths,
36
- unnormalized_heights=unnormalized_heights,
37
- unnormalized_derivatives=unnormalized_derivatives,
38
- inverse=inverse,
39
- min_bin_width=min_bin_width,
40
- min_bin_height=min_bin_height,
41
- min_derivative=min_derivative,
42
- **spline_kwargs
43
- )
44
- return outputs, logabsdet
45
-
46
-
47
- def searchsorted(bin_locations, inputs, eps=1e-6):
48
- bin_locations[..., -1] += eps
49
- return torch.sum(
50
- inputs[..., None] >= bin_locations,
51
- dim=-1
52
- ) - 1
53
-
54
-
55
- def unconstrained_rational_quadratic_spline(inputs,
56
- unnormalized_widths,
57
- unnormalized_heights,
58
- unnormalized_derivatives,
59
- inverse=False,
60
- tails='linear',
61
- tail_bound=1.,
62
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
63
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
64
- min_derivative=DEFAULT_MIN_DERIVATIVE):
65
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
66
- outside_interval_mask = ~inside_interval_mask
67
-
68
- outputs = torch.zeros_like(inputs)
69
- logabsdet = torch.zeros_like(inputs)
70
-
71
- if tails == 'linear':
72
- unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
73
- constant = np.log(np.exp(1 - min_derivative) - 1)
74
- unnormalized_derivatives[..., 0] = constant
75
- unnormalized_derivatives[..., -1] = constant
76
-
77
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
78
- logabsdet[outside_interval_mask] = 0
79
- else:
80
- raise RuntimeError('{} tails are not implemented.'.format(tails))
81
-
82
- outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
83
- inputs=inputs[inside_interval_mask],
84
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
85
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
86
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
87
- inverse=inverse,
88
- left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
89
- min_bin_width=min_bin_width,
90
- min_bin_height=min_bin_height,
91
- min_derivative=min_derivative
92
- )
93
-
94
- return outputs, logabsdet
95
-
96
- def rational_quadratic_spline(inputs,
97
- unnormalized_widths,
98
- unnormalized_heights,
99
- unnormalized_derivatives,
100
- inverse=False,
101
- left=0., right=1., bottom=0., top=1.,
102
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
103
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
104
- min_derivative=DEFAULT_MIN_DERIVATIVE):
105
- if torch.min(inputs) < left or torch.max(inputs) > right:
106
- raise ValueError('Input to a transform is not within its domain')
107
-
108
- num_bins = unnormalized_widths.shape[-1]
109
-
110
- if min_bin_width * num_bins > 1.0:
111
- raise ValueError('Minimal bin width too large for the number of bins')
112
- if min_bin_height * num_bins > 1.0:
113
- raise ValueError('Minimal bin height too large for the number of bins')
114
-
115
- widths = F.softmax(unnormalized_widths, dim=-1)
116
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
117
- cumwidths = torch.cumsum(widths, dim=-1)
118
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
119
- cumwidths = (right - left) * cumwidths + left
120
- cumwidths[..., 0] = left
121
- cumwidths[..., -1] = right
122
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
123
-
124
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
125
-
126
- heights = F.softmax(unnormalized_heights, dim=-1)
127
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
128
- cumheights = torch.cumsum(heights, dim=-1)
129
- cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
130
- cumheights = (top - bottom) * cumheights + bottom
131
- cumheights[..., 0] = bottom
132
- cumheights[..., -1] = top
133
- heights = cumheights[..., 1:] - cumheights[..., :-1]
134
-
135
- if inverse:
136
- bin_idx = searchsorted(cumheights, inputs)[..., None]
137
- else:
138
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
139
-
140
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
141
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
142
-
143
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
144
- delta = heights / widths
145
- input_delta = delta.gather(-1, bin_idx)[..., 0]
146
-
147
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
148
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
149
-
150
- input_heights = heights.gather(-1, bin_idx)[..., 0]
151
-
152
- if inverse:
153
- a = (((inputs - input_cumheights) * (input_derivatives
154
- + input_derivatives_plus_one
155
- - 2 * input_delta)
156
- + input_heights * (input_delta - input_derivatives)))
157
- b = (input_heights * input_derivatives
158
- - (inputs - input_cumheights) * (input_derivatives
159
- + input_derivatives_plus_one
160
- - 2 * input_delta))
161
- c = - input_delta * (inputs - input_cumheights)
162
-
163
- discriminant = b.pow(2) - 4 * a * c
164
- assert (discriminant >= 0).all()
165
-
166
- root = (2 * c) / (-b - torch.sqrt(discriminant))
167
- outputs = root * input_bin_widths + input_cumwidths
168
-
169
- theta_one_minus_theta = root * (1 - root)
170
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
171
- * theta_one_minus_theta)
172
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
173
- + 2 * input_delta * theta_one_minus_theta
174
- + input_derivatives * (1 - root).pow(2))
175
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
176
-
177
- return outputs, -logabsdet
178
- else:
179
- theta = (inputs - input_cumwidths) / input_bin_widths
180
- theta_one_minus_theta = theta * (1 - theta)
181
-
182
- numerator = input_heights * (input_delta * theta.pow(2)
183
- + input_derivatives * theta_one_minus_theta)
184
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
185
- * theta_one_minus_theta)
186
- outputs = input_cumheights + numerator / denominator
187
-
188
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
189
- + 2 * input_delta * theta_one_minus_theta
190
- + input_derivatives * (1 - theta).pow(2))
191
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
192
-
193
- return outputs, logabsdet
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Gen-Sim/Gen-Sim/cliport/eval.py DELETED
@@ -1,231 +0,0 @@
1
- """Ravens main training script."""
2
-
3
- import os
4
- import pickle
5
- import json
6
-
7
- import numpy as np
8
- import hydra
9
- from cliport import agents
10
- from cliport import dataset
11
- from cliport import tasks
12
- from cliport.utils import utils
13
- from cliport.environments.environment import Environment
14
- from torch.utils.data import DataLoader
15
-
16
-
17
- @hydra.main(config_path='./cfg', config_name='eval', version_base="1.2")
18
- def main(vcfg):
19
- # Load train cfg
20
- tcfg = utils.load_hydra_config(vcfg['train_config'])
21
-
22
- # Initialize environment and task.
23
- env = Environment(
24
- vcfg['assets_root'],
25
- disp=vcfg['disp'],
26
- shared_memory=vcfg['shared_memory'],
27
- hz=480,
28
- record_cfg=vcfg['record']
29
- )
30
-
31
- # Choose eval mode and task.
32
- mode = vcfg['mode']
33
- eval_task = vcfg['eval_task']
34
- print("eval_task!!!", eval_task)
35
-
36
- if mode not in {'train', 'val', 'test'}:
37
- raise Exception("Invalid mode. Valid options: train, val, test")
38
-
39
- # Load eval dataset.
40
- dataset_type = vcfg['type']
41
- if 'multi' in dataset_type:
42
- ds = dataset.RavensMultiTaskDataset(vcfg['data_dir'],
43
- tcfg,
44
- group=eval_task,
45
- mode=mode,
46
- n_demos=vcfg['n_demos'],
47
- augment=False)
48
- else:
49
- ds = dataset.RavensDataset(os.path.join(vcfg['data_dir'], f"{eval_task}-{mode}"),
50
- tcfg,
51
- n_demos=vcfg['n_demos'],
52
- augment=False)
53
-
54
- all_results = {}
55
- name = '{}-{}-n{}'.format(eval_task, vcfg['agent'], vcfg['n_demos'])
56
-
57
- # Save path for results.
58
- json_name = f"multi-results-{mode}.json" if 'multi' in vcfg['model_path'] else f"results-{mode}.json"
59
- save_path = vcfg['save_path']
60
- print(f"Save path for results: {save_path}")
61
- if not os.path.exists(save_path):
62
- os.makedirs(save_path)
63
- save_json = os.path.join(save_path, f'{name}-{json_name}')
64
-
65
- # Load existing results.
66
- existing_results = {}
67
- if os.path.exists(save_json):
68
- with open(save_json, 'r') as f:
69
- existing_results = json.load(f)
70
-
71
- # Make a list of checkpoints to eval.
72
- ckpts_to_eval = list_ckpts_to_eval(vcfg, existing_results)
73
- data_loader = DataLoader(ds, shuffle=False,
74
- pin_memory=False,
75
- num_workers=1 )
76
-
77
- # Evaluation loop
78
- print(f"Evaluating: {str(ckpts_to_eval)}")
79
- for ckpt in ckpts_to_eval:
80
- model_file = os.path.join(vcfg['model_path'], ckpt)
81
-
82
- if not os.path.exists(model_file) or not os.path.isfile(model_file):
83
- print(f"Checkpoint not found: {model_file}")
84
- continue
85
- elif not vcfg['update_results'] and ckpt in existing_results:
86
- print(f"Skipping because of existing results for {model_file}.")
87
- continue
88
-
89
- results = []
90
- mean_reward = 0.0
91
-
92
- # Run testing for each training run.
93
- for train_run in range(vcfg['n_repeats']):
94
-
95
- # Initialize agent.
96
- utils.set_seed(train_run, torch=True)
97
- agent = agents.names[vcfg['agent']](name, tcfg, data_loader, data_loader)
98
-
99
- # Load checkpoint
100
- agent.load(model_file)
101
- print(f"Loaded: {model_file}")
102
-
103
- record = vcfg['record']['save_video']
104
- n_demos = vcfg['n_demos']
105
-
106
- # Run testing and save total rewards with last transition info.
107
- for i in range(0, n_demos):
108
- print(f'Test: {i + 1}/{n_demos}')
109
- try:
110
- episode, seed = ds.load(i)
111
- except:
112
- print(f"skip bad example {i}")
113
- continue
114
- goal = episode[-1]
115
- total_reward = 0
116
- np.random.seed(seed)
117
-
118
- # set task
119
- if 'multi' in dataset_type:
120
- task_name = ds.get_curr_task()
121
- task = tasks.names[task_name]()
122
- print(f'Evaluating on {task_name}')
123
- else:
124
- task_name = vcfg['eval_task']
125
- task = tasks.names[task_name]()
126
-
127
- task.mode = mode
128
- env.seed(seed)
129
- env.set_task(task)
130
- obs = env.reset()
131
- info = env.info
132
- reward = 0
133
-
134
- # Start recording video (NOTE: super slow)
135
- if record:
136
- video_name = f'{task_name}-{i+1:06d}'
137
- if 'multi' in vcfg['model_task']:
138
- video_name = f"{vcfg['model_task']}-{video_name}"
139
- env.start_rec(video_name)
140
-
141
- for _ in range(task.max_steps):
142
- act = agent.act(obs, info, goal)
143
- lang_goal = info['lang_goal']
144
-
145
- # print(f'Lang Goal: {lang_goal}')
146
- obs, reward, done, info = env.step(act)
147
- total_reward += reward
148
- # print(f'Total Reward: {total_reward:.3f} | Done: {done}\n')
149
- if done:
150
- break
151
-
152
- results.append((total_reward, info))
153
- mean_reward = np.mean([r for r, i in results])
154
- print(f'Mean: {mean_reward} | Task: {task_name} | Ckpt: {ckpt}')
155
-
156
- # End recording video
157
- if record:
158
- env.end_rec()
159
-
160
- all_results[ckpt] = {
161
- 'episodes': results,
162
- 'mean_reward': mean_reward,
163
- }
164
-
165
- # Save results in a json file.
166
- if vcfg['save_results']:
167
- print("save results to:", save_json)
168
- # Load existing results
169
- if os.path.exists(save_json):
170
- with open(save_json, 'r') as f:
171
- existing_results = json.load(f)
172
- existing_results.update(all_results)
173
- all_results = existing_results
174
-
175
- with open(save_json, 'w') as f:
176
- json.dump(all_results, f, indent=4)
177
-
178
-
179
- def list_ckpts_to_eval(vcfg, existing_results):
180
- ckpts_to_eval = []
181
-
182
- # Just the last.ckpt
183
- if vcfg['checkpoint_type'] == 'last':
184
- last_ckpt = 'last.ckpt'
185
- ckpts_to_eval.append(last_ckpt)
186
-
187
- # Validation checkpoints that haven't been already evaluated.
188
- elif vcfg['checkpoint_type'] == 'val_missing':
189
- checkpoints = sorted([c for c in os.listdir(vcfg['model_path']) if "steps=" in c])
190
- ckpts_to_eval = [c for c in checkpoints if c not in existing_results]
191
-
192
- # Find the best checkpoint from validation and run eval on the test set.
193
- elif vcfg['checkpoint_type'] == 'test_best':
194
- result_jsons = [c for c in os.listdir(vcfg['results_path']) if "results-val" in c]
195
- if 'multi' in vcfg['model_task']:
196
- result_jsons = [r for r in result_jsons if "multi" in r]
197
- else:
198
- result_jsons = [r for r in result_jsons if "multi" not in r]
199
-
200
- if len(result_jsons) > 0:
201
- result_json = result_jsons[0]
202
- with open(os.path.join(vcfg['results_path'], result_json), 'r') as f:
203
- eval_res = json.load(f)
204
- best_checkpoint = 'last.ckpt'
205
- best_success = -1.0
206
- for ckpt, res in eval_res.items():
207
- if res['mean_reward'] > best_success:
208
- best_checkpoint = ckpt
209
- best_success = res['mean_reward']
210
- print(best_checkpoint)
211
- ckpt = best_checkpoint
212
- ckpts_to_eval.append(ckpt)
213
- else:
214
- print("No best val ckpt found. Using last.ckpt")
215
- ckpt = 'last.ckpt'
216
- ckpts_to_eval.append(ckpt)
217
-
218
- # Load a specific checkpoint with a substring e.g: 'steps=10000'
219
- else:
220
- print(f"Looking for: {vcfg['checkpoint_type']}")
221
- checkpoints = [c for c in os.listdir(vcfg['model_path']) if vcfg['checkpoint_type'] in c]
222
- checkpoint = checkpoints[0] if len(checkpoints) > 0 else ""
223
- ckpt = checkpoint
224
- ckpts_to_eval.append(ckpt)
225
-
226
- print("ckpts_to_eval:", ckpts_to_eval)
227
- return ckpts_to_eval
228
-
229
-
230
- if __name__ == '__main__':
231
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/place_blue_on_line_ends.py DELETED
@@ -1,47 +0,0 @@
- import numpy as np
- from cliport.tasks.task import Task
- from cliport.utils import utils
-
- class PlaceBlueOnLineEnds(Task):
-     """Pick up each blue box and accurately place it at the end of a green line."""
-
-     def __init__(self):
-         super().__init__()
-         self.max_steps = 10
-         self.lang_template = "place the blue box at the end of the green line"
-         self.task_completed_desc = "done placing blue boxes on line ends."
-         self.additional_reset()
-
-     def reset(self, env):
-         super().reset(env)
-
-         # Add lines.
-         line_size = (0.3, 0.01, 0.01)
-         line_template = 'line/line-template.urdf'
-         replace = {'DIM': line_size}
-         line_urdf = self.fill_template(line_template, replace)
-
-         line_colors = ['green']
-         line_poses = []
-
-         line_pose = self.get_random_pose(env, line_size)
-         color = utils.COLORS[line_colors[0]]
-         env.add_object(line_urdf, line_pose, 'fixed', color=color)
-         line_poses.append(utils.apply(line_pose, (-0.15,0,0)))
-         line_poses.append(utils.apply(line_pose, (0.15,0,0)))
-
-         # Add blue boxes.
-         box_size = (0.04, 0.04, 0.04)
-         box_urdf = 'box/box-template.urdf'
-         box_color = utils.COLORS['blue']
-         boxes = []
-         for _ in range(2):
-             box_pose = self.get_random_pose(env, box_size)
-             box_id = env.add_object(box_urdf, box_pose, color=box_color)
-             boxes.append(box_id)
-
-         # Goal: each blue box is at the end of a different colored line.
-         for i in range(2):
-             language_goal = self.lang_template.format(line_colors[0])
-             self.add_goal(objs=[boxes[i]], matches=np.ones((1, 1)), targ_poses=[line_poses[i]], replace=False,
-                 rotations=True, metric='pose', params=None, step_max_reward=1 / 2, language_goal=language_goal)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Gen-Sim/Gen-Sim/cliport/generated_tasks/rainbow_stack.py DELETED
@@ -1,39 +0,0 @@
- import numpy as np
- from cliport.tasks.task import Task
- from cliport.utils import utils
-
- class RainbowStack(Task):
-     """Pick up blocks of seven different colors and stack them on the stand in the order of the rainbow (red, orange, yellow, green, blue, indigo, violet) from bottom to top."""
-
-     def __init__(self):
-         super().__init__()
-         self.max_steps = 20
-         self.lang_template = "stack the blocks on the stand in the order of the rainbow from bottom to top"
-         self.task_completed_desc = "done stacking."
-         self.additional_reset()
-
-     def reset(self, env):
-         super().reset(env)
-
-         # Add stand.
-         # x, y, z dimensions for the asset size
-         stand_size = (0.12, 0.12, 0.02)
-         stand_pose = self.get_random_pose(env, stand_size)
-         stand_urdf = 'stacking/stand.urdf'
-         env.add_object(stand_urdf, stand_pose, 'fixed')
-
-         # Add blocks.
-         # x, y, z dimensions for the asset size
-         block_size = (0.04, 0.04, 0.04)
-         block_urdf = 'stacking/block.urdf'
-         colors = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet']
-         blocks = []
-         for color in colors:
-             block_pose = self.get_random_pose(env, block_size)
-             block_id = env.add_object(block_urdf, block_pose, color=color)
-             blocks.append(block_id)
-
-         # Goal: stack the blocks on the stand in the order of the rainbow from bottom to top.
-         for i in range(len(blocks)):
-             self.add_goal(objs=[blocks[i]], matches=np.ones((1, 1)), targ_poses=[stand_pose], replace=False,
-                 rotations=True, metric='pose', params=None, step_max_reward=1 / len(blocks), language_goal=self.lang_template)