parquet-converter committed on
Commit e6211f6 · 1 Parent(s): 07ac748

Update parquet files (step 100 of 121)

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. spaces/101-5/gpt4free/g4f/Provider/Providers/helpers/theb.py +0 -48
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Activar Office 365 Hogar Premium.md +0 -134
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Baghban 1 Hd Movie Download In Hindi Everything You Need to Know About the Film.md +0 -85
  4. spaces/1acneusushi/gradio-2dmoleculeeditor/data/GTA SA NFS Carbon Mod 2010 V 200 The Ultimate Review of the Most Awesome GTA Mod.md +0 -148
  5. spaces/1gistliPinn/ChatGPT4/Examples/At88sc0204 Reset Software.12 Everything You Need to Know About the ATMega88S Microcontroller.md +0 -6
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/Layla And Other Assorted Love Songs 40th Torrent __HOT__.md +0 -84
  7. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/BombSquad World APK Mod Todo Desbloqueado y Gratis en Mediafre.md +0 -110
  8. spaces/4Taps/SadTalker/src/face3d/util/skin_mask.py +0 -125
  9. spaces/AICODER009/Food101_Detection/README.md +0 -13
  10. spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/portaspeech/fvae.py +0 -203
  11. spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/layers/causal_conv.py +0 -56
  12. spaces/AUST001/Translation/app.py +0 -37
  13. spaces/AchyuthGamer/OpenGPT/client/js/highlight.min.js +0 -0
  14. spaces/Aer0xander/sd-to-diffusers/app.py +0 -198
  15. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/canvas.d.ts +0 -2
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/ShakeMethods.js +0 -53
  17. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/utils/ParseYAML.js +0 -15
  18. spaces/Amrrs/DragGan-Inversion/torch_utils/ops/conv2d_resample.py +0 -158
  19. spaces/Amrrs/DragGan-Inversion/viz/latent_widget.py +0 -100
  20. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unclip/__init__.py +0 -17
  21. spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_769x769_80k_cityscapes.py +0 -9
  22. spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/pipelines/transforms.py +0 -889
  23. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/build_ext.py +0 -787
  24. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/upload.py +0 -205
  25. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/config/defaults.py +0 -635
  26. spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/visualize_data.py +0 -94
  27. spaces/B2gan/LLM_Can_See/app.py +0 -87
  28. spaces/Bart92/RVC_HF/julius/filters.py +0 -258
  29. spaces/Benson/text-generation/Examples/Blitzkrieg 3.md +0 -63
  30. spaces/Benson/text-generation/Examples/Colorear El Color Del Libro Por Nmero Mod Apk.md +0 -50
  31. spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/paginate.py +0 -720
  32. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/dir_util.py +0 -243
  33. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/more_itertools/recipes.py +0 -620
  34. spaces/Blessing/Asphalt-Pavement-Distresses-Detector/app.py +0 -174
  35. spaces/CVH-vn1210/make_hair/minigpt4/common/optims.py +0 -119
  36. spaces/CVMX-jaca-tonos/Spanish-Audio-Transcription-to-Quechua-Translation/README.md +0 -12
  37. spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/pointer_traits.h +0 -371
  38. spaces/CVPR/LIVE/thrust/thrust/find.h +0 -385
  39. spaces/CVPR/Text2Human/Text2Human/models/parsing_gen_model.py +0 -220
  40. spaces/CVPR/lama-example/saicinpainting/training/losses/distance_weighting.py +0 -126
  41. spaces/CarlosMF/AI-ORUS-License-v1.0.0/README.md +0 -13
  42. spaces/DCandE/rvc-models/README.md +0 -14
  43. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Column-2853eb31.css +0 -1
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/themes/glass.py +0 -99
  45. spaces/DaleChen/AutoGPT/tests/integration/memory_tests.py +0 -49
  46. spaces/Detomo/Lighten_dark_image/README.md +0 -11
  47. spaces/Deviliaan/sd_twist/app.py +0 -162
  48. spaces/Dinoking/Guccio-AI-Designer/netdissect/proggan.py +0 -299
  49. spaces/ECCV2022/bytetrack/tutorials/trades/byte_tracker.py +0 -352
  50. spaces/Edward-Ji/essentials-of-microeconomics/essentials_of_microeconomics/util.py +0 -31
spaces/101-5/gpt4free/g4f/Provider/Providers/helpers/theb.py DELETED
@@ -1,48 +0,0 @@
- import json
- import sys
- from re import findall
- from curl_cffi import requests
-
- # The caller passes the chat configuration as a JSON string in argv[1];
- # the prompt is the content of the last message.
- config = json.loads(sys.argv[1])
- prompt = config['messages'][-1]['content']
-
- # Browser-like headers so the request looks like it comes from Chrome.
- headers = {
-     'authority': 'chatbot.theb.ai',
-     'accept': 'application/json, text/plain, */*',
-     'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-     'content-type': 'application/json',
-     'origin': 'https://chatbot.theb.ai',
-     'referer': 'https://chatbot.theb.ai/',
-     'sec-ch-ua': '"Google Chrome";v="113", "Chromium";v="113", "Not-A.Brand";v="24"',
-     'sec-ch-ua-mobile': '?0',
-     'sec-ch-ua-platform': '"macOS"',
-     'sec-fetch-dest': 'empty',
-     'sec-fetch-mode': 'cors',
-     'sec-fetch-site': 'same-origin',
-     'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36',
- }
-
- json_data = {
-     'prompt': prompt,
-     'options': {}
- }
-
- def format(chunk):
-     # Extract the incremental completion text from each streamed chunk
-     # and echo it to stdout so the parent process can consume it live.
-     try:
-         completion_chunk = findall(r'content":"(.*)"},"fin', chunk.decode())[0]
-         print(completion_chunk, flush=True, end='')
-     except Exception:
-         print(f'[ERROR] an error occurred, retrying... | [[{chunk.decode()}]]', flush=True)
-         return
-
- while True:
-     try:
-         response = requests.post('https://chatbot.theb.ai/api/chat-process',
-                                  headers=headers, json=json_data, content_callback=format, impersonate='chrome110')
-         exit(0)
-     except Exception as e:
-         print('[ERROR] an error occurred, retrying... |', e, flush=True)
-         continue
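For context, this deleted helper is designed to be launched as a subprocess: the caller serializes the chat config into argv[1] and reads the streamed completion from stdout. A minimal sketch of such a caller, assuming the script is saved as theb.py and inferring the message shape from the code above (this is an illustration, not a documented g4f interface):

```python
import json
import subprocess
import sys

# The helper reads json.loads(sys.argv[1]) and takes the prompt from the
# last entry of the 'messages' list (see the deleted script above).
config = {'messages': [{'role': 'user', 'content': 'Hello, world'}]}

proc = subprocess.Popen(
    [sys.executable, 'theb.py', json.dumps(config)],
    stdout=subprocess.PIPE, text=True,
)
# The helper prints completion chunks as they arrive, so stream its stdout.
for line in proc.stdout:
    print(line, end='')
proc.wait()
```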
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Activar Office 365 Hogar Premium.md DELETED
@@ -1,134 +0,0 @@
- <br />
- <h1>How to activate Office 365 Home Premium</h1>
- <p>Office 365 Home Premium is a subscription service that gives you access to the most up-to-date versions of Word, Excel, PowerPoint, Outlook and other popular Office apps. You also get 1 TB of cloud storage per person, security updates, technical support and other benefits. Office 365 Home Premium is ideal for home users who want to work and collaborate online with ease and flexibility.</p>
- <h2>activar office 365 hogar premium</h2><br /><p><b><b>DOWNLOAD</b> &#9734;&#9734;&#9734; <a href="https://byltly.com/2uKvuP">https://byltly.com/2uKvuP</a></b></p><br /><br />
- <p>In this article, we will show you how to activate Office 365 Home Premium in different ways, depending on your subscription option. We will also explain the benefits of Office 365 Home Premium and how to troubleshoot some common activation issues.</p>
- <h2>Subscription options for Office 365 Home Premium</h2>
- <p>You can subscribe to Office 365 Home Premium in two different ways: by month or by year. The monthly option costs €10 per month, while the yearly option costs €99 per year. You can cancel your subscription at any time.</p>
- <p>It's easy: you can request it on the official Microsoft website. All this information can be configured in the "My account" section of your profile.</p>
- <h3>How to subscribe online</h3>
- <p>To subscribe online, follow these steps:</p>
- <ol>
- <li>Go to <a href="https://www.microsoft.com/es-es/microsoft-365/p/microsoft-365-family/cfq7ttc0k5dm?activetab=pivot%3aoverviewtab">https://www.microsoft.com/es-es/microsoft-365/p/microsoft-365-family/cfq7ttc0k5dm?activetab=pivot%3aoverviewtab</a> and click on "Buy now".</li>
- <li>Sign in with your Microsoft account or create one if you don't have one.</li>
- <li>Select your payment method and enter your billing information.</li>
- <li>Review your order and confirm it.</li>
- <li>You will receive an email confirmation with your subscription details.</li>
- </ol>
- <p>To manage your account settings, such as changing your payment method, canceling your subscription or checking your renewal date, go to <a href="https://account.microsoft.com/services/">https://account.microsoft.com/services/</a> and sign in with your Microsoft account.</p>
- <h3>How to activate a product key</h3>
- <p>If you have a product key from a new product key card or from the Microsoft Workplace Discount Program, you can use it to activate Office 365 Home Premium. A product key is a 25-character code that looks like this: XXXXX-XXXXX-XXXXX-XXXXX-XXXXX.</p>
- <p>Cómo activar office 365 hogar premium gratis<br />
- Activar office 365 hogar premium con clave<br />
- Activar office 365 hogar premium sin cuenta microsoft<br />
- Activar office 365 hogar premium en varios dispositivos<br />
- Activar office 365 hogar premium con licencia<br />
- Activar office 365 hogar premium con cmd<br />
- Activar office 365 hogar premium con kms<br />
- Activar office 365 hogar premium con crack<br />
- Activar office 365 hogar premium con serial<br />
- Activar office 365 hogar premium con keygen<br />
- Activar office 365 hogar premium paso a paso<br />
- Activar office 365 hogar premium por telefono<br />
- Activar office 365 hogar premium por internet<br />
- Activar office 365 hogar premium offline<br />
- Activar office 365 hogar premium online<br />
- Activar office 365 hogar premium windows 10<br />
- Activar office 365 hogar premium mac<br />
- Activar office 365 hogar premium android<br />
- Activar office 365 hogar premium ios<br />
- Activar office 365 hogar premium linux<br />
- Activar office 365 hogar premium sin descargar nada<br />
- Activar office 365 hogar premium sin programas<br />
- Activar office 365 hogar premium sin virus<br />
- Activar office 365 hogar premium sin errores<br />
- Activar office 365 hogar premium sin problemas<br />
- Activar office 365 hogar premium facil y rapido<br />
- Activar office 365 hogar premium de forma permanente<br />
- Activar office 365 hogar premium de por vida<br />
- Activar office 365 hogar premium de manera legal<br />
- Activar office 365 hogar premium de forma segura<br />
- Activación de office 365 hogar premium tutorial completo<br />
- Activación de office 365 hogar premium guía práctica<br />
- Activación de office 365 hogar premium consejos y trucos<br />
- Activación de office 365 hogar premium solución de problemas<br />
- Activación de office 365 hogar premium preguntas frecuentes<br />
- Beneficios de activar office 365 hogar premium <br />
- Ventajas de activar office 365 hogar premium <br />
- Diferencias entre activar office 365 hoga</p>
- <p>To activate a product key, follow these steps:</p>
- <ol>
- <li>Go to <a href="https://setup.office.com/">https://setup.office.com/</a> and sign in with your Microsoft account or create one if you don't have one.</li>
- <li>Enter your product key and select your country or region and language.</li>
- <li>Follow the instructions on the screen to complete the activation process.</li>
- <li>You will receive an email confirmation with your subscription details.</li>
- </ol>
- <h2>Benefits of Office 365 Home Premium</h2>
- <p>Office 365 Home Premium offers you many features and advantages that make your work and life easier. Here are some of them:</p>
- <ul>
- <li>You get access to the latest versions of Office apps, such as Word, Excel, PowerPoint, Outlook and more. You can use them online or offline, depending on your preference.</li>
- <li>You get 1 TB of cloud storage per person with OneDrive, where you can store and share your files securely. You can also access them from any device.</li>
- <li>You get security updates and technical support from Microsoft at no extra cost. You can also use advanced security features such as ransomware detection and recovery.</li>
- <li>You get access to premium features such as Editor in Word, Designer in PowerPoint, Money in Excel and more. You can also use AI-powered tools such as Ideas and Researcher.</li>
- <li>You get access to additional apps and services such as Skype (60 minutes of calls per month), Microsoft Family Safety (parental controls and location sharing), Microsoft Teams (chat and video calls) and more.</li>
- </ul>
- <h3>How to install Office 365 Home Premium on multiple devices</h3>
- <p>One of the best things about Office 365 Home Premium is that you can install it on up to five PCs or Macs, five tablets and five phones per household. This means that you can use Office on any device you want, whenever you want.</p>
- <p>To install Office 365 Home Premium on multiple devices, follow these steps:</p>
- <ol>
- <li>Go to <a href="https://account.microsoft.com/services/">https://account.microsoft.com/services/</a> and sign in with your Microsoft account.</li>
- <li>Select "Install" under "Office" and choose the device you want to install it on.</li>
- <li>Follow the instructions on the screen to download and install Office on your device.</li>
- <li>Repeat these steps for each device you want to install Office on.</li>
- </ol>
- <h3>How to share Office 365 Home Premium with family members</h3>
- <p>Another great thing about Office 365 Home Premium is that you can share it with up to four other people in your household. This means that they can also enjoy all the benefits of Office without paying extra.</p>
- <p>To share Office 365 Home Premium with family members, follow these steps:</p>
- <ol>
- <li>Go to <a href="https://account.microsoft.com/services/">https://account.microsoft.com/services/</a> and sign in with your Microsoft account.</li>
- <li>Select "Sharing" under "Office" and click on "Start sharing".</li>
- <li>Enter the email address of the person you want to share with and click on "Invite". You can also copy a link and send it manually.</li>
- <li>The person you invited will receive an email with a link to accept your invitation. They will need a Microsoft account or create one if they don't have one.</li>
- <li>Once they accept your invitation, they will be able to install Office on their devices and access all the features.</li>
- </ol>
- <h2>Troubleshooting tips for Office 365 Home Premium activation</h2>
- <p>Sometimes you may encounter some issues when activating Office 365 Home Premium. Don't worry, most of them can be easily solved with some simple steps. Here are some common problems and solutions:</p>
- <h3>How to use the Activation wizard</h3>
- <p>If the Activation wizard appears when you open an Office app, Office needs your input to activate Microsoft 365. Follow the prompts in the wizard to activate Microsoft 365.</p>
- <p>Some of the steps you may need to do are:</p>
- <ul>
- <li>Sign in with your Microsoft account or your work or school account.</li>
- <li>Select your country or region and language.</li>
- <li>Enter your product key if you have one.</li>
- <li>Review and accept the Microsoft Software License Terms.</li>
- <li>Click on "Next" until you see a confirmation message.</li>
- </ul>
- <h3>How to contact Microsoft support</h3>
- <p>If you still have problems activating Office 365 Home Premium or you need more help, you can contact Microsoft support for assistance. You can choose from different support options, such as:</p>
- <ul>
- <li>Guided support: a browser-based tool that can provide digital solutions for Office problems.</li>
- <li>Live chat: a real-time conversation with a Microsoft specialist who can help you with your issue.</li>
- <li>Phone call: a phone call with a Microsoft specialist who can help you with your issue.</li>
- <li>Email: an email exchange with a Microsoft specialist who can help you with your issue.</li>
- <li>Community forum: a public platform where you can ask questions and get answers from other users and experts.</li>
- </ul>
- <h1>Conclusion</h1>
- <p>In this article, we have shown you how to activate Office 365 Home Premium in different ways, depending on your subscription option. We have also explained the benefits of Office 365 Home Premium and how to troubleshoot some common activation issues.</p>
- <p>Office 365 Home Premium is a great service that gives you access to the latest versions of Office apps, cloud storage, security updates and other features. You can also install it on multiple devices and share it with your family members. Office 365 Home Premium is ideal for home users who want to work and collaborate online with ease and flexibility.</p>
- <p>If you want to enjoy all the benefits of Office 365 Home Premium, don't wait any longer and activate it today. You can subscribe online or use a product key to activate it. If you need any help, you can contact Microsoft support for assistance.</p>
- <h2>Frequently Asked Questions</h2>
- <ol>
- <li><b>What is the difference between Office 365 Home Premium and Office 2019?</b></li>
- <p>Office 365 Home Premium is a subscription service that gives you access to the latest versions of Office apps and other benefits. Office 2019 is a one-time purchase that gives you access to one version of Office apps. Office 365 Home Premium has more features and advantages than Office 2019.</p>
- <li><b>How do I renew my Office 365 Home Premium subscription?</b></li>
- <p>You can renew your Office 365 Home Premium subscription online or by using a product key. To renew online, go to <a href="https://account.microsoft.com/services/">https://account.microsoft.com/services/</a> and sign in with your Microsoft account. To renew by using a product key, go to <a href="https://setup.office.com/">https://setup.office.com/</a> and enter your product key.</p>
- <li><b>How do I cancel my Office 365 Home Premium subscription?</b></li>
- <p>You can cancel your Office 365 Home Premium subscription at any time. To cancel online, go to <a href="https://account.microsoft.com/services/">https://account.microsoft.com/services/</a> and sign in with your Microsoft account. To cancel by phone, contact Microsoft support.</p>
- <li><b>How do I check my Office 365 Home Premium activation status?</b></li>
- <p>You can check your Office 365 Home Premium activation status by opening any Office app and going to File > Account. You will see a message that says "Product Activated" or "Subscription Product" if your activation was successful. You will also see your subscription details, such as your expiration date and renewal options.</p>
- <li><b>How do I update my Office 365 Home Premium apps?</b></li>
- <p>You can update your Office 365 Home Premium apps automatically or manually. To update automatically, make sure you have an active internet connection and that automatic updates are enabled in your settings. To update manually, open any Office app and go to File > Account > Update Options > Update Now.</p>
- </ol>
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Baghban 1 Hd Movie Download In Hindi Everything You Need to Know About the Film.md DELETED
@@ -1,85 +0,0 @@
- <br />
- <h1>Baghban 1 HD Movie Download In Hindi: A Guide For Bollywood Fans</h1>
- <p>If you are a fan of Bollywood movies, you might have heard of <strong>Baghban 1</strong>, a 2003 drama film starring Amitabh Bachchan and Hema Malini. The movie tells the story of an elderly couple who are neglected by their children after they retire. The movie was a critical and commercial success, and it touched the hearts of many viewers with its emotional message.</p>
- <h2>Baghban 1 Hd Movie Download In Hindi</h2><br /><p><b><b>Download</b> ---> <a href="https://byltly.com/2uKwoD">https://byltly.com/2uKwoD</a></b></p><br /><br />
- <p>But how can you watch this movie online in HD quality and in the Hindi language? Is it legal to download Baghban 1 from the internet? What are the benefits and risks of doing so? In this article, we will answer these questions and provide you with a guide on how to enjoy Baghban 1 HD movie download in Hindi.</p>
- <h2>Introduction</h2>
- <h3>What is Baghban 1?</h3>
- <p>Baghban 1 is a Bollywood movie that was released in 2003. It was directed by Ravi Chopra and written by B.R. Chopra. The movie stars Amitabh Bachchan as Raj Malhotra, a retired bank manager, and Hema Malini as Priya Malhotra, his wife. The couple have four sons who are settled in different cities. Raj and Priya expect their sons to take care of them in their old age, but they are disappointed when their sons treat them as a burden and separate them.</p>
- <p>The movie shows how Raj and Priya cope with their loneliness and hardship, and how they find support from their adopted son Alok (Salman Khan) and his wife Arpita (Mahima Chaudhry). The movie also has a twist at the end that reveals the true nature of their sons.</p>
- <h3>Why is Baghban 1 popular?</h3>
- <p>Baghban 1 is popular for many reasons. Some of them are:</p>
- <p>Baghban full movie download in HD quality<br />
- Baghban 2003 Hindi movie free download<br />
- Baghban BluRay 1080p download link<br />
- Watch Baghban online on Disney+ Hotstar<br />
- Baghban Hindi romantic movie starring Salman Khan<br />
- How to download Baghban movie in Hindi<br />
- Baghban movie review and ratings<br />
- Baghban songs download mp3<br />
- Baghban movie cast and crew details<br />
- Baghban movie trivia and facts<br />
- Baghban movie quotes and dialogues<br />
- Baghban movie awards and nominations<br />
- Baghban movie box office collection and budget<br />
- Baghban movie scenes and clips<br />
- Baghban movie behind the scenes and making<br />
- Baghban movie theme and message<br />
- Baghban movie remake and sequel<br />
- Baghban movie poster and wallpapers<br />
- Baghban movie subtitles download<br />
- Baghban movie torrent download magnet link<br />
- Baghban full movie watch online HD<br />
- Baghban 2003 Hindi film download filmywap<br />
- Baghban BluRay 720p download filmyzilla<br />
- Stream Baghban on Disney+ Hotstar VIP<br />
- Baghban Bollywood movie featuring Amitabh Bachchan<br />
- Where to download Baghban movie in Hindi<br />
- Baghban movie critics and audience reviews<br />
- Baghban songs video download mp4<br />
- Baghban movie actors and actresses names<br />
- Baghban movie interesting and unknown facts<br />
- Baghban movie memorable and famous quotes<br />
- Baghban movie accolades and achievements<br />
- Baghban movie income and expenditure report<br />
- Baghban movie best and worst scenes<br />
- Baghban movie secrets and stories<br />
- Baghban movie plot and summary<br />
- Baghban movie follow-up and spin-off<br />
- Baghban movie images and photos<br />
- Baghban movie captions and subtitles file<br />
- Baghban full movie download torrent link</p>
- <ul>
- <li>The movie has a powerful story that deals with the issues of aging, family, and social values. It portrays the contrast between the traditional values of respect and care for elders, and the modern values of individualism and materialism.</li>
- <li>The movie has a stellar cast that includes some of the most respected actors in Bollywood. Amitabh Bachchan and Hema Malini deliver excellent performances as the loving couple who face betrayal from their own children. Salman Khan and Mahima Chaudhry also play their roles well as the caring adopted son and daughter-in-law.</li>
- <li>The movie has a melodious soundtrack that features songs composed by Aadesh Shrivastava and Uttam Singh. Some of the popular songs are "Main Yahan Tu Wahan", "Holi Khele Raghuveera", "Pehle Kabhi Na Mera Haal", and "Chali Chali Phir Chali".</li>
- <li>The movie has a positive message that inspires the viewers to value their parents and elders, and to live with dignity and grace.</li>
- </ul>
- <h3>How to watch Baghban 1 online legally?</h3>
- <p>If you want to watch Baghban 1 online legally, you have a few options. Some of them are:</p>
- <ul>
- <li>You can stream the movie on Disney+ Hotstar, a popular OTT platform that offers a variety of movies and shows in different languages. You can watch Baghban 1 on Hotstar with a subscription plan that costs Rs. 299 per month or Rs. 1499 per year.</li>
- <li>You can buy or rent the movie on YouTube, Google Play Movies, or iTunes. You can watch Baghban 1 on these platforms with a one-time payment that ranges from Rs. 25 to Rs. 150 depending on the quality and format.</li>
- <li>You can also buy the DVD or Blu-ray disc of the movie from online or offline stores. You can watch Baghban 1 on these discs with a DVD or Blu-ray player that supports the Hindi language.</li>
- </ul>
- <h2>Features of Baghban 1 HD Movie Download In Hindi</h2>
- <h3>High-quality video and audio</h3>
- <p>One of the main features of Baghban 1 HD movie download in Hindi is that it offers high-quality video and audio for your viewing pleasure. You can enjoy the movie in full HD resolution (1080p) that enhances the clarity and details of the scenes. You can also enjoy the movie in Dolby Digital 5.1 surround sound that enhances the effects and emotions of the soundtrack.</p>
- <h3>Subtitles and dubbing options</h3>
- <p>Another feature of Baghban 1 HD movie download in Hindi is that it offers subtitles and dubbing options for your convenience. You can choose to watch the movie with subtitles in English or other languages if you prefer to read along with the dialogues. You can also choose to watch the movie with dubbing in other languages if you prefer to listen to the dialogues in your native tongue.</p>
- <h3>Easy and fast download process</h3>
- <p>A third feature of Baghban 1 HD movie download in Hindi is that it offers an easy and fast download process for your ease. You can download the movie from any device that has an internet connection, such as a computer, laptop, tablet, or smartphone. You can also download the movie in different formats, such as MP4, MKV, AVI, or MOV. You can also download the movie in different sizes, such as 300 MB, 700 MB, or 4 GB depending on your preference.</p>
- <h2>Benefits of Baghban 1 HD Movie Download In Hindi</h2>
- <h3>Enjoy the movie at your convenience</h3>
- <p>One of the main benefits of Baghban 1 HD movie download in Hindi is that you can enjoy the movie at your convenience. You can watch the movie anytime and anywhere you want without any restrictions or interruptions. You can watch the movie on any device that supports video playback, such as a TV, laptop, tablet, or smartphone. You can also watch the movie offline without any internet connection once you have downloaded it.</p>
- <h3>Save money and time</h3>
- <p>Another benefit of Baghban 1 HD movie download in Hindi is that you can save money and time by doing so. You don't have to pay for any subscription fees or rental charges to watch the movie online legally. You also don't have to spend any money on buying tickets or travelling to theatres to watch the movie on big screens. You also don't have to waste any time on waiting for buffering or loading issues while streaming the movie online.</p>
- <h3>Avoid spoilers and ads</h3>
- <p>A third benefit of Baghban 1 HD movie download in Hindi is that you can avoid spoilers and ads by doing so. You don't have to worry about getting spoiled by other people who have already watched the movie before you. You also don't have to endure any annoying ads or pop-ups that interrupt your viewing experience while watching the movie online.</p>
- <h2>Risks of Baghban 1 HD Movie Download In Hindi</h2>
- <h3>Legal issues and penalties</h3>
- <p>One of the main risks of Baghban 1 HD movie download in Hindi is that you may face legal issues and penalties for doing so illegally. Downloading movies from unauthorized sources such as torrent sites or file-sharing platforms is considered piracy under Indian law. Piracy is a criminal offence that can lead to imprisonment up to three years or a fine up to Rs. 10 lakh or both under Section 63B of The Copyright Act, 1957. You may also face civil lawsuits from the producers or distributors of the movie for infringing their rights.</p>
- <h3>Malware and viruses</h3>
- <p>Another risk of Baghban 1 HD movie download in Hindi is that files from unauthorized sources may contain malware and viruses that can harm your device or steal your personal data.</p>
spaces/1acneusushi/gradio-2dmoleculeeditor/data/GTA SA NFS Carbon Mod 2010 V 200 The Ultimate Review of the Most Awesome GTA Mod.md DELETED
@@ -1,148 +0,0 @@
- <br />
- <h1>GTA SA NFS Carbon Mod 2010 V 200: A Review</h1>
- <p>If you are a fan of GTA San Andreas and NFS Carbon or ProStreet, you might be interested in this mod that combines both games into one. GTA SA NFS Carbon Mod 2010 V 200 is a global mod that replaces all cars in GTA San Andreas with models from NFS Carbon, NFS ProStreet and other games. It also adds new sounds, textures, loading screens, menus and car names to enhance the experience. In this article, we will review this mod and tell you everything you need to know about it.</p>
- <h2>Features of the mod</h2>
- <h3>New cars</h3>
- <p>One of the main features of this mod is that it replaces all cars in GTA San Andreas with new ones from NFS games. There are more than 200 cars to choose from, ranging from sports cars to muscle cars to SUVs. You can find them in various locations around San Andreas, such as parking lots, garages, dealerships or on the streets. Some of them are also used by gangs, police or other NPCs. You can customize them in any tuning salon with new spoilers, buckets, hoods, side skirts and wheels. The mod also adds new handling and physics for each car.</p>
- <h2>GTA SA NFS Carbon Mod 2010 V 200</h2><br /><p><b><b>Download File</b> &#10004;&#10004;&#10004; <a href="https://byltly.com/2uKxKu">https://byltly.com/2uKxKu</a></b></p><br /><br />
- <h3>New sounds</h3>
- <p>Another feature of this mod is that it changes the sounds of car engines and shots. The mod uses high-quality sounds from NFS Carbon and ProStreet to make the cars sound more realistic and immersive. You can hear the difference between different types of engines, such as V8s or turbos. The mod also changes the sounds of guns and explosions to make them more powerful and dynamic. To install these sounds, you need to replace some files in your audio folder with the ones provided by the mod.</p>
- <h3>New textures</h3>
- <p>The mod also changes many textures in GTA San Andreas to make them look more modern and detailed. The mod replaces the textures of buildings, roads, tuning salons, CJ's house, the military base and more with new ones inspired by NFS Carbon and ProStreet. The mod also adds new posters in cities on the theme of NFS games and new pedestrians (gangs, police, girls etc.) with new clothes and hairstyles. The mod improves the graphics quality and atmosphere of San Andreas.</p>
- <p>GTA San Andreas Need for Speed Carbon Mod 2010 Version 200<br />
- How to install GTA SA NFS Carbon Mod 2010 V 200 on PC<br />
- GTA SA NFS Carbon Mod 2010 V 200 gameplay and features<br />
- Download GTA SA NFS Carbon Mod 2010 V 200 for free<br />
- GTA SA NFS Carbon Mod 2010 V 200 review and rating<br />
- GTA SA NFS Carbon Mod 2010 V 200 cheats and codes<br />
- GTA SA NFS Carbon Mod 2010 V 200 system requirements and compatibility<br />
- GTA SA NFS Carbon Mod 2010 V 200 best cars and customization<br />
- GTA SA NFS Carbon Mod 2010 V 200 vs original GTA SA<br />
- GTA SA NFS Carbon Mod 2010 V 200 online multiplayer mode<br />
- GTA SA NFS Carbon Mod 2010 V 200 trailer and screenshots<br />
- GTA SA NFS Carbon Mod 2010 V 200 mods and patches<br />
- GTA SA NFS Carbon Mod 2010 V 200 tips and tricks<br />
- GTA SA NFS Carbon Mod 2010 V 200 missions and challenges<br />
- GTA SA NFS Carbon Mod 2010 V 200 soundtrack and music<br />
- GTA SA NFS Carbon Mod 2010 V 200 error and bug fixes<br />
- GTA SA NFS Carbon Mod 2010 V 200 comparison with other GTA mods<br />
- GTA SA NFS Carbon Mod 2010 V 200 story and characters<br />
- GTA SA NFS Carbon Mod 2010 V 200 map and locations<br />
- GTA SA NFS Carbon Mod 2010 V 200 graphics and performance<br />
- GTA SA NFS Carbon Mod 2010 V 200 secrets and easter eggs<br />
- GTA SA NFS Carbon Mod 2010 V 200 fun and funny moments<br />
- GTA SA NFS Carbon Mod 2010 V 200 walkthrough and guide<br />
- GTA SA NFS Carbon Mod 2010 V 200 update and news<br />
- GTA SA NFS Carbon Mod 2010 V 200 forum and community<br />
- GTA SA NFS Carbon Mod 2010 V 200 fan art and videos<br />
- GTA SA NFS Carbon Mod 2010 V 200 wiki and FAQ<br />
- GTA SA NFS Carbon Mod 2010 V 200 speedrun and record<br />
- GTA SA NFS Carbon Mod 2010 V 200 realistic and immersive mode<br />
- GTA SA NFS Carbon Mod 2010 V 200 controller and keyboard support<br />
- GTA SA NFS Carbon Mod 2010 V 200 alternatives and similar games<br />
- GTA SA NFS Carbon Mod 2010 V 200 development and history<br />
- GTA SA NFS Carbon Mod 2010 V 200 awards and achievements<br />
- GTA SA NFS Carbon Mod 2010 V 200 problems and solutions<br />
- GTA SA NFS Carbon Mod<br />
- GTA SA NFS Carbon Mod<br />
- How to get the most out of the mod.<br />
- What makes the mod unique and special.</p>
- <h3>New loading screens and menus</h3>
- <p>The mod also changes the loading screens and menus of GTA San Andreas to match the style and theme of NFS Carbon and ProStreet. The mod uses cool images and animations from NFS games to make the loading screens more attractive and dynamic. The mod also changes the fonts, colors and icons of the menus to make them more sleek and stylish. The mod gives GTA San Andreas a fresh look.</p>
- <h3>New car names</h3>
- <p>The last feature of this mod is that it uses CLEO and an FXT editor to give each car its original name from NFS games. For example, instead of seeing "Banshee" or "Infernus" on your screen when you enter a car, you will see "Dodge Viper SRT-10" or "Lamborghini Murcielago LP640". This makes the game more realistic and authentic. To use this feature, you need to copy the CLEO_TEXT folder from the mod to your CLEO folder.</p>
- <h2>Installation of the mod</h2>
- <p>To install this mod, you need to download it from one of these links. The file size is about 846 MB. You will get a zip archive that contains several folders and files. You need to extract them using WinRAR or 7-Zip. Then you need to follow these steps (a scripted version of the same file copies is sketched after the list):</p>
- <ol>
- <li>Back up your original files before replacing them with the ones from the mod.</li>
- <li>Copy all files from the "GTA SA NFS Carbon Mod 2010 V 200" folder (except CLEO_TEXT) to your GTA San Andreas directory (where gta_sa.exe is located).</li>
- <li>Copy all files from the "Audio" folder (except Audiozon.ipl) to your GTA San Andreas/audio folder.</li>
- <li>Copy the Audiozon.ipl file from the "Audio" folder to your GTA San Andreas/data/maps/audio folder.</li>
- <li>Copy the CLEO_TEXT folder from the "GTA SA NFS Carbon Mod 2010 V 200" folder to your GTA San Andreas/CLEO folder.</li>
- <li>Run gta_sa.exe and enjoy!</li>
- </ol>
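Since the steps above are plain file copies, they can also be scripted. Below is a minimal Python sketch of the same install, with backups as in step 1. The GTA_DIR and MOD_DIR paths are placeholders, and it assumes the "Audio" folder sits inside the extracted mod folder; adjust both for your own machine:

```python
import shutil
from pathlib import Path

# Assumed locations -- adjust to your own install and extraction paths.
GTA_DIR = Path(r'C:\Games\GTA San Andreas')  # where gta_sa.exe lives
MOD_DIR = Path(r'C:\Downloads\GTA SA NFS Carbon Mod 2010 V 200')

def backup_and_copy(src: Path, dst: Path) -> None:
    """Copy one file, keeping a .bak of anything it would overwrite (step 1)."""
    dst.parent.mkdir(parents=True, exist_ok=True)
    if dst.exists():
        shutil.copy2(dst, dst.with_name(dst.name + '.bak'))
    shutil.copy2(src, dst)

def install(src_root: Path, dst_root: Path, skip=()) -> None:
    """Mirror src_root into dst_root, skipping paths that contain a skipped name."""
    for f in src_root.rglob('*'):
        rel = f.relative_to(src_root)
        if f.is_file() and not any(part in skip for part in rel.parts):
            backup_and_copy(f, dst_root / rel)

# Step 2: everything except CLEO_TEXT (Audio is handled separately below).
install(MOD_DIR, GTA_DIR, skip=('CLEO_TEXT', 'Audio'))
# Step 3: audio files, except Audiozon.ipl, into the game's audio folder.
install(MOD_DIR / 'Audio', GTA_DIR / 'audio', skip=('Audiozon.ipl',))
# Step 4: Audiozon.ipl goes into data/maps/audio instead.
backup_and_copy(MOD_DIR / 'Audio' / 'Audiozon.ipl',
                GTA_DIR / 'data' / 'maps' / 'audio' / 'Audiozon.ipl')
# Step 5: CLEO_TEXT enables the original NFS car names.
install(MOD_DIR / 'CLEO_TEXT', GTA_DIR / 'CLEO' / 'CLEO_TEXT')
```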
- <h2>Pros and cons of the mod</h2>
- <p>This mod has many pros and cons that you should consider before installing it. Here are some of them:</p>
- <ul>
- <li>Pros: <ul>
- <li>The mod adds a lot of variety and diversity to GTA San Andreas by replacing all cars with new ones from NFS games.</li>
- <li>The mod improves the graphics quality and atmosphere of GTA San Andreas by changing many textures, sounds, loading screens and menus.</li>
- <li>The mod makes GTA San Andreas more realistic and authentic by giving each car its original name from NFS games.</li>
- <li>The mod is compatible with most other mods for GTA San Andreas.</li>
- </ul>
- </li>
- <li>Cons: <ul>
- <li>The mod may cause some bugs or glitches in GTA San Andreas due to compatibility issues or errors.</li>
- <li>The mod may affect the performance or stability of GTA San Andreas due to its large size or high requirements.</li>
- <li>The mod may not suit everyone's taste or preference as some people may prefer the original cars or style of GTA San Andreas.</li>
- <li>The mod may not be updated or supported by its author anymore as it was released in 2010.</li>
- </ul>
- </li>
- </ul>
- <h2>Conclusion</h2>
- <p>GTA SA NFS Carbon Mod 2010 V 200 is a global mod that transforms GTA San Andreas into a hybrid of NFS Carbon and ProStreet. It replaces all cars with new ones from NFS games, changes many textures, sounds, loading screens and menus, and gives each car its original name from NFS games using CLEO scripts. It is a great mod for fans of both GTA San Andreas and NFS games who want to enjoy both worlds in one game. However, it also has some drawbacks such as bugs, performance issues or personal taste that may discourage some players from using it. Therefore, we recommend that you try it out yourself and see if you like it or not.</p>
- <h2>FAQs</h2>
- <ul>
- <li>Q: How can I uninstall this mod?</li>
- <li>A: To uninstall this mod, you need to delete all files that you copied from this mod (except CLEO_TEXT) from your GTA San Andreas directory (where gta_sa.exe is located) and restore the original files that you backed up before installing this mod.</li>
- <li>Q: How can I change back to original car names?</li>
- <li>A: To change back to original car names, you need to delete the CLEO_TEXT folder that you copied from this mod from your GTA San Andreas/CLEO folder and restart the game.</li>
- <li>Q: How can I contact the author of this mod?</li>
- <li>A: To contact the author of this mod, you can use one of these methods: <ul>
- <li>Email: [email protected]</li>
- <li>YouTube: KryZeePlays</li>
- <li>Trello: safikamia</li>
- </ul></li>
- <li>Q: How can I get more mods for GTA San Andreas?</li>
- <li>A: To get more mods for GTA San Andreas, you can visit these websites: <ul>
- <li>NFSMods: https://nfsmods.xyz/</li>
- <li>GTAInside: https://www.gtainside.com/en/sanandreas/mods/</li>
- <li>GTA5-Mods: https://www.gta5-mods.com/</li>
- </ul></li>
- <li>Q: How can I optimize my game for better performance?</li>
- <li>A: To optimize your game for better performance, you can try these tips: <ul>
- <li>Lower your graphics settings in the game options menu.</li>
- <li>Use a lower resolution or windowed mode for the game.</li>
- <li>Close any unnecessary programs or background processes while playing the game.</li>
- <li>Update your drivers and software for your PC components.</li>
- </ul></li>
- </ul>
spaces/1gistliPinn/ChatGPT4/Examples/At88sc0204 Reset Software.12 Everything You Need to Know About the ATMega88S Microcontroller.md DELETED
@@ -1,6 +0,0 @@
- <h2>at88sc0204resetsoftware12</h2><br /><p><b><b>Download File</b> &#9881; <a href="https://imgfil.com/2uxYCs">https://imgfil.com/2uxYCs</a></b></p><br /><br />
- <p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/Layla And Other Assorted Love Songs 40th Torrent __HOT__.md DELETED
@@ -1,84 +0,0 @@
- ## Layla And Other Assorted Love Songs 40th Torrent
-
- ![Layla And Other Assorted Love Songs 40th Torrent __HOT__](https://opengraph.githubassets.com/22b7b1890dca3e0369a9b37853eeae1cdbd867b1966b9b0b1b1eff0e6e8e56f8/thejat/dl-notebooks)
-
- **LINK >>> [https://kneedacexbrew.blogspot.com/?d=2txjqn](https://kneedacexbrew.blogspot.com/?d=2txjqn)**
-
- # Layla and Other Assorted Love Songs: A Classic Rock Masterpiece
-
- Layla and Other Assorted Love Songs is the only studio album by the English–American rock band Derek and the Dominos, released in November 1970 as a double album[^2^]. It is best known for its title track, "Layla", which is often regarded as Eric Clapton's greatest musical achievement[^2^]. The album also features guest appearances by Duane Allman, Bobby Whitlock, Jim Gordon, and others[^2^].
-
- The album was inspired by Clapton's unrequited love for Pattie Boyd, the wife of his friend and fellow musician George Harrison[^2^]. The songs reflect Clapton's emotional turmoil and his desire to express his feelings through music. The album is considered one of the finest examples of blues rock, with influences from soul, country, and gospel[^2^]. It showcases Clapton's virtuoso guitar playing and his chemistry with Allman, who died in a motorcycle accident shortly after the album's release[^2^].
-
- Layla and Other Assorted Love Songs received mixed reviews from critics when it first came out, and sold poorly due to lack of promotion and distribution problems[^2^]. However, it gained popularity over time and was eventually recognized as a classic of rock music. It has been ranked among the greatest albums of all time by various publications and critics, such as Rolling Stone, VH1, and Mojo[^2^]. In 2011, a 40th anniversary edition of the album was released, featuring remastered sound, bonus tracks, and a documentary DVD[^1^].
-
- If you are a fan of rock music, you should not miss this masterpiece. You can download the 40th anniversary edition of Layla and Other Assorted Love Songs from iTunes or other online platforms. You will not regret it!
-
- ## A Closer Look at the Songs
-
- Layla and Other Assorted Love Songs consists of 14 tracks, each one showcasing the band's musical talent and diversity. Here are some highlights of the album:
-
- - "I Looked Away": The opening track is a gentle country rock song, written by Clapton and Whitlock, that sets the tone for the album. It features Clapton's slide guitar and Whitlock's organ.
- - "Bell Bottom Blues": The second track is a soulful ballad, written by Clapton, that expresses his longing for Boyd. It features Clapton's passionate vocals and guitar solo.
- - "Key to the Highway": The third track is a blues standard, written by Big Bill Broonzy and Charles Segar, that showcases the band's jamming skills. It features Allman's lead guitar and Clapton's rhythm guitar.
- - "Tell the Truth": The fourth track is a hard rock song, written by Clapton and Whitlock, that was originally released as a single in 1970. It features Allman's slide guitar and Clapton's wah-wah guitar.
- - "Why Does Love Got to Be So Sad?": The fifth track is a fast-paced rock song, written by Clapton and Whitlock, that reflects Clapton's frustration with love. It features Allman's and Clapton's dueling guitars and Gordon's drumming.
- - "Have You Ever Loved a Woman": The sixth track is a blues classic, written by Billy Myles, that was popularized by Freddie King. It features Clapton's emotional vocals and guitar solo.
- - "Little Wing": The seventh track is a cover of Jimi Hendrix's song, which was released in 1967. It features Allman's and Clapton's guitar harmonies and Whitlock's piano.
- - "It's Too Late": The eighth track is a cover of Chuck Willis's song, which was released in 1956. It features Clapton's lead vocals and guitar solo and Allman's slide guitar.
- - "Layla": The ninth track is the most famous song of the album, written by Clapton and Gordon, that was inspired by a Persian poem about unrequited love. It features Clapton's vocals and guitar riff and Allman's slide guitar in the first part, and Gordon's piano coda in the second part.
- - "Thorn Tree in the Garden": The tenth track is a folk song, written and sung by Whitlock, that closes the album. It features Whitlock's acoustic guitar and harmonica.
-
- These are just some of the songs that make Layla and Other Assorted Love Songs a masterpiece of rock music. You can listen to the whole album and discover more gems for yourself.
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/BombSquad World APK Mod Todo Desbloqueado y Gratis en Mediafre.md DELETED
@@ -1,110 +0,0 @@
- <br />
- <h1>How to Download BombSquad World APK All Unlocked from Mediafire</h1>
- <p>If you are looking for a fun and explosive multiplayer game that you can play with your friends, then you should try <strong>BombSquad World</strong>. It is a modded version of <strong>BombSquad</strong>, a popular game where you can throw bombs, punch, kick, and blast your opponents in various mini-games. In this article, we will show you how to download BombSquad World APK all unlocked from Mediafire, a reliable file hosting service.</p>
- <h2>bombsquad world apk todo desbloqueado mediafıre</h2><br /><p><b><b>Download</b> > <a href="https://urlin.us/2uT0WV">https://urlin.us/2uT0WV</a></b></p><br /><br />
- <h2>What is BombSquad World?</h2>
- <h3>A fun and explosive multiplayer game</h3>
- <p>BombSquad is a game that lets you compete with up to 8 players in different modes, such as capture the flag, king of the hill, hockey, football, and more. You can use bombs, fists, hammers, swords, shields, and other weapons to defeat your enemies. You can also customize your character with different outfits, colors, and accessories. The game has colorful graphics, funny sounds, and physics-based gameplay that makes it enjoyable for everyone.</p>
- <h3>A modded version of BombSquad with more features</h3>
- <p>BombSquad World is a modded version of BombSquad that was created by byANG3L, a YouTube channel that makes videos about the game. BombSquad World has more features than the original game, such as:</p>
- <ul>
- <li>All the content unlocked, including characters, maps, games, icons, etc.</li>
- <li>More customization options for colors, backgrounds, fonts, etc.</li>
- <li>New characters, maps, games, icons, etc. added by the modder.</li>
- <li>More options for gameplay settings, such as gravity, speed, power-ups, etc.</li>
- <li>Support for online multiplayer with other players who have the mod.</li>
- </ul>
- <p>BombSquad World is updated regularly by the modder to add new content and fix bugs. You can check out his YouTube channel for more information and videos about the mod.</p>
- <p>bombsquad pro apk todo desbloqueado mediafıre<br />
- bombsquad world mod apk todo desbloqueado mediafıre<br />
- bombsquad 1.7.5 apk todo desbloqueado mediafıre<br />
- bombsquad byang3l apk todo desbloqueado mediafıre<br />
- bombsquad world modpack v2.3 apk todo desbloqueado mediafıre<br />
- bombsquad hack apk todo desbloqueado mediafıre<br />
- bombsquad android apk todo desbloqueado mediafıre<br />
- bombsquad windows apk todo desbloqueado mediafıre<br />
- bombsquad linux apk todo desbloqueado mediafıre<br />
- bombsquad online apk todo desbloqueado mediafıre<br />
- bombsquad offline apk todo desbloqueado mediafıre<br />
- bombsquad multiplayer apk todo desbloqueado mediafıre<br />
- bombsquad characters apk todo desbloqueado mediafıre<br />
- bombsquad maps apk todo desbloqueado mediafıre<br />
- bombsquad games apk todo desbloqueado mediafıre<br />
- bombsquad scripts apk todo desbloqueado mediafıre<br />
- bombsquad colors apk todo desbloqueado mediafıre<br />
- bombsquad icons apk todo desbloqueado mediafıre<br />
- bombsquad skins apk todo desbloqueado mediafıre<br />
- bombsquad tickets apk todo desbloqueado mediafıre<br />
- bombsquad coins apk todo desbloqueado mediafıre<br />
- bombsquad tokens apk todo desbloqueado mediafıre<br />
- bombsquad cheats apk todo desbloqueado mediafıre<br />
- bombsquad mods apk todo desbloqueado mediafıre<br />
- bombsquad custom apk todo desbloqueado mediafıre<br />
- bombsquad latest version apk todo desbloqueado mediafıre<br />
- bombsquad old version apk todo desbloqueado mediafıre<br />
- bombsquad new update apk todo desbloqueado mediafıre<br />
- bombsquad free download apk todo desbloqueado mediafıre<br />
- bombsquad full version apk todo desbloqueado mediafıre<br />
- bombsquad premium version apk todo desbloqueado mediafıre<br />
- bombsquad cracked version apk todo desbloqueado mediafıre<br />
- bombsquad patched version apk todo desbloqueado mediafıre<br />
- bombsquad unlocked version apk todo desbloqueado mediafıre<br />
- bombsquad mega mod version apk todo desbloqueado mediafıre<br />
- bombsquad unlimited version apk todo desbloqueado mediafıre<br />
- descargar bombsquad world apk todo desbloqueado mediafıre<br />
- download bombsquad world apk all unlocked mediafire <br />
- baixar bombsquad world apk tudo destravado mediafire <br />
- télécharger bombsquad world apk tout déverrouillé mediapire <br />
- scaricare bombsquad world apk tutto sbloccato mediapire <br />
- herunterladen bombsquad world apk alles entsperrt mediapire <br />
- indirme bombasquade dünya apks tümü kilidi açık medyapire <br />
- скачать бомбскуад ворлд апк все разблокировано медиапире <br />
- 下载炸弹小队世界apk全部解锁mediapire <br />
- ダウンロードボムスクワッドワールドapkすべてのロックを解除mediapire <br />
- 다운로드 폭탄 분대 월드 APK 모든 잠금 해제 mediapire <br />
- تحميل بومبسكواد وورلد APK كل مقفلة mediapire <br />
- डाउनलोड बमस्क्वाड वर्ल्ड एपीके सभी अनलॉक मीडियापायर</p>
- <h2>Why download BombSquad World APK from Mediafire?</h2>
- <h3>It is free and easy to install</h3>
- <p>BombSquad World APK is a file that contains the modded version of BombSquad. You can download it for free from Mediafire, a file hosting service that allows you to upload and share files online. Mediafire has a simple interface and fast download speed. You don't need to register or pay anything to use it. You just need to click on the download link and wait for it to finish.</p>
- <h3>It has all the content unlocked and customized</h3>
- <p>BombSquad World APK has all the content unlocked and customized by the modder. You don't need to spend any money or time to unlock or buy anything in the game. You can access everything from the start and enjoy the game without any limitations. You can also change the settings and appearance of the game according to your preferences.</p>
- <h3>It works for Android and Windows devices</h3>
- <p>BombSquad World APK works for both Android and Windows devices. You can play the game on your smartphone, tablet, or PC. You just need to install the APK file on your device and run it. You don't need to root or jailbreak your device to use the mod. You can also play online with other players who have the mod installed on their devices.</p>
- <h2>How to download and install BombSquad World APK from Mediafire?</h2>
- <h3>Step 1: Go to the BombSquad World page</h3>
- <p>The first step is to go to the BombSquad World page on Mediafire. You can use this link: <a href="">BombSquad World APK Download</a>. This will take you to the page where you can see the details and versions of the mod.</p>
- <h3>Step 2: Choose the version you want to download</h3>
- <p>The next step is to choose the version you want to download. There are different versions of the mod, such as v1.0, v1.1, v1.2, etc. Each version has different features and content added by the modder. You can check out his YouTube channel for more information and videos about each version. You can also see the file size and date of each version on the page.</p>
- <h3>Step 3: Click on the download link and wait for it to finish</h3>
- <p>The third step is to click on the download link and wait for it to finish. You will see a green button that says "Download". Click on it and a new tab will open. You will see a countdown timer and a captcha code. Enter the captcha code and click on "Verify". Then, click on "Download" again and the download will start. Wait for it to finish and save the file on your device.</p>
- <h3>Step 4: Install the APK file on your device</h3>
- <p>The fourth step is to install the APK file on your device. Before you do that, you need to enable "Unknown sources" in your device settings. This will allow you to install apps from sources other than the Google Play Store or the Microsoft Store. To do that, go to Settings > Security > Unknown sources and toggle it on.</p>
- <p>Then, locate the APK file on your device and tap on it. You will see a pop-up window that asks you to confirm the installation. Tap on "Install" and wait for it to finish.</p>
84
- <h3>Step 5: Enjoy the game with your friends</h3>
85
- <p>The final step is to enjoy the game with your friends. You can launch the game from your app drawer or desktop shortcut. You will see the BombSquad World logo and menu. You can choose from different modes, such as online, local, co-op, etc. You can also customize your character, settings, and game options. You can invite your friends to join you online or play with them locally using Wi-Fi or Bluetooth.</p>
86
- <h2>Conclusion</h2>
87
- <p>BombSquad World is a fun and explosive multiplayer game that you can play with your friends. It is a modded version of BombSquad that has all the content unlocked and customized by byANG3L, a YouTube channel that makes videos about the game. You can download BombSquad World APK all unlocked from Mediafire, a reliable file hosting service that is free and easy to use. You just need to follow these steps:</p>
88
- <ol>
89
- <li>Go to the BombSquad World page on Mediafire.</li>
90
- <li>Choose the version you want to download.</li>
91
- <li>Click on the download link and wait for it to finish.</li>
92
- <li>Install the APK file on your device.</li>
93
- <li>Enjoy the game with your friends.</li>
94
- </ol>
95
- <p>We hope this article was helpful and informative for you. If you have any questions or feedback, please let us know in the comments below.</p>
96
- <h2>FAQs</h2>
97
- <ul>
98
- <li><strong>Q: Is BombSquad World safe to download and install?</strong></li>
99
- <li>A: Yes, BombSquad World is safe to download and install. It does not contain any viruses or malware that can harm your device or data. However, you should always download it from trusted sources like Mediafire and scan it with an antivirus before installing it.</li>
100
- <li><strong>Q: Is BombSquad World compatible with my device?</strong></li>
101
- <li>A: BombSquad World is compatible with most Android and Windows devices that have at least 1 GB of RAM and 100 MB of free storage space. However, some devices may not support some features or run smoothly due to hardware limitations.</li>
102
- <li><strong>Q: Q: How can I update BombSquad World to the latest version?</strong></li>
103
- <li>A: You can update BombSquad World to the latest version by following the same steps as downloading and installing it. You just need to go to the BombSquad World page on Mediafire and choose the latest version available. Then, download and install it over the previous version. You don't need to uninstall the old version first.</li>
104
- <li><strong>Q: Can I play BombSquad World with players who have the original BombSquad?</strong></li>
105
- <li>A: No, you can't play BombSquad World with players who have the original BombSquad. The modded version has different features and content that are not compatible with the original version. You can only play online with other players who have the same modded version as you.</li>
106
- <li><strong>Q: Can I use BombSquad World on other platforms like iOS or Mac?</strong></li>
107
- <li>A: No, you can't use BombSquad World on other platforms like iOS or Mac. The modded version is only available for Android and Windows devices. You can use the original BombSquad on other platforms, but you won't have access to the modded features and content.</li>
108
- </ul></p> 197e85843d<br />
109
- <br />
110
- <br />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/4Taps/SadTalker/src/face3d/util/skin_mask.py DELETED
@@ -1,125 +0,0 @@
- """This script is to generate skin attention masks for Deep3DFaceRecon_pytorch
- """
- 
- import math
- import numpy as np
- import os
- import cv2
- 
- class GMM:
-     def __init__(self, dim, num, w, mu, cov, cov_det, cov_inv):
-         self.dim = dim # feature dimension
-         self.num = num # number of Gaussian components
-         self.w = w # weights of Gaussian components (a list of scalars)
-         self.mu = mu # means of Gaussian components (a list of 1xdim vectors)
-         self.cov = cov # covariance matrices of Gaussian components (a list of dimxdim matrices)
-         self.cov_det = cov_det # pre-computed determinants of the covariance matrices (a list of scalars)
-         self.cov_inv = cov_inv # pre-computed inverse covariance matrices (a list of dimxdim matrices)
- 
-         self.factor = [0]*num
-         for i in range(self.num):
-             self.factor[i] = (2*math.pi)**(self.dim/2) * self.cov_det[i]**0.5
- 
-     def likelihood(self, data):
-         assert data.shape[1] == self.dim
-         N = data.shape[0]
-         lh = np.zeros(N)
- 
-         for i in range(self.num):
-             data_ = data - self.mu[i]
- 
-             tmp = np.matmul(data_, self.cov_inv[i]) * data_
-             tmp = np.sum(tmp, axis=1)
-             power = -0.5 * tmp
- 
-             p = np.exp(power)  # per-sample Gaussian density, before normalization
-             p = p / self.factor[i]
-             lh += p * self.w[i]
- 
-         return lh
- 
- 
- def _rgb2ycbcr(rgb):
-     m = np.array([[65.481, 128.553, 24.966],
-                   [-37.797, -74.203, 112],
-                   [112, -93.786, -18.214]])
-     shape = rgb.shape
-     rgb = rgb.reshape((shape[0] * shape[1], 3))
-     ycbcr = np.dot(rgb, m.transpose() / 255.)
-     ycbcr[:, 0] += 16.
-     ycbcr[:, 1:] += 128.
-     return ycbcr.reshape(shape)
- 
- 
- def _bgr2ycbcr(bgr):
-     rgb = bgr[..., ::-1]
-     return _rgb2ycbcr(rgb)
- 
- 
- gmm_skin_w = [0.24063933, 0.16365987, 0.26034665, 0.33535415]
- gmm_skin_mu = [np.array([113.71862, 103.39613, 164.08226]),
-                np.array([150.19858, 105.18467, 155.51428]),
-                np.array([183.92976, 107.62468, 152.71820]),
-                np.array([114.90524, 113.59782, 151.38217])]
- gmm_skin_cov_det = [5692842.5, 5851930.5, 2329131., 1585971.]
- gmm_skin_cov_inv = [np.array([[0.0019472069, 0.0020450759, -0.00060243998],[0.0020450759, 0.017700525, 0.0051420014],[-0.00060243998, 0.0051420014, 0.0081308950]]),
-                     np.array([[0.0027110141, 0.0011036990, 0.0023122299],[0.0011036990, 0.010707724, 0.010742856],[0.0023122299, 0.010742856, 0.017481629]]),
-                     np.array([[0.0048026871, 0.00022935172, 0.0077668377],[0.00022935172, 0.011729696, 0.0081661865],[0.0077668377, 0.0081661865, 0.025374353]]),
-                     np.array([[0.0011989699, 0.0022453172, -0.0010748957],[0.0022453172, 0.047758564, 0.020332102],[-0.0010748957, 0.020332102, 0.024502251]])]
- 
- gmm_skin = GMM(3, 4, gmm_skin_w, gmm_skin_mu, [], gmm_skin_cov_det, gmm_skin_cov_inv)
- 
- gmm_nonskin_w = [0.12791070, 0.31130761, 0.34245777, 0.21832393]
- gmm_nonskin_mu = [np.array([99.200851, 112.07533, 140.20602]),
-                   np.array([110.91392, 125.52969, 130.19237]),
-                   np.array([129.75864, 129.96107, 126.96808]),
-                   np.array([112.29587, 128.85121, 129.05431])]
- gmm_nonskin_cov_det = [458703648., 6466488., 90611376., 133097.63]
- gmm_nonskin_cov_inv = [np.array([[0.00085371657, 0.00071197288, 0.00023958916],[0.00071197288, 0.0025935620, 0.00076557708],[0.00023958916, 0.00076557708, 0.0015042332]]),
-                        np.array([[0.00024650150, 0.00045542428, 0.00015019422],[0.00045542428, 0.026412144, 0.018419769],[0.00015019422, 0.018419769, 0.037497383]]),
-                        np.array([[0.00037054974, 0.00038146760, 0.00040408765],[0.00038146760, 0.0085505722, 0.0079136286],[0.00040408765, 0.0079136286, 0.010982352]]),
-                        np.array([[0.00013709733, 0.00051228428, 0.00012777430],[0.00051228428, 0.28237113, 0.10528370],[0.00012777430, 0.10528370, 0.23468947]])]
- 
- gmm_nonskin = GMM(3, 4, gmm_nonskin_w, gmm_nonskin_mu, [], gmm_nonskin_cov_det, gmm_nonskin_cov_inv)
- 
- prior_skin = 0.8
- prior_nonskin = 1 - prior_skin
- 
- 
- # calculate skin attention mask
- def skinmask(imbgr):
-     im = _bgr2ycbcr(imbgr)
- 
-     data = im.reshape((-1, 3))
- 
-     lh_skin = gmm_skin.likelihood(data)
-     lh_nonskin = gmm_nonskin.likelihood(data)
- 
-     tmp1 = prior_skin * lh_skin
-     tmp2 = prior_nonskin * lh_nonskin
-     post_skin = tmp1 / (tmp1 + tmp2) # posterior probability of skin
- 
-     post_skin = post_skin.reshape((im.shape[0], im.shape[1]))
- 
-     post_skin = np.round(post_skin * 255)
-     post_skin = post_skin.astype(np.uint8)
-     post_skin = np.tile(np.expand_dims(post_skin, 2), [1, 1, 3]) # reshape to H*W*3
- 
-     return post_skin
- 
- 
- def get_skin_mask(img_path):
-     print('generating skin masks......')
-     names = [i for i in sorted(os.listdir(
-         img_path)) if 'jpg' in i or 'png' in i or 'jpeg' in i or 'PNG' in i]
-     save_path = os.path.join(img_path, 'mask')
-     if not os.path.isdir(save_path):
-         os.makedirs(save_path)
- 
-     for i in range(0, len(names)):
-         name = names[i]
-         print('%05d' % (i), ' ', name)
-         full_image_name = os.path.join(img_path, name)
-         img = cv2.imread(full_image_name).astype(np.float32)
-         skin_img = skinmask(img)
-         cv2.imwrite(os.path.join(save_path, name), skin_img.astype(np.uint8))
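
A minimal usage sketch for the module above; the import path and folder name are assumptions for illustration, not part of the original file:

    from src.face3d.util.skin_mask import get_skin_mask  # assumed import path

    # Writes one 8-bit H*W*3 mask per image into <folder>/mask.
    get_skin_mask('./examples/source_images')  # hypothetical folder of face crops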
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AICODER009/Food101_Detection/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Food101 Detection
- emoji: 🔥
- colorFrom: yellow
- colorTo: red
- sdk: gradio
- sdk_version: 3.29.0
- app_file: app.py
- pinned: false
- license: mit
- ---
- 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/tts/portaspeech/fvae.py DELETED
@@ -1,203 +0,0 @@
- import numpy as np
- import torch
- import torch.distributions as dist
- from torch import nn
- 
- from text_to_speech.modules.commons.conv import ConditionalConvBlocks
- from text_to_speech.modules.commons.normalizing_flow.res_flow import ResFlow
- from text_to_speech.modules.commons.wavenet import WN
- # Required by SyntaFVAE below; this import must stay enabled.
- from text_to_speech.modules.tts.syntaspeech.syntactic_graph_encoder import GraphAuxEnc
- 
- 
- class FVAEEncoder(nn.Module):
-     def __init__(self, c_in, hidden_size, c_latent, kernel_size,
-                  n_layers, c_cond=0, p_dropout=0, strides=[4], nn_type='wn'):
-         super().__init__()
-         self.strides = strides
-         self.hidden_size = hidden_size
-         if np.prod(strides) == 1:
-             self.pre_net = nn.Conv1d(c_in, hidden_size, kernel_size=1)
-         else:
-             self.pre_net = nn.Sequential(*[
-                 nn.Conv1d(c_in, hidden_size, kernel_size=s * 2, stride=s, padding=s // 2)
-                 if i == 0 else
-                 nn.Conv1d(hidden_size, hidden_size, kernel_size=s * 2, stride=s, padding=s // 2)
-                 for i, s in enumerate(strides)
-             ])
-         if nn_type == 'wn':
-             self.nn = WN(hidden_size, kernel_size, 1, n_layers, c_cond, p_dropout)
-         elif nn_type == 'conv':
-             self.nn = ConditionalConvBlocks(
-                 hidden_size, c_cond, hidden_size, None, kernel_size,
-                 layers_in_block=2, is_BTC=False, num_layers=n_layers)
- 
-         self.out_proj = nn.Conv1d(hidden_size, c_latent * 2, 1)
-         self.latent_channels = c_latent
- 
-     def forward(self, x, nonpadding, cond):
-         x = self.pre_net(x)
-         nonpadding = nonpadding[:, :, ::np.prod(self.strides)][:, :, :x.shape[-1]]
-         x = x * nonpadding
-         x = self.nn(x, nonpadding=nonpadding, cond=cond) * nonpadding
-         x = self.out_proj(x)
-         m, logs = torch.split(x, self.latent_channels, dim=1)
-         z = m + torch.randn_like(m) * torch.exp(logs)  # reparameterization trick
-         return z, m, logs, nonpadding
- 
- 
- class FVAEDecoder(nn.Module):
-     def __init__(self, c_latent, hidden_size, out_channels, kernel_size,
-                  n_layers, c_cond=0, p_dropout=0, strides=[4], nn_type='wn'):
-         super().__init__()
-         self.strides = strides
-         self.hidden_size = hidden_size
-         self.pre_net = nn.Sequential(*[
-             nn.ConvTranspose1d(c_latent, hidden_size, kernel_size=s, stride=s)
-             if i == 0 else
-             nn.ConvTranspose1d(hidden_size, hidden_size, kernel_size=s, stride=s)
-             for i, s in enumerate(strides)
-         ])
-         if nn_type == 'wn':
-             self.nn = WN(hidden_size, kernel_size, 1, n_layers, c_cond, p_dropout)
-         elif nn_type == 'conv':
-             self.nn = ConditionalConvBlocks(
-                 hidden_size, c_cond, hidden_size, [1] * n_layers, kernel_size,
-                 layers_in_block=2, is_BTC=False)
-         self.out_proj = nn.Conv1d(hidden_size, out_channels, 1)
- 
-     def forward(self, x, nonpadding, cond):
-         x = self.pre_net(x)
-         x = x * nonpadding
-         x = self.nn(x, nonpadding=nonpadding, cond=cond) * nonpadding
-         x = self.out_proj(x)
-         return x
- 
- 
- class FVAE(nn.Module):
-     def __init__(self,
-                  c_in_out, hidden_size, c_latent,
-                  kernel_size, enc_n_layers, dec_n_layers, c_cond, strides,
-                  use_prior_flow, flow_hidden=None, flow_kernel_size=None, flow_n_steps=None,
-                  encoder_type='wn', decoder_type='wn'):
-         super(FVAE, self).__init__()
-         self.strides = strides
-         self.hidden_size = hidden_size
-         self.latent_size = c_latent
-         self.use_prior_flow = use_prior_flow
-         if np.prod(strides) == 1:
-             self.g_pre_net = nn.Conv1d(c_cond, c_cond, kernel_size=1)
-         else:
-             self.g_pre_net = nn.Sequential(*[
-                 nn.Conv1d(c_cond, c_cond, kernel_size=s * 2, stride=s, padding=s // 2)
-                 for i, s in enumerate(strides)
-             ])
-         self.encoder = FVAEEncoder(c_in_out, hidden_size, c_latent, kernel_size,
-                                    enc_n_layers, c_cond, strides=strides, nn_type=encoder_type)
-         if use_prior_flow:
-             self.prior_flow = ResFlow(
-                 c_latent, flow_hidden, flow_kernel_size, flow_n_steps, 4, c_cond=c_cond)
-         self.decoder = FVAEDecoder(c_latent, hidden_size, c_in_out, kernel_size,
-                                    dec_n_layers, c_cond, strides=strides, nn_type=decoder_type)
-         self.prior_dist = dist.Normal(0, 1)
- 
-     def forward(self, x=None, nonpadding=None, cond=None, infer=False, noise_scale=1.0, **kwargs):
-         """
-         :param x: [B, C_in_out, T]
-         :param nonpadding: [B, 1, T]
-         :param cond: [B, C_g, T]
-         :return:
-         """
-         if nonpadding is None:
-             nonpadding = 1
-         cond_sqz = self.g_pre_net(cond)
-         if not infer:
-             z_q, m_q, logs_q, nonpadding_sqz = self.encoder(x, nonpadding, cond_sqz)
-             q_dist = dist.Normal(m_q, logs_q.exp())
-             if self.use_prior_flow:
-                 logqx = q_dist.log_prob(z_q)
-                 z_p = self.prior_flow(z_q, nonpadding_sqz, cond_sqz)
-                 logpx = self.prior_dist.log_prob(z_p)
-                 loss_kl = ((logqx - logpx) * nonpadding_sqz).sum() / nonpadding_sqz.sum() / logqx.shape[1]
-             else:
-                 loss_kl = torch.distributions.kl_divergence(q_dist, self.prior_dist)
-                 loss_kl = (loss_kl * nonpadding_sqz).sum() / nonpadding_sqz.sum() / z_q.shape[1]
-                 z_p = None
-             return z_q, loss_kl, z_p, m_q, logs_q
-         else:
-             latent_shape = [cond_sqz.shape[0], self.latent_size, cond_sqz.shape[2]]
-             z_p = torch.randn(latent_shape).to(cond.device) * noise_scale
-             if self.use_prior_flow:
-                 z_p = self.prior_flow(z_p, 1, cond_sqz, reverse=True)
-             return z_p
- 
- 
- class SyntaFVAE(nn.Module):
-     def __init__(self,
-                  c_in_out, hidden_size, c_latent,
-                  kernel_size, enc_n_layers, dec_n_layers, c_cond, strides,
-                  use_prior_flow, flow_hidden=None, flow_kernel_size=None, flow_n_steps=None,
-                  encoder_type='wn', decoder_type='wn'):
-         super(SyntaFVAE, self).__init__()
-         self.strides = strides
-         self.hidden_size = hidden_size
-         self.latent_size = c_latent
-         self.use_prior_flow = use_prior_flow
-         if np.prod(strides) == 1:
-             self.g_pre_net = nn.Conv1d(c_cond, c_cond, kernel_size=1)
-         else:
-             self.g_pre_net = nn.Sequential(*[
-                 nn.Conv1d(c_cond, c_cond, kernel_size=s * 2, stride=s, padding=s // 2)
-                 for i, s in enumerate(strides)
-             ])
-         self.encoder = FVAEEncoder(c_in_out, hidden_size, c_latent, kernel_size,
-                                    enc_n_layers, c_cond, strides=strides, nn_type=encoder_type)
-         if use_prior_flow:
-             self.prior_flow = ResFlow(
-                 c_latent, flow_hidden, flow_kernel_size, flow_n_steps, 4, c_cond=c_cond)
-         self.decoder = FVAEDecoder(c_latent, hidden_size, c_in_out, kernel_size,
-                                    dec_n_layers, c_cond, strides=strides, nn_type=decoder_type)
-         self.prior_dist = dist.Normal(0, 1)
-         self.graph_encoder = GraphAuxEnc(in_dim=hidden_size, hid_dim=hidden_size, out_dim=hidden_size)
- 
-     def forward(self, x=None, nonpadding=None, cond=None, infer=False, noise_scale=1.0,
-                 mel2word=None, ph2word=None, graph_lst=None, etypes_lst=None):
-         """
-         :param x: target mel, [B, C_in_out, T]
-         :param nonpadding: [B, 1, T]
-         :param cond: phoneme encoding, [B, C_g, T]
-         :return:
-         """
-         word_len = ph2word.max(dim=1)[0]
-         ph_encoding_for_graph = cond.detach() + 0.1 * (cond - cond.detach())  # only 0.1x grad can pass through
-         _, ph_out_word_encoding_for_graph = GraphAuxEnc.ph_encoding_to_word_encoding(ph_encoding_for_graph.transpose(1, 2), mel2word, word_len)
-         t_m = mel2word.shape[-1]
-         g_graph = self.graph_encoder.word_forward(graph_lst=graph_lst, word_encoding=ph_out_word_encoding_for_graph, etypes_lst=etypes_lst)
-         g_graph = g_graph.transpose(1, 2)
-         g_graph = GraphAuxEnc._postprocess_word2ph(g_graph, mel2word, t_m)
-         g_graph = g_graph.transpose(1, 2)
-         cond = cond + g_graph * 1.
- 
-         if nonpadding is None:
-             nonpadding = 1
-         cond_sqz = self.g_pre_net(cond)
-         if not infer:
-             z_q, m_q, logs_q, nonpadding_sqz = self.encoder(x, nonpadding, cond_sqz)
-             q_dist = dist.Normal(m_q, logs_q.exp())
-             if self.use_prior_flow:
-                 logqx = q_dist.log_prob(z_q)
-                 z_p = self.prior_flow(z_q, nonpadding_sqz, cond_sqz)
-                 logpx = self.prior_dist.log_prob(z_p)
-                 loss_kl = ((logqx - logpx) * nonpadding_sqz).sum() / nonpadding_sqz.sum() / logqx.shape[1]
-             else:
-                 loss_kl = torch.distributions.kl_divergence(q_dist, self.prior_dist)
-                 loss_kl = (loss_kl * nonpadding_sqz).sum() / nonpadding_sqz.sum() / z_q.shape[1]
-                 z_p = None
-             return z_q, loss_kl, z_p, m_q, logs_q
-         else:
-             latent_shape = [cond_sqz.shape[0], self.latent_size, cond_sqz.shape[2]]
-             z_p = torch.randn(latent_shape).to(cond.device) * noise_scale
-             if self.use_prior_flow:
-                 z_p = self.prior_flow(z_p, 1, cond_sqz, reverse=True)
-             return z_p
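
A minimal smoke test for the FVAE above, assuming the repo's WN/ConditionalConvBlocks modules are importable; all hyperparameter values here are illustrative, not the original training configs:

    import torch

    fvae = FVAE(c_in_out=80, hidden_size=192, c_latent=16, kernel_size=5,
                enc_n_layers=8, dec_n_layers=4, c_cond=192, strides=[4],
                use_prior_flow=False)
    x = torch.randn(2, 80, 128)           # [B, C_in_out, T] target mel
    nonpadding = torch.ones(2, 1, 128)    # [B, 1, T]
    cond = torch.randn(2, 192, 128)       # [B, C_g, T] phoneme encoding
    z_q, loss_kl, z_p, m_q, logs_q = fvae(x, nonpadding, cond)  # training path
    x_recon = fvae.decoder(z_q, 1, cond)                        # decode back to [2, 80, 128]
    z_sampled = fvae(cond=cond, infer=True)                     # inference samples from the prior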
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/vocoder/parallel_wavegan/layers/causal_conv.py DELETED
@@ -1,56 +0,0 @@
- # -*- coding: utf-8 -*-
- 
- # Copyright 2020 Tomoki Hayashi
- #  MIT License (https://opensource.org/licenses/MIT)
- 
- """Causal convolution layer modules."""
- 
- 
- import torch
- 
- 
- class CausalConv1d(torch.nn.Module):
-     """CausalConv1d module with customized initialization."""
- 
-     def __init__(self, in_channels, out_channels, kernel_size,
-                  dilation=1, bias=True, pad="ConstantPad1d", pad_params={"value": 0.0}):
-         """Initialize CausalConv1d module."""
-         super(CausalConv1d, self).__init__()
-         # Pad by (kernel_size - 1) * dilation, then trim to the input length in forward(),
-         # so the output at time t depends only on inputs at times <= t.
-         self.pad = getattr(torch.nn, pad)((kernel_size - 1) * dilation, **pad_params)
-         self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size,
-                                     dilation=dilation, bias=bias)
- 
-     def forward(self, x):
-         """Calculate forward propagation.
- 
-         Args:
-             x (Tensor): Input tensor (B, in_channels, T).
- 
-         Returns:
-             Tensor: Output tensor (B, out_channels, T).
- 
-         """
-         return self.conv(self.pad(x))[:, :, :x.size(2)]
- 
- 
- class CausalConvTranspose1d(torch.nn.Module):
-     """CausalConvTranspose1d module with customized initialization."""
- 
-     def __init__(self, in_channels, out_channels, kernel_size, stride, bias=True):
-         """Initialize CausalConvTranspose1d module."""
-         super(CausalConvTranspose1d, self).__init__()
-         self.deconv = torch.nn.ConvTranspose1d(
-             in_channels, out_channels, kernel_size, stride, bias=bias)
-         self.stride = stride
- 
-     def forward(self, x):
-         """Calculate forward propagation.
- 
-         Args:
-             x (Tensor): Input tensor (B, in_channels, T_in).
- 
-         Returns:
-             Tensor: Output tensor (B, out_channels, T_out).
- 
-         """
-         return self.deconv(x)[:, :, :-self.stride]
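
A quick sanity check of the causality property; the import path is assumed from the repo layout above:

    import torch
    from parallel_wavegan.layers.causal_conv import CausalConv1d  # assumed path

    conv = CausalConv1d(1, 1, kernel_size=3, dilation=2)
    x = torch.randn(4, 1, 100)
    y1 = conv(x)
    x2 = x.clone()
    x2[:, :, 50:] = 0.0   # perturb only the "future" half of the signal
    y2 = conv(x2)
    assert y1.shape == x.shape                            # length is preserved
    assert torch.allclose(y1[:, :, :50], y2[:, :, :50])   # outputs before t=50 are unchanged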
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AUST001/Translation/app.py DELETED
@@ -1,37 +0,0 @@
- import openai
- import gradio as gr
- import random
- 
- 
- # NOTE: hardcoded API keys like these should be rotated and moved out of source control.
- openai.api_key = random.choice(['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',
-                                 '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',
-                                 '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',
-                                 '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',
-                                 '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',
-                                 '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',
-                                 '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',
-                                 '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',
-                                 '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',
-                                 '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',
-                                 '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'])
- 
- def completion(prompt):
-     response = openai.Completion.create(
-         engine="text-davinci-003",
-         prompt=prompt,
-         max_tokens=1024,
-         n=1,
-         stop=None,
-         temperature=0.5
-     )
-     return response.choices[0].text[2:]
- 
- def greet(question):
-     return completion('Please translate this passage into German and then into Chinese.\n' + question)
- 
- 
- demo = gr.Interface(fn=greet,
-                     inputs=gr.Textbox(lines=20, placeholder='Please enter the text to translate'),
-                     outputs=gr.Textbox(lines=20, placeholder='Translation may take a while, please wait...'))
- 
- if __name__ == "__main__":
-     # demo.launch(share=True)
-     demo.launch()
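
A safer pattern than shipping keys in source, sketched under the assumption that an OPENAI_API_KEY environment variable is set before launch:

    import os
    import openai

    # Fail fast with a clear KeyError instead of silently using a leaked key.
    openai.api_key = os.environ["OPENAI_API_KEY"]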
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AchyuthGamer/OpenGPT/client/js/highlight.min.js DELETED
The diff for this file is too large to render. See raw diff
 
spaces/Aer0xander/sd-to-diffusers/app.py DELETED
@@ -1,198 +0,0 @@
- import os
- import subprocess
- from huggingface_hub import HfApi, upload_folder
- import gradio as gr
- import hf_utils
- import utils
- from safetensors import safe_open
- import torch
- import glob
- 
- subprocess.run(["git", "clone", "https://github.com/huggingface/diffusers", "diffs"])
- 
- def error_str(error, title="Error"):
-     return f"""#### {title}
- {error}""" if error else ""
- 
- def on_token_change(token):
-     model_names, error = hf_utils.get_my_model_names(token)
-     if model_names:
-         model_names.append("Other")
- 
-     return gr.update(visible=bool(model_names)), gr.update(choices=model_names, value=model_names[0] if model_names else None), gr.update(visible=bool(model_names)), gr.update(value=error_str(error))
- 
- def url_to_model_id(model_id_str):
-     return model_id_str.split("/")[-2] + "/" + model_id_str.split("/")[-1] if model_id_str.startswith("https://huggingface.co/") else model_id_str
- 
- def get_ckpt_names(token, radio_model_names, input_model):
- 
-     model_id = url_to_model_id(input_model) if radio_model_names == "Other" else radio_model_names
- 
-     if token == "" or model_id == "":
-         return error_str("Please enter both a token and a model name.", title="Invalid input"), gr.update(choices=[]), gr.update(visible=False)
- 
-     try:
-         api = HfApi(token=token)
-         ckpt_files = [f for f in api.list_repo_files(repo_id=model_id) if f.endswith(".ckpt") or f.endswith(".safetensors")]
- 
-         if not ckpt_files:
-             return error_str("No checkpoint files found in the model repo."), gr.update(choices=[]), gr.update(visible=False)
- 
-         return None, gr.update(choices=ckpt_files, value=ckpt_files[0], visible=True), gr.update(visible=True)
- 
-     except Exception as e:
-         return error_str(e), gr.update(choices=[]), None
- 
- def convert_and_push(radio_model_names, input_model, ckpt_name, sd_version, token, path_in_repo, ema, safetensors):
-     extract_ema = ema == "ema"
- 
-     if sd_version is None:
-         return error_str("You must select a stable diffusion version.", title="Invalid input")
- 
-     model_id = url_to_model_id(input_model) if radio_model_names == "Other" else radio_model_names
- 
-     try:
-         model_id = url_to_model_id(model_id)
- 
-         # 1. Download the checkpoint file
-         ckpt_path, revision = hf_utils.download_file(repo_id=model_id, filename=ckpt_name, token=token)
- 
-         if safetensors == "yes":
-             # Re-serialize safetensors weights as a plain torch checkpoint for the converter script.
-             tensors = {}
-             with safe_open(ckpt_path, framework="pt", device="cpu") as f:
-                 for key in f.keys():
-                     tensors[key] = f.get_tensor(key)
- 
-             new_checkpoint_path = "/".join(ckpt_path.split("/")[:-1] + ["model_safe.ckpt"])
-             torch.save(tensors, new_checkpoint_path)
-             ckpt_path = new_checkpoint_path
-             print("Converting ckpt_path", ckpt_path)
- 
-         print(ckpt_path)
- 
-         # 2. Run the conversion script
-         os.makedirs(model_id, exist_ok=True)
-         run_command = [
-             "python3",
-             "./diffs/scripts/convert_original_stable_diffusion_to_diffusers.py",
-             "--checkpoint_path",
-             ckpt_path,
-             "--dump_path",
-             model_id,
-         ]
-         if extract_ema:
-             run_command.append("--extract_ema")
-         subprocess.run(run_command)
- 
-         # 3. Push to the model repo
-         commit_message = "Add Diffusers weights"
-         upload_folder(
-             folder_path=model_id,
-             repo_id=model_id,
-             path_in_repo=path_in_repo,
-             token=token,
-             create_pr=True,
-             commit_message=commit_message,
-             commit_description=f"Add Diffusers weights converted from checkpoint `{ckpt_name}` in revision {revision}",
-         )
- 
-         # 4. Delete the downloaded checkpoint file, yaml files, and the converted model folder
-         hf_utils.delete_file(revision)
-         subprocess.run(["rm", "-rf", model_id.split('/')[0]])
-         for f in glob.glob("*.yaml*"):
-             subprocess.run(["rm", "-rf", f])
- 
-         return f"""Successfully converted the checkpoint and opened a PR to add the weights to the model repo.
- You can view and merge the PR [here]({hf_utils.get_pr_url(HfApi(token=token), model_id, commit_message)})."""
- 
-     except Exception as e:
-         return error_str(e)
- 
- 
- DESCRIPTION = """### Convert a stable diffusion checkpoint to Diffusers🧨
- With this space, you can easily convert a CompVis stable diffusion checkpoint to Diffusers and automatically create a pull request to the model repo.
- You can choose to convert a checkpoint from one of your own models, or from any other model on the Hub.
- You can skip the queue by running the app in the colab: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/gist/qunash/f0f3152c5851c0c477b68b7b98d547fe/convert-sd-to-diffusers.ipynb)"""
- 
- with gr.Blocks() as demo:
- 
-     gr.Markdown(DESCRIPTION)
-     with gr.Row():
- 
-         with gr.Column(scale=11):
-             with gr.Column():
-                 gr.Markdown("## 1. Load model info")
-                 input_token = gr.Textbox(
-                     max_lines=1,
-                     type="password",
-                     label="Enter your Hugging Face token",
-                     placeholder="READ permission is sufficient"
-                 )
-                 gr.Markdown("You can get a token [here](https://huggingface.co/settings/tokens)")
-                 with gr.Group(visible=False) as group_model:
-                     radio_model_names = gr.Radio(label="Choose a model")
-                     input_model = gr.Textbox(
-                         max_lines=1,
-                         label="Model name or URL",
-                         placeholder="username/model_name",
-                         visible=False,
-                     )
- 
-             btn_get_ckpts = gr.Button("Load", visible=False)
- 
-         with gr.Column(scale=10):
-             with gr.Column(visible=False) as group_convert:
-                 gr.Markdown("## 2. Convert to Diffusers🧨")
-                 radio_ckpts = gr.Radio(label="Choose the checkpoint to convert", visible=False)
-                 path_in_repo = gr.Textbox(label="Path where the weights will be saved", placeholder="Leave empty for root folder")
-                 ema = gr.Radio(label="Extract EMA or non-EMA?", choices=["ema", "non-ema"])
-                 safetensors = gr.Radio(label="Extract from safetensors", choices=["yes", "no"], value="no")
-                 radio_sd_version = gr.Radio(label="Choose the model version", choices=["v1", "v2", "v2.1"])
-                 gr.Markdown("Conversion may take a few minutes.")
-                 btn_convert = gr.Button("Convert & Push")
- 
-     error_output = gr.Markdown(label="Output")
- 
-     input_token.change(
-         fn=on_token_change,
-         inputs=input_token,
-         outputs=[group_model, radio_model_names, btn_get_ckpts, error_output],
-         queue=False,
-         scroll_to_output=True)
- 
-     radio_model_names.change(
-         lambda x: gr.update(visible=x == "Other"),
-         inputs=radio_model_names,
-         outputs=input_model,
-         queue=False,
-         scroll_to_output=True)
- 
-     btn_get_ckpts.click(
-         fn=get_ckpt_names,
-         inputs=[input_token, radio_model_names, input_model],
-         outputs=[error_output, radio_ckpts, group_convert],
-         scroll_to_output=True,
-         queue=False
-     )
- 
-     btn_convert.click(
-         fn=convert_and_push,
-         inputs=[radio_model_names, input_model, radio_ckpts, radio_sd_version, input_token, path_in_repo, ema, safetensors],
-         outputs=error_output,
-         scroll_to_output=True
-     )
- 
-     # gr.Markdown("""<img src="https://raw.githubusercontent.com/huggingface/diffusers/main/docs/source/imgs/diffusers_library.jpg" width="150"/>""")
-     gr.HTML("""
-     <div style="border-top: 1px solid #303030;">
-         <br>
-         <p>Space by: <a href="https://twitter.com/hahahahohohe"><img src="https://img.shields.io/twitter/follow/hahahahohohe?label=%40anzorq&style=social" alt="Twitter Follow"></a></p><br>
-         <a href="https://www.buymeacoffee.com/anzorq" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 45px !important;width: 162px !important;" ></a><br><br>
-         <p><img src="https://visitor-badge.glitch.me/badge?page_id=anzorq.sd-to-diffusers" alt="visitors"></p>
-     </div>
-     """)
- 
- demo.queue()
- demo.launch(debug=True, share=utils.is_google_colab())
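
The safetensors branch above can be exercised in isolation; a sketch with hypothetical file names:

    import torch
    from safetensors import safe_open

    tensors = {}
    with safe_open('model.safetensors', framework='pt', device='cpu') as f:
        for key in f.keys():
            tensors[key] = f.get_tensor(key)  # materialize each tensor on CPU
    torch.save(tensors, 'model_safe.ckpt')    # plain torch checkpoint the converter script accepts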
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/canvas.d.ts DELETED
@@ -1,2 +0,0 @@
- import Canvas from './gameobjects/canvas/canvas/Canvas';
- export default Canvas;
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/basesizer/ShakeMethods.js DELETED
@@ -1,53 +0,0 @@
- import Shake from '../shake/Shake.js';
- import { WaitComplete } from '../utils/WaitEvent.js';
- 
- const IsPlainObject = Phaser.Utils.Objects.IsPlainObject;
- 
- var OnInitShake = function (gameObject, shake) {
-     // Route 'complete' of shake to gameObject
-     shake.on('complete', function () {
-         gameObject.emit('shake.complete', gameObject);
-     })
- 
-     // Shake effect won't change position
- }
- 
- export default {
-     shake(duration, magnitude, magnitudeMode) {
-         if (IsPlainObject(duration)) {
-             var config = duration;
-             duration = config.duration;
-             magnitude = config.magnitude;
-             magnitudeMode = config.magnitudeMode;
-         }
- 
-         if (this._shake === undefined) {
-             this._shake = new Shake(this, {
-                 mode: 0,
-                 magnitudeMode: 1
-             });
-             OnInitShake(this, this._shake);
-         }
- 
-         if (duration !== undefined) {
-             this._shake.setDuration(duration);
-         }
- 
-         if (magnitude !== undefined) {
-             this._shake.setMagnitude(magnitude);
-         }
- 
-         if (magnitudeMode !== undefined) {
-             this._shake.setMagnitudeMode(magnitudeMode);
-         }
- 
-         this._shake.shake();
- 
-         return this;
-     },
- 
-     shakePromise(duration, magnitude) {
-         this.shake(duration, magnitude);
-         return WaitComplete(this._shake);
-     },
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/maker/utils/ParseYAML.js DELETED
@@ -1,15 +0,0 @@
- import yaml from '../../yaml/yaml.js';
- 
- var ParseYAML = function (s) {
-     if (typeof (s) === 'string') {
-         try {
-             return yaml.load(s);
-         } catch (e) {
-             console.log(e);
-             return undefined;
-         }
-     }
-     return s;
- }
- 
- export default ParseYAML;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/torch_utils/ops/conv2d_resample.py DELETED
@@ -1,158 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
- 
- """2D convolution with optional up/downsampling."""
- 
- import torch
- 
- from .. import misc
- from . import conv2d_gradfix
- from . import upfirdn2d
- from .upfirdn2d import _parse_padding
- from .upfirdn2d import _get_filter_size
- 
- # ----------------------------------------------------------------------------
- 
- 
- def _get_weight_shape(w):
-     with misc.suppress_tracer_warnings():  # this value will be treated as a constant
-         shape = [int(sz) for sz in w.shape]
-     misc.assert_shape(w, shape)
-     return shape
- 
- # ----------------------------------------------------------------------------
- 
- 
- def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True):
-     """Wrapper for the underlying `conv2d()` and `conv_transpose2d()` implementations.
-     """
-     _out_channels, _in_channels_per_group, kh, kw = _get_weight_shape(w)
- 
-     # Flip weight if requested.
-     # Note: conv2d() actually performs correlation (flip_weight=True) not convolution (flip_weight=False).
-     if not flip_weight and (kw > 1 or kh > 1):
-         w = w.flip([2, 3])
- 
-     # Execute using conv2d_gradfix.
-     op = conv2d_gradfix.conv_transpose2d if transpose else conv2d_gradfix.conv2d
-     return op(x, w, stride=stride, padding=padding, groups=groups)
- 
- # ----------------------------------------------------------------------------
- 
- 
- @misc.profiled_function
- def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
-     r"""2D convolution with optional up/downsampling.
- 
-     Padding is performed only once at the beginning, not between the operations.
- 
-     Args:
-         x:           Input tensor of shape
-                      `[batch_size, in_channels, in_height, in_width]`.
-         w:           Weight tensor of shape
-                      `[out_channels, in_channels//groups, kernel_height, kernel_width]`.
-         f:           Low-pass filter for up/downsampling. Must be prepared beforehand by
-                      calling upfirdn2d.setup_filter(). None = identity (default).
-         up:          Integer upsampling factor (default: 1).
-         down:        Integer downsampling factor (default: 1).
-         padding:     Padding with respect to the upsampled image. Can be a single number
-                      or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
-                      (default: 0).
-         groups:      Split input channels into N groups (default: 1).
-         flip_weight: False = convolution, True = correlation (default: True).
-         flip_filter: False = convolution, True = correlation (default: False).
- 
-     Returns:
-         Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
-     """
-     # Validate arguments.
-     assert isinstance(x, torch.Tensor) and (x.ndim == 4)
-     assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
-     assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)
-     assert isinstance(up, int) and (up >= 1)
-     assert isinstance(down, int) and (down >= 1)
-     assert isinstance(groups, int) and (groups >= 1)
-     out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
-     fw, fh = _get_filter_size(f)
-     px0, px1, py0, py1 = _parse_padding(padding)
- 
-     # Adjust padding to account for up/downsampling.
-     if up > 1:
-         px0 += (fw + up - 1) // 2
-         px1 += (fw - up) // 2
-         py0 += (fh + up - 1) // 2
-         py1 += (fh - up) // 2
-     if down > 1:
-         px0 += (fw - down + 1) // 2
-         px1 += (fw - down) // 2
-         py0 += (fh - down + 1) // 2
-         py1 += (fh - down) // 2
- 
-     # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
-     if kw == 1 and kh == 1 and (down > 1 and up == 1):
-         x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0, px1, py0, py1], flip_filter=flip_filter)
-         x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
-         return x
- 
-     # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
-     if kw == 1 and kh == 1 and (up > 1 and down == 1):
-         x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
-         x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter)
-         return x
- 
-     # Fast path: downsampling only => use strided convolution.
-     if down > 1 and up == 1:
-         x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0, px1, py0, py1], flip_filter=flip_filter)
-         x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
-         return x
- 
-     # Fast path: upsampling with optional downsampling => use transpose strided convolution.
-     if up > 1:
-         if groups == 1:
-             w = w.transpose(0, 1)
-         else:
-             w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
-             w = w.transpose(1, 2)
-             w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)
-         px0 -= kw - 1
-         px1 -= kw - up
-         py0 -= kh - 1
-         py1 -= kh - up
-         pxt = max(min(-px0, -px1), 0)
-         pyt = max(min(-py0, -py1), 0)
-         x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt, pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
-         x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt, px1+pxt, py0+pyt, py1+pyt], gain=up**2, flip_filter=flip_filter)
-         if down > 1:
-             x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
-         return x
- 
-     # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
-     if up == 1 and down == 1:
-         if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
-             return _conv2d_wrapper(x=x, w=w, padding=[py0, px0], groups=groups, flip_weight=flip_weight)
- 
-     # Fallback: Generic reference implementation.
-     x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter)
-     x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
-     if down > 1:
-         x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
-     return x
- 
- # ----------------------------------------------------------------------------
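
A usage sketch under the repo layout above (2x upsampling of a 64x64 feature map with a 3x3 kernel; padding is specified with respect to the upsampled image):

    import torch
    from torch_utils.ops import upfirdn2d
    from torch_utils.ops.conv2d_resample import conv2d_resample

    x = torch.randn(1, 3, 64, 64)              # [N, C_in, H, W]
    w = torch.randn(8, 3, 3, 3)                # [C_out, C_in, kh, kw]
    f = upfirdn2d.setup_filter([1, 3, 3, 1])   # low-pass filter, as the docstring requires
    y = conv2d_resample(x=x, w=w, f=f, up=2, padding=1)
    print(y.shape)                             # torch.Size([1, 8, 128, 128])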
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Amrrs/DragGan-Inversion/viz/latent_widget.py DELETED
@@ -1,100 +0,0 @@
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- #
- # NVIDIA CORPORATION and its licensors retain all intellectual property
- # and proprietary rights in and to this software, related documentation
- # and any modifications thereto. Any use, reproduction, disclosure or
- # distribution of this software and related documentation without an express
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
- 
- import os
- import numpy as np
- import imgui
- import dnnlib
- import torch
- from gui_utils import imgui_utils
- 
- # ----------------------------------------------------------------------------
- 
- 
- class LatentWidget:
-     def __init__(self, viz):
-         self.viz = viz
-         self.seed = 0
-         self.w_plus = True
-         self.reg = 0
-         self.lr = 0.001
-         self.w_path = ''
-         self.w_load = None
-         self.defer_frames = 0
-         self.disabled_time = 0
- 
-     @imgui_utils.scoped_by_object_id
-     def __call__(self, show=True):
-         viz = self.viz
-         reset_w = False  # defined up front so hiding the panel (show=False) cannot raise a NameError
-         if show:
-             with imgui_utils.grayed_out(self.disabled_time != 0):
-                 imgui.text('Latent')
-                 imgui.same_line(viz.label_w)
-                 with imgui_utils.item_width(viz.font_size * 8.75):
-                     changed, seed = imgui.input_int('Seed', self.seed)
-                     if changed:
-                         self.seed = seed
-                         # reset latent code
-                         self.w_load = None
- 
-                 # load latent code
-                 imgui.text(' ')
-                 imgui.same_line(viz.label_w)
-                 _changed, self.w_path = imgui_utils.input_text('##path', self.w_path, 1024,
-                                                                flags=(imgui.INPUT_TEXT_AUTO_SELECT_ALL | imgui.INPUT_TEXT_ENTER_RETURNS_TRUE),
-                                                                width=(-1),
-                                                                help_text='Path to latent code')
-                 if imgui.is_item_hovered() and not imgui.is_item_active() and self.w_path != '':
-                     imgui.set_tooltip(self.w_path)
- 
-                 imgui.text(' ')
-                 imgui.same_line(viz.label_w)
-                 if imgui_utils.button('Load latent', width=viz.button_w, enabled=(self.disabled_time == 0 and 'image' in viz.result)):
-                     assert os.path.isfile(self.w_path), f"{self.w_path} does not exist!"
-                     self.w_load = torch.load(self.w_path)
-                     self.defer_frames = 2
-                     self.disabled_time = 0.5
- 
-                 imgui.text(' ')
-                 imgui.same_line(viz.label_w)
-                 with imgui_utils.item_width(viz.button_w):
-                     changed, lr = imgui.input_float('Step Size', self.lr)
-                     if changed:
-                         self.lr = lr
- 
-                 # imgui.text(' ')
-                 # imgui.same_line(viz.label_w)
-                 # with imgui_utils.item_width(viz.button_w):
-                 #     changed, reg = imgui.input_float('Regularize', self.reg)
-                 #     if changed:
-                 #         self.reg = reg
- 
-                 imgui.text(' ')
-                 imgui.same_line(viz.label_w)
-                 reset_w = imgui_utils.button('Reset', width=viz.button_w, enabled='image' in viz.result)
-                 imgui.same_line()
-                 _clicked, w = imgui.checkbox('w', not self.w_plus)
-                 if w:
-                     self.w_plus = False
-                 imgui.same_line()
-                 _clicked, self.w_plus = imgui.checkbox('w+', self.w_plus)
- 
-         self.disabled_time = max(self.disabled_time - viz.frame_delta, 0)
-         if self.defer_frames > 0:
-             self.defer_frames -= 1
-         viz.args.w0_seed = self.seed
-         viz.args.w_load = self.w_load
-         viz.args.reg = self.reg
-         viz.args.w_plus = self.w_plus
-         viz.args.reset_w = reset_w
-         viz.args.lr = self.lr  # use the stored value so this also works when the panel is hidden
- 
- # ----------------------------------------------------------------------------
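
For 'Load latent', the widget expects a torch-serialized tensor at the given path. A sketch of producing such a file; the [1, num_ws, w_dim] layout is an assumption based on typical StyleGAN w+ codes, not something this file enforces:

    import torch

    w_plus = torch.randn(1, 14, 512)   # hypothetical w+ code for a 256px generator
    torch.save(w_plus, 'latent.pt')    # then point the widget's path box at 'latent.pt'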
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/unclip/__init__.py DELETED
@@ -1,17 +0,0 @@
- from ...utils import (
-     OptionalDependencyNotAvailable,
-     is_torch_available,
-     is_transformers_available,
-     is_transformers_version,
- )
- 
- 
- try:
-     if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
-         raise OptionalDependencyNotAvailable()
- except OptionalDependencyNotAvailable:
-     from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
- else:
-     from .pipeline_unclip import UnCLIPPipeline
-     from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
-     from .text_proj import UnCLIPTextProjModel
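
The guarded import above keeps the public surface stable: the same names are importable whether or not the optional dependencies are installed, and the dummy objects only raise a descriptive error on use. A usage sketch (the checkpoint id is the public kakaobrain UnCLIP release, shown for illustration):

    from diffusers import UnCLIPPipeline

    pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
    image = pipe("a photograph of an astronaut riding a horse").images[0]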
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Andy1621/uniformer_image_segmentation/configs/psanet/psanet_r50-d8_769x769_80k_cityscapes.py DELETED
@@ -1,9 +0,0 @@
- _base_ = [
-     '../_base_/models/psanet_r50-d8.py',
-     '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
-     '../_base_/schedules/schedule_80k.py'
- ]
- model = dict(
-     decode_head=dict(align_corners=True),
-     auxiliary_head=dict(align_corners=True),
-     test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
 
 
 
 
 
 
 
 
 
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/uniformer/mmseg/datasets/pipelines/transforms.py DELETED
@@ -1,889 +0,0 @@
1
- import annotator.uniformer.mmcv as mmcv
2
- import numpy as np
3
- from annotator.uniformer.mmcv.utils import deprecated_api_warning, is_tuple_of
4
- from numpy import random
5
-
6
- from ..builder import PIPELINES
7
-
8
-
9
- @PIPELINES.register_module()
10
- class Resize(object):
11
- """Resize images & seg.
12
-
13
- This transform resizes the input image to some scale. If the input dict
14
- contains the key "scale", then the scale in the input dict is used,
15
- otherwise the specified scale in the init method is used.
16
-
17
- ``img_scale`` can be None, a tuple (single-scale) or a list of tuple
18
- (multi-scale). There are 4 multiscale modes:
19
-
20
- - ``ratio_range is not None``:
21
- 1. When img_scale is None, img_scale is the shape of image in results
22
- (img_scale = results['img'].shape[:2]) and the image is resized based
23
- on the original size. (mode 1)
24
- 2. When img_scale is a tuple (single-scale), randomly sample a ratio from
25
- the ratio range and multiply it with the image scale. (mode 2)
26
-
27
- - ``ratio_range is None and multiscale_mode == "range"``: randomly sample a
28
- scale from the a range. (mode 3)
29
-
30
- - ``ratio_range is None and multiscale_mode == "value"``: randomly sample a
31
- scale from multiple scales. (mode 4)
32
-
33
- Args:
34
- img_scale (tuple or list[tuple]): Images scales for resizing.
35
- multiscale_mode (str): Either "range" or "value".
36
- ratio_range (tuple[float]): (min_ratio, max_ratio)
37
- keep_ratio (bool): Whether to keep the aspect ratio when resizing the
38
- image.
39
- """
40
-
41
- def __init__(self,
42
- img_scale=None,
43
- multiscale_mode='range',
44
- ratio_range=None,
45
- keep_ratio=True):
46
- if img_scale is None:
47
- self.img_scale = None
48
- else:
49
- if isinstance(img_scale, list):
50
- self.img_scale = img_scale
51
- else:
52
- self.img_scale = [img_scale]
53
- assert mmcv.is_list_of(self.img_scale, tuple)
54
-
55
- if ratio_range is not None:
56
- # mode 1: given img_scale=None and a range of image ratio
57
- # mode 2: given a scale and a range of image ratio
58
- assert self.img_scale is None or len(self.img_scale) == 1
59
- else:
60
- # mode 3 and 4: given multiple scales or a range of scales
61
- assert multiscale_mode in ['value', 'range']
62
-
63
- self.multiscale_mode = multiscale_mode
64
- self.ratio_range = ratio_range
65
- self.keep_ratio = keep_ratio
66
-
67
- @staticmethod
68
- def random_select(img_scales):
69
- """Randomly select an img_scale from given candidates.
70
-
71
- Args:
72
- img_scales (list[tuple]): Images scales for selection.
73
-
74
- Returns:
75
- (tuple, int): Returns a tuple ``(img_scale, scale_dix)``,
76
- where ``img_scale`` is the selected image scale and
77
- ``scale_idx`` is the selected index in the given candidates.
78
- """
79
-
80
- assert mmcv.is_list_of(img_scales, tuple)
81
- scale_idx = np.random.randint(len(img_scales))
82
- img_scale = img_scales[scale_idx]
83
- return img_scale, scale_idx
84
-
85
- @staticmethod
86
- def random_sample(img_scales):
87
- """Randomly sample an img_scale when ``multiscale_mode=='range'``.
88
-
89
- Args:
90
- img_scales (list[tuple]): Images scale range for sampling.
91
- There must be two tuples in img_scales, which specify the lower
92
- and upper bound of image scales.
93
-
94
- Returns:
95
- (tuple, None): Returns a tuple ``(img_scale, None)``, where
96
- ``img_scale`` is sampled scale and None is just a placeholder
97
- to be consistent with :func:`random_select`.
98
- """
99
-
100
- assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
101
- img_scale_long = [max(s) for s in img_scales]
102
- img_scale_short = [min(s) for s in img_scales]
103
- long_edge = np.random.randint(
104
- min(img_scale_long),
105
- max(img_scale_long) + 1)
106
- short_edge = np.random.randint(
107
- min(img_scale_short),
108
- max(img_scale_short) + 1)
109
- img_scale = (long_edge, short_edge)
110
- return img_scale, None
111
-
112
- @staticmethod
113
- def random_sample_ratio(img_scale, ratio_range):
114
- """Randomly sample an img_scale when ``ratio_range`` is specified.
115
-
116
- A ratio will be randomly sampled from the range specified by
117
- ``ratio_range``. Then it would be multiplied with ``img_scale`` to
118
- generate sampled scale.
119
-
120
- Args:
121
- img_scale (tuple): Images scale base to multiply with ratio.
122
- ratio_range (tuple[float]): The minimum and maximum ratio to scale
123
- the ``img_scale``.
124
-
125
- Returns:
126
- (tuple, None): Returns a tuple ``(scale, None)``, where
127
- ``scale`` is sampled ratio multiplied with ``img_scale`` and
128
- None is just a placeholder to be consistent with
129
- :func:`random_select`.
130
- """
131
-
132
- assert isinstance(img_scale, tuple) and len(img_scale) == 2
133
- min_ratio, max_ratio = ratio_range
134
- assert min_ratio <= max_ratio
135
- ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
136
- scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
137
- return scale, None
138
-
139
- def _random_scale(self, results):
140
- """Randomly sample an img_scale according to ``ratio_range`` and
141
- ``multiscale_mode``.
142
-
143
- If ``ratio_range`` is specified, a ratio will be sampled and be
144
- multiplied with ``img_scale``.
145
- If multiple scales are specified by ``img_scale``, a scale will be
146
- sampled according to ``multiscale_mode``.
147
- Otherwise, single scale will be used.
148
-
149
- Args:
150
- results (dict): Result dict from :obj:`dataset`.
151
-
152
- Returns:
153
- dict: Two new keys 'scale` and 'scale_idx` are added into
154
- ``results``, which would be used by subsequent pipelines.
155
- """
156
-
157
- if self.ratio_range is not None:
158
- if self.img_scale is None:
159
- h, w = results['img'].shape[:2]
160
- scale, scale_idx = self.random_sample_ratio((w, h),
161
- self.ratio_range)
162
- else:
163
- scale, scale_idx = self.random_sample_ratio(
164
- self.img_scale[0], self.ratio_range)
165
- elif len(self.img_scale) == 1:
166
- scale, scale_idx = self.img_scale[0], 0
167
- elif self.multiscale_mode == 'range':
168
- scale, scale_idx = self.random_sample(self.img_scale)
169
- elif self.multiscale_mode == 'value':
170
- scale, scale_idx = self.random_select(self.img_scale)
171
- else:
172
- raise NotImplementedError
173
-
174
- results['scale'] = scale
175
- results['scale_idx'] = scale_idx
176
-
177
- def _resize_img(self, results):
178
- """Resize images with ``results['scale']``."""
179
- if self.keep_ratio:
180
- img, scale_factor = mmcv.imrescale(
181
- results['img'], results['scale'], return_scale=True)
182
- # the w_scale and h_scale has minor difference
183
- # a real fix should be done in the mmcv.imrescale in the future
184
- new_h, new_w = img.shape[:2]
185
- h, w = results['img'].shape[:2]
186
- w_scale = new_w / w
187
- h_scale = new_h / h
188
- else:
189
- img, w_scale, h_scale = mmcv.imresize(
190
- results['img'], results['scale'], return_scale=True)
191
- scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
192
- dtype=np.float32)
193
- results['img'] = img
194
- results['img_shape'] = img.shape
195
- results['pad_shape'] = img.shape # in case that there is no padding
196
- results['scale_factor'] = scale_factor
197
- results['keep_ratio'] = self.keep_ratio
198
-
199
- def _resize_seg(self, results):
200
- """Resize semantic segmentation map with ``results['scale']``."""
201
- for key in results.get('seg_fields', []):
202
- if self.keep_ratio:
203
- gt_seg = mmcv.imrescale(
204
- results[key], results['scale'], interpolation='nearest')
205
- else:
206
- gt_seg = mmcv.imresize(
207
- results[key], results['scale'], interpolation='nearest')
208
- results[key] = gt_seg
209
-
210
- def __call__(self, results):
211
- """Call function to resize images, bounding boxes, masks, semantic
212
- segmentation map.
213
-
214
- Args:
215
- results (dict): Result dict from loading pipeline.
216
-
217
- Returns:
218
- dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor',
219
- 'keep_ratio' keys are added into result dict.
220
- """
221
-
222
- if 'scale' not in results:
223
- self._random_scale(results)
224
- self._resize_img(results)
225
- self._resize_seg(results)
226
- return results
227
-
228
- def __repr__(self):
229
- repr_str = self.__class__.__name__
230
- repr_str += (f'(img_scale={self.img_scale}, '
231
- f'multiscale_mode={self.multiscale_mode}, '
232
- f'ratio_range={self.ratio_range}, '
233
- f'keep_ratio={self.keep_ratio})')
234
- return repr_str
235
-
236
-
237
@PIPELINES.register_module()
class RandomFlip(object):
    """Flip the image & seg.

    If the input dict contains the key "flip", that flag is used; otherwise
    the flip is decided randomly with the probability given at init time.

    Args:
        prob (float, optional): The flipping probability. Default: None.
        direction (str, optional): The flipping direction. Options are
            'horizontal' and 'vertical'. Default: 'horizontal'.
    """

    @deprecated_api_warning({'flip_ratio': 'prob'}, cls_name='RandomFlip')
    def __init__(self, prob=None, direction='horizontal'):
        self.prob = prob
        self.direction = direction
        if prob is not None:
            assert 0 <= prob <= 1
        assert direction in ['horizontal', 'vertical']

    def __call__(self, results):
        """Call function to flip the image and semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Flipped results; 'flip' and 'flip_direction' keys are
                added to the result dict.
        """

        if 'flip' not in results:
            flip = np.random.rand() < self.prob
            results['flip'] = flip
        if 'flip_direction' not in results:
            results['flip_direction'] = self.direction
        if results['flip']:
            # flip image
            results['img'] = mmcv.imflip(
                results['img'], direction=results['flip_direction'])

            # flip segs
            for key in results.get('seg_fields', []):
                # use copy() to make numpy stride positive
                results[key] = mmcv.imflip(
                    results[key], direction=results['flip_direction']).copy()
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(prob={self.prob})'


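# --- Illustrative sketch, not part of the original file ---------------------
# A minimal, hypothetical usage of RandomFlip on a synthetic sample, assuming
# numpy and mmcv are importable and this module's classes are in scope.
import numpy as np

flip = RandomFlip(prob=0.5, direction='horizontal')
sample = {
    'img': np.random.randint(0, 256, (4, 6, 3), dtype=np.uint8),
    'seg_fields': ['gt_semantic_seg'],
    'gt_semantic_seg': np.zeros((4, 6), dtype=np.uint8),
}
out = flip(sample)
print(out['flip'], out['flip_direction'])  # e.g. "True horizontal"

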
@PIPELINES.register_module()
class Pad(object):
    """Pad the image & mask.

    There are two padding modes: (1) pad to a fixed size and (2) pad to the
    minimum size that is divisible by some number.
    Added keys are "pad_shape", "pad_fixed_size" and "pad_size_divisor".

    Args:
        size (tuple, optional): Fixed padding size.
        size_divisor (int, optional): The divisor of padded size.
        pad_val (float, optional): Padding value. Default: 0.
        seg_pad_val (float, optional): Padding value of segmentation map.
            Default: 255.
    """

    def __init__(self,
                 size=None,
                 size_divisor=None,
                 pad_val=0,
                 seg_pad_val=255):
        self.size = size
        self.size_divisor = size_divisor
        self.pad_val = pad_val
        self.seg_pad_val = seg_pad_val
        # only one of size and size_divisor should be valid
        assert size is not None or size_divisor is not None
        assert size is None or size_divisor is None

    def _pad_img(self, results):
        """Pad images according to ``self.size``."""
        if self.size is not None:
            padded_img = mmcv.impad(
                results['img'], shape=self.size, pad_val=self.pad_val)
        elif self.size_divisor is not None:
            padded_img = mmcv.impad_to_multiple(
                results['img'], self.size_divisor, pad_val=self.pad_val)
        results['img'] = padded_img
        results['pad_shape'] = padded_img.shape
        results['pad_fixed_size'] = self.size
        results['pad_size_divisor'] = self.size_divisor

    def _pad_seg(self, results):
        """Pad semantic segmentation maps according to
        ``results['pad_shape']``."""
        for key in results.get('seg_fields', []):
            results[key] = mmcv.impad(
                results[key],
                shape=results['pad_shape'][:2],
                pad_val=self.seg_pad_val)

    def __call__(self, results):
        """Call function to pad images, masks, semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Updated result dict.
        """

        self._pad_img(results)
        self._pad_seg(results)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(size={self.size}, size_divisor={self.size_divisor}, ' \
                    f'pad_val={self.pad_val})'
        return repr_str


@PIPELINES.register_module()
class Normalize(object):
    """Normalize the image.

    Added key is "img_norm_cfg".

    Args:
        mean (sequence): Mean values of 3 channels.
        std (sequence): Std values of 3 channels.
        to_rgb (bool): Whether to convert the image from BGR to RGB.
            Default: True.
    """

    def __init__(self, mean, std, to_rgb=True):
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
        self.to_rgb = to_rgb

    def __call__(self, results):
        """Call function to normalize images.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Normalized results; the 'img_norm_cfg' key is added to
                the result dict.
        """

        results['img'] = mmcv.imnormalize(results['img'], self.mean, self.std,
                                          self.to_rgb)
        results['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(mean={self.mean}, std={self.std}, to_rgb=' \
                    f'{self.to_rgb})'
        return repr_str


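# --- Illustrative sketch, not part of the original file ---------------------
# A hypothetical check of the normalization math: mmcv.imnormalize computes
# (img - mean) / std per channel (after an optional BGR -> RGB conversion).
import numpy as np

norm = Normalize(mean=[123.675, 116.28, 103.53],
                 std=[58.395, 57.12, 57.375], to_rgb=False)
img = np.full((2, 2, 3), 128, dtype=np.uint8)
out = norm({'img': img})['img']
expected = (img.astype(np.float32) - norm.mean) / norm.std
assert np.allclose(out, expected, atol=1e-4)

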
@PIPELINES.register_module()
class Rerange(object):
    """Rerange the image pixel value.

    Args:
        min_value (float or int): Minimum value of the reranged image.
            Default: 0.
        max_value (float or int): Maximum value of the reranged image.
            Default: 255.
    """

    def __init__(self, min_value=0, max_value=255):
        assert isinstance(min_value, (float, int))
        assert isinstance(max_value, (float, int))
        assert min_value < max_value
        self.min_value = min_value
        self.max_value = max_value

    def __call__(self, results):
        """Call function to rerange images.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Reranged results.
        """

        img = results['img']
        img_min_value = np.min(img)
        img_max_value = np.max(img)

        assert img_min_value < img_max_value
        # rerange to [0, 1]
        img = (img - img_min_value) / (img_max_value - img_min_value)
        # rerange to [min_value, max_value]
        img = img * (self.max_value - self.min_value) + self.min_value
        results['img'] = img

        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(min_value={self.min_value}, max_value={self.max_value})'
        return repr_str


@PIPELINES.register_module()
class CLAHE(object):
    """Use CLAHE method to process the image.

    See `ZUIDERVELD, K. Contrast Limited Adaptive Histogram Equalization[J].
    Graphics Gems, 1994: 474-485.` for more information.

    Args:
        clip_limit (float): Threshold for contrast limiting. Default: 40.0.
        tile_grid_size (tuple[int]): Size of grid for histogram equalization.
            The input image will be divided into equally sized rectangular
            tiles. It defines the number of tiles in row and column.
            Default: (8, 8).
    """

    def __init__(self, clip_limit=40.0, tile_grid_size=(8, 8)):
        assert isinstance(clip_limit, (float, int))
        self.clip_limit = clip_limit
        assert is_tuple_of(tile_grid_size, int)
        assert len(tile_grid_size) == 2
        self.tile_grid_size = tile_grid_size

    def __call__(self, results):
        """Call function to process images with the CLAHE method.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Processed results.
        """

        for i in range(results['img'].shape[2]):
            results['img'][:, :, i] = mmcv.clahe(
                np.array(results['img'][:, :, i], dtype=np.uint8),
                self.clip_limit, self.tile_grid_size)

        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(clip_limit={self.clip_limit}, '\
                    f'tile_grid_size={self.tile_grid_size})'
        return repr_str


@PIPELINES.register_module()
class RandomCrop(object):
    """Randomly crop the image & seg.

    Args:
        crop_size (tuple): Expected size after cropping, (h, w).
        cat_max_ratio (float): The maximum ratio that a single category may
            occupy within the crop.
        ignore_index (int): The label index to ignore. Default: 255.
    """

    def __init__(self, crop_size, cat_max_ratio=1., ignore_index=255):
        assert crop_size[0] > 0 and crop_size[1] > 0
        self.crop_size = crop_size
        self.cat_max_ratio = cat_max_ratio
        self.ignore_index = ignore_index

    def get_crop_bbox(self, img):
        """Randomly get a crop bounding box."""
        margin_h = max(img.shape[0] - self.crop_size[0], 0)
        margin_w = max(img.shape[1] - self.crop_size[1], 0)
        offset_h = np.random.randint(0, margin_h + 1)
        offset_w = np.random.randint(0, margin_w + 1)
        crop_y1, crop_y2 = offset_h, offset_h + self.crop_size[0]
        crop_x1, crop_x2 = offset_w, offset_w + self.crop_size[1]

        return crop_y1, crop_y2, crop_x1, crop_x2

    def crop(self, img, crop_bbox):
        """Crop from ``img``."""
        crop_y1, crop_y2, crop_x1, crop_x2 = crop_bbox
        img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]
        return img

    def __call__(self, results):
        """Call function to randomly crop images and semantic segmentation
        maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Randomly cropped results; the 'img_shape' key in the
                result dict is updated according to the crop size.
        """

        img = results['img']
        crop_bbox = self.get_crop_bbox(img)
        if self.cat_max_ratio < 1.:
            # Re-sample the crop window up to 10 times until no single
            # category dominates the crop beyond ``cat_max_ratio``.
            for _ in range(10):
                seg_temp = self.crop(results['gt_semantic_seg'], crop_bbox)
                labels, cnt = np.unique(seg_temp, return_counts=True)
                cnt = cnt[labels != self.ignore_index]
                if len(cnt) > 1 and np.max(cnt) / np.sum(
                        cnt) < self.cat_max_ratio:
                    break
                crop_bbox = self.get_crop_bbox(img)

        # crop the image
        img = self.crop(img, crop_bbox)
        img_shape = img.shape
        results['img'] = img
        results['img_shape'] = img_shape

        # crop semantic seg
        for key in results.get('seg_fields', []):
            results[key] = self.crop(results[key], crop_bbox)

        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(crop_size={self.crop_size})'


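# --- Illustrative sketch, not part of the original file ---------------------
# A hypothetical example of the rejection loop above: the crop window is
# re-drawn while a single category would exceed ``cat_max_ratio``.
import numpy as np

crop = RandomCrop(crop_size=(2, 2), cat_max_ratio=0.75, ignore_index=255)
sample = {
    'img': np.zeros((4, 4, 3), dtype=np.uint8),
    'seg_fields': ['gt_semantic_seg'],
    'gt_semantic_seg': np.arange(16, dtype=np.uint8).reshape(4, 4) % 4,
}
out = crop(sample)
assert out['img'].shape == (2, 2, 3)

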
@PIPELINES.register_module()
class RandomRotate(object):
    """Rotate the image & seg.

    Args:
        prob (float): The rotation probability.
        degree (float, tuple[float]): Range of degrees to select from. If
            degree is a number instead of a tuple like (min, max), the range
            of degrees will be (``-degree``, ``+degree``).
        pad_val (float, optional): Padding value of image. Default: 0.
        seg_pad_val (float, optional): Padding value of segmentation map.
            Default: 255.
        center (tuple[float], optional): Center point (w, h) of the rotation
            in the source image. If not specified, the center of the image
            will be used. Default: None.
        auto_bound (bool): Whether to adjust the image size to cover the
            whole rotated image. Default: False.
    """

    def __init__(self,
                 prob,
                 degree,
                 pad_val=0,
                 seg_pad_val=255,
                 center=None,
                 auto_bound=False):
        self.prob = prob
        assert 0 <= prob <= 1
        if isinstance(degree, (float, int)):
            assert degree > 0, f'degree {degree} should be positive'
            self.degree = (-degree, degree)
        else:
            self.degree = degree
        assert len(self.degree) == 2, f'degree {self.degree} should be a ' \
                                      f'tuple of (min, max)'
        self.pad_val = pad_val  # fixed typo: was ``self.pal_val``
        self.seg_pad_val = seg_pad_val
        self.center = center
        self.auto_bound = auto_bound

    def __call__(self, results):
        """Call function to rotate images and semantic segmentation maps.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Rotated results.
        """

        rotate = np.random.rand() < self.prob
        degree = np.random.uniform(min(*self.degree), max(*self.degree))
        if rotate:
            # rotate image
            results['img'] = mmcv.imrotate(
                results['img'],
                angle=degree,
                border_value=self.pad_val,
                center=self.center,
                auto_bound=self.auto_bound)

            # rotate segs
            for key in results.get('seg_fields', []):
                results[key] = mmcv.imrotate(
                    results[key],
                    angle=degree,
                    border_value=self.seg_pad_val,
                    center=self.center,
                    auto_bound=self.auto_bound,
                    interpolation='nearest')
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(prob={self.prob}, ' \
                    f'degree={self.degree}, ' \
                    f'pad_val={self.pad_val}, ' \
                    f'seg_pad_val={self.seg_pad_val}, ' \
                    f'center={self.center}, ' \
                    f'auto_bound={self.auto_bound})'
        return repr_str


@PIPELINES.register_module()
class RGB2Gray(object):
    """Convert an RGB image to a grayscale image.

    This transform calculates the weighted mean of the input image channels
    with ``weights`` and then expands the channels to ``out_channels``. When
    ``out_channels`` is None, the number of output channels is the same as
    the number of input channels.

    Args:
        out_channels (int): Expected number of output channels after
            transforming. Default: None.
        weights (tuple[float]): The weights to calculate the weighted mean.
            Default: (0.299, 0.587, 0.114).
    """

    def __init__(self, out_channels=None, weights=(0.299, 0.587, 0.114)):
        assert out_channels is None or out_channels > 0
        self.out_channels = out_channels
        assert isinstance(weights, tuple)
        for item in weights:
            assert isinstance(item, (float, int))
        self.weights = weights

    def __call__(self, results):
        """Call function to convert an RGB image to a grayscale image.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with the grayscale image.
        """
        img = results['img']
        assert len(img.shape) == 3
        assert img.shape[2] == len(self.weights)
        weights = np.array(self.weights).reshape((1, 1, -1))
        img = (img * weights).sum(2, keepdims=True)
        if self.out_channels is None:
            img = img.repeat(weights.shape[2], axis=2)
        else:
            img = img.repeat(self.out_channels, axis=2)

        results['img'] = img
        results['img_shape'] = img.shape

        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(out_channels={self.out_channels}, ' \
                    f'weights={self.weights})'
        return repr_str


@PIPELINES.register_module()
class AdjustGamma(object):
    """Use gamma correction to process the image.

    Args:
        gamma (float or int): Gamma value used in gamma correction.
            Default: 1.0.
    """

    def __init__(self, gamma=1.0):
        assert isinstance(gamma, (float, int))
        assert gamma > 0
        self.gamma = gamma
        inv_gamma = 1.0 / gamma
        self.table = np.array([(i / 255.0)**inv_gamma * 255
                               for i in np.arange(256)]).astype('uint8')

    def __call__(self, results):
        """Call function to process the image with gamma correction.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Processed results.
        """

        results['img'] = mmcv.lut_transform(
            np.array(results['img'], dtype=np.uint8), self.table)

        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(gamma={self.gamma})'


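# --- Illustrative sketch, not part of the original file ---------------------
# The lookup table built in __init__ realizes
# out = 255 * (in / 255) ** (1 / gamma) for uint8 inputs; a hypothetical
# spot check:
adjust = AdjustGamma(gamma=2.0)
assert adjust.table[0] == 0
assert adjust.table[255] == 255
assert adjust.table[64] == int((64 / 255.0) ** 0.5 * 255)  # 127

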
@PIPELINES.register_module()
class SegRescale(object):
    """Rescale semantic segmentation maps.

    Args:
        scale_factor (float): The scale factor of the final output.
    """

    def __init__(self, scale_factor=1):
        self.scale_factor = scale_factor

    def __call__(self, results):
        """Call function to scale the semantic segmentation map.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with the semantic segmentation map scaled.
        """
        for key in results.get('seg_fields', []):
            if self.scale_factor != 1:
                results[key] = mmcv.imrescale(
                    results[key], self.scale_factor, interpolation='nearest')
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'


@PIPELINES.register_module()
class PhotoMetricDistortion(object):
    """Apply photometric distortions to the image sequentially; every
    transformation is applied with a probability of 0.5. Random contrast is
    applied either second or second to last:

    1. random brightness
    2. random contrast (mode 1)
    3. convert color from BGR to HSV
    4. random saturation
    5. random hue
    6. convert color from HSV to BGR
    7. random contrast (mode 0)

    Args:
        brightness_delta (int): delta of brightness.
        contrast_range (tuple): range of contrast.
        saturation_range (tuple): range of saturation.
        hue_delta (int): delta of hue.
    """

    def __init__(self,
                 brightness_delta=32,
                 contrast_range=(0.5, 1.5),
                 saturation_range=(0.5, 1.5),
                 hue_delta=18):
        self.brightness_delta = brightness_delta
        self.contrast_lower, self.contrast_upper = contrast_range
        self.saturation_lower, self.saturation_upper = saturation_range
        self.hue_delta = hue_delta

    def convert(self, img, alpha=1, beta=0):
        """Multiply with alpha and add beta, then clip to [0, 255]."""
        img = img.astype(np.float32) * alpha + beta
        img = np.clip(img, 0, 255)
        return img.astype(np.uint8)

    def brightness(self, img):
        """Brightness distortion."""
        if random.randint(2):
            return self.convert(
                img,
                beta=random.uniform(-self.brightness_delta,
                                    self.brightness_delta))
        return img

    def contrast(self, img):
        """Contrast distortion."""
        if random.randint(2):
            return self.convert(
                img,
                alpha=random.uniform(self.contrast_lower, self.contrast_upper))
        return img

    def saturation(self, img):
        """Saturation distortion."""
        if random.randint(2):
            img = mmcv.bgr2hsv(img)
            img[:, :, 1] = self.convert(
                img[:, :, 1],
                alpha=random.uniform(self.saturation_lower,
                                     self.saturation_upper))
            img = mmcv.hsv2bgr(img)
        return img

    def hue(self, img):
        """Hue distortion."""
        if random.randint(2):
            img = mmcv.bgr2hsv(img)
            img[:, :, 0] = (
                img[:, :, 0].astype(int) +
                random.randint(-self.hue_delta, self.hue_delta)) % 180
            img = mmcv.hsv2bgr(img)
        return img

    def __call__(self, results):
        """Call function to perform photometric distortion on images.

        Args:
            results (dict): Result dict from loading pipeline.

        Returns:
            dict: Result dict with images distorted.
        """

        img = results['img']
        # random brightness
        img = self.brightness(img)

        # mode == 1 --> do random contrast first
        # mode == 0 --> do random contrast last
        # (comment fixed to match the code below)
        mode = random.randint(2)
        if mode == 1:
            img = self.contrast(img)

        # random saturation
        img = self.saturation(img)

        # random hue
        img = self.hue(img)

        # random contrast
        if mode == 0:
            img = self.contrast(img)

        results['img'] = img
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(brightness_delta={self.brightness_delta}, '
                     f'contrast_range=({self.contrast_lower}, '
                     f'{self.contrast_upper}), '
                     f'saturation_range=({self.saturation_lower}, '
                     f'{self.saturation_upper}), '
                     f'hue_delta={self.hue_delta})')
        return repr_str
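

# --- Illustrative sketch, not part of the original file ---------------------
# A hypothetical end-to-end composition of the transforms defined above, in
# the order a typical mmseg train pipeline would apply them.
import numpy as np

pipeline = [
    RandomFlip(prob=0.5),
    PhotoMetricDistortion(),
    Normalize(mean=[123.675, 116.28, 103.53],
              std=[58.395, 57.12, 57.375], to_rgb=True),
    Pad(size_divisor=32),
]
results = {
    'img': np.random.randint(0, 256, (33, 47, 3), dtype=np.uint8),
    'seg_fields': ['gt_semantic_seg'],
    'gt_semantic_seg': np.zeros((33, 47), dtype=np.uint8),
}
for t in pipeline:
    results = t(results)
print(results['img'].shape)  # (64, 64, 3) after padding to a multiple of 32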
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/build_ext.py DELETED
@@ -1,787 +0,0 @@
"""distutils.command.build_ext

Implements the Distutils 'build_ext' command, for building extension
modules (currently limited to C extensions, should accommodate C++
extensions ASAP)."""

import contextlib
import os
import re
import sys
from distutils.core import Command
from distutils.errors import (
    DistutilsOptionError,
    DistutilsSetupError,
    CCompilerError,
    DistutilsError,
    CompileError,
    DistutilsPlatformError,
)
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.sysconfig import get_config_h_filename
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils.util import get_platform
from distutils import log
from . import py37compat

from site import USER_BASE

# An extension name is just a dot-separated list of Python NAMEs (ie.
# the same as a fully-qualified module name).
extension_name_re = re.compile(r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')


def show_compilers():
    from distutils.ccompiler import show_compilers

    show_compilers()


class build_ext(Command):

    description = "build C/C++ extensions (compile/link to build directory)"

    # XXX thoughts on how to deal with complex command-line options like
    # these, i.e. how to make it so fancy_getopt can suck them off the
    # command line and make it look like setup.py defined the appropriate
    # lists of tuples of what-have-you.
    # - each command needs a callback to process its command-line options
    # - Command.__init__() needs access to its share of the whole
    #   command line (must ultimately come from
    #   Distribution.parse_command_line())
    # - it then calls the current command class' option-parsing
    #   callback to deal with weird options like -D, which have to
    #   parse the option text and churn out some custom data
    #   structure
    # - that data structure (in this case, a list of 2-tuples)
    #   will then be present in the command object by the time
    #   we get to finalize_options() (i.e. the constructor
    #   takes care of both command-line and client options
    #   in between initialize_options() and finalize_options())

    sep_by = " (separated by '%s')" % os.pathsep
    user_options = [
        ('build-lib=', 'b', "directory for compiled extension modules"),
        ('build-temp=', 't', "directory for temporary files (build by-products)"),
        (
            'plat-name=',
            'p',
            "platform name to cross-compile for, if supported "
            "(default: %s)" % get_platform(),
        ),
        (
            'inplace',
            'i',
            "ignore build-lib and put compiled extensions into the source "
            + "directory alongside your pure Python modules",
        ),
        (
            'include-dirs=',
            'I',
            "list of directories to search for header files" + sep_by,
        ),
        ('define=', 'D', "C preprocessor macros to define"),
        ('undef=', 'U', "C preprocessor macros to undefine"),
        ('libraries=', 'l', "external C libraries to link with"),
        (
            'library-dirs=',
            'L',
            "directories to search for external C libraries" + sep_by,
        ),
        ('rpath=', 'R', "directories to search for shared C libraries at runtime"),
        ('link-objects=', 'O', "extra explicit link objects to include in the link"),
        ('debug', 'g', "compile/link with debugging information"),
        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
        ('compiler=', 'c', "specify the compiler type"),
        ('parallel=', 'j', "number of parallel build jobs"),
        ('swig-cpp', None, "make SWIG create C++ files (default is C)"),
        ('swig-opts=', None, "list of SWIG command line options"),
        ('swig=', None, "path to the SWIG executable"),
        ('user', None, "add user include, library and rpath"),
    ]

    boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']

    help_options = [
        ('help-compiler', None, "list available compilers", show_compilers),
    ]

    def initialize_options(self):
        self.extensions = None
        self.build_lib = None
        self.plat_name = None
        self.build_temp = None
        self.inplace = 0
        self.package = None

        self.include_dirs = None
        self.define = None
        self.undef = None
        self.libraries = None
        self.library_dirs = None
        self.rpath = None
        self.link_objects = None
        self.debug = None
        self.force = None
        self.compiler = None
        self.swig = None
        self.swig_cpp = None
        self.swig_opts = None
        self.user = None
        self.parallel = None

    def finalize_options(self):  # noqa: C901
        from distutils import sysconfig

        self.set_undefined_options(
            'build',
            ('build_lib', 'build_lib'),
            ('build_temp', 'build_temp'),
            ('compiler', 'compiler'),
            ('debug', 'debug'),
            ('force', 'force'),
            ('parallel', 'parallel'),
            ('plat_name', 'plat_name'),
        )

        if self.package is None:
            self.package = self.distribution.ext_package

        self.extensions = self.distribution.ext_modules

        # Make sure Python's include directories (for Python.h, pyconfig.h,
        # etc.) are in the include search path.
        py_include = sysconfig.get_python_inc()
        plat_py_include = sysconfig.get_python_inc(plat_specific=1)
        if self.include_dirs is None:
            self.include_dirs = self.distribution.include_dirs or []
        if isinstance(self.include_dirs, str):
            self.include_dirs = self.include_dirs.split(os.pathsep)

        # If in a virtualenv, add its include directory
        # Issue 16116
        if sys.exec_prefix != sys.base_exec_prefix:
            self.include_dirs.append(os.path.join(sys.exec_prefix, 'include'))

        # Put the Python "system" include dir at the end, so that
        # any local include dirs take precedence.
        self.include_dirs.extend(py_include.split(os.path.pathsep))
        if plat_py_include != py_include:
            self.include_dirs.extend(plat_py_include.split(os.path.pathsep))

        self.ensure_string_list('libraries')
        self.ensure_string_list('link_objects')

        # Life is easier if we're not forever checking for None, so
        # simplify these options to empty lists if unset
        if self.libraries is None:
            self.libraries = []
        if self.library_dirs is None:
            self.library_dirs = []
        elif isinstance(self.library_dirs, str):
            self.library_dirs = self.library_dirs.split(os.pathsep)

        if self.rpath is None:
            self.rpath = []
        elif isinstance(self.rpath, str):
            self.rpath = self.rpath.split(os.pathsep)

        # for extensions under windows use different directories
        # for Release and Debug builds.
        # also Python's library directory must be appended to library_dirs
        if os.name == 'nt':
            # the 'libs' directory is for binary installs - we assume that
            # must be the *native* platform. But we don't really support
            # cross-compiling via a binary install anyway, so we let it go.
            self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
            if sys.base_exec_prefix != sys.prefix:  # Issue 16116
                self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs'))
            if self.debug:
                self.build_temp = os.path.join(self.build_temp, "Debug")
            else:
                self.build_temp = os.path.join(self.build_temp, "Release")

            # Append the source distribution include and library directories,
            # this allows distutils on windows to work in the source tree
            self.include_dirs.append(os.path.dirname(get_config_h_filename()))
            self.library_dirs.append(sys.base_exec_prefix)

            # Use the .lib files for the correct architecture
            if self.plat_name == 'win32':
                suffix = 'win32'
            else:
                # win-amd64
                suffix = self.plat_name[4:]
            new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
            if suffix:
                new_lib = os.path.join(new_lib, suffix)
            self.library_dirs.append(new_lib)

        # For extensions under Cygwin, Python's library directory must be
        # appended to library_dirs
        if sys.platform[:6] == 'cygwin':
            if not sysconfig.python_build:
                # building third party extensions
                self.library_dirs.append(
                    os.path.join(
                        sys.prefix, "lib", "python" + get_python_version(), "config"
                    )
                )
            else:
                # building python standard extensions
                self.library_dirs.append('.')

        # For building extensions with a shared Python library,
        # Python's library directory must be appended to library_dirs
        # See Issues: #1600860, #4366
        if sysconfig.get_config_var('Py_ENABLE_SHARED'):
            if not sysconfig.python_build:
                # building third party extensions
                self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
            else:
                # building python standard extensions
                self.library_dirs.append('.')

        # The argument parsing will result in self.define being a string, but
        # it has to be a list of 2-tuples. All the preprocessor symbols
        # specified by the 'define' option will be set to '1'. Multiple
        # symbols can be separated with commas.

        if self.define:
            defines = self.define.split(',')
            self.define = [(symbol, '1') for symbol in defines]

        # The option for macros to undefine is also a string from the
        # option parsing, but has to be a list. Multiple symbols can also
        # be separated with commas here.
        if self.undef:
            self.undef = self.undef.split(',')

        if self.swig_opts is None:
            self.swig_opts = []
        else:
            self.swig_opts = self.swig_opts.split(' ')

        # Finally add the user include and library directories if requested
        if self.user:
            user_include = os.path.join(USER_BASE, "include")
            user_lib = os.path.join(USER_BASE, "lib")
            if os.path.isdir(user_include):
                self.include_dirs.append(user_include)
            if os.path.isdir(user_lib):
                self.library_dirs.append(user_lib)
                self.rpath.append(user_lib)

        if isinstance(self.parallel, str):
            try:
                self.parallel = int(self.parallel)
            except ValueError:
                raise DistutilsOptionError("parallel should be an integer")

    def run(self):  # noqa: C901
        from distutils.ccompiler import new_compiler

        # 'self.extensions', as supplied by setup.py, is a list of
        # Extension instances. See the documentation for Extension (in
        # distutils.extension) for details.
        #
        # For backwards compatibility with Distutils 0.8.2 and earlier, we
        # also allow the 'extensions' list to be a list of tuples:
        #    (ext_name, build_info)
        # where build_info is a dictionary containing everything that
        # Extension instances do except the name, with a few things being
        # differently named. We convert these 2-tuples to Extension
        # instances as needed.

        if not self.extensions:
            return

        # If we were asked to build any C/C++ libraries, make sure that the
        # directory where we put them is in the library search path for
        # linking extensions.
        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            self.libraries.extend(build_clib.get_library_names() or [])
            self.library_dirs.append(build_clib.build_clib)

        # Setup the CCompiler object that we'll use to do all the
        # compiling and linking
        self.compiler = new_compiler(
            compiler=self.compiler,
            verbose=self.verbose,
            dry_run=self.dry_run,
            force=self.force,
        )
        customize_compiler(self.compiler)
        # If we are cross-compiling, init the compiler now (if we are not
        # cross-compiling, init would not hurt, but people may rely on
        # late initialization of compiler even if they shouldn't...)
        if os.name == 'nt' and self.plat_name != get_platform():
            self.compiler.initialize(self.plat_name)

        # And make sure that any compile/link-related options (which might
        # come from the command-line or from the setup script) are set in
        # that CCompiler object -- that way, they automatically apply to
        # all compiling and linking done here.
        if self.include_dirs is not None:
            self.compiler.set_include_dirs(self.include_dirs)
        if self.define is not None:
            # 'define' option is a list of (name, value) tuples
            for (name, value) in self.define:
                self.compiler.define_macro(name, value)
        if self.undef is not None:
            for macro in self.undef:
                self.compiler.undefine_macro(macro)
        if self.libraries is not None:
            self.compiler.set_libraries(self.libraries)
        if self.library_dirs is not None:
            self.compiler.set_library_dirs(self.library_dirs)
        if self.rpath is not None:
            self.compiler.set_runtime_library_dirs(self.rpath)
        if self.link_objects is not None:
            self.compiler.set_link_objects(self.link_objects)

        # Now actually compile and link everything.
        self.build_extensions()

    def check_extensions_list(self, extensions):  # noqa: C901
        """Ensure that the list of extensions (presumably provided as a
        command option 'extensions') is valid, i.e. it is a list of
        Extension objects. We also support the old-style list of 2-tuples,
        where the tuples are (ext_name, build_info), which are converted to
        Extension instances here.

        Raise DistutilsSetupError if the structure is invalid anywhere;
        just returns otherwise.
        """
        if not isinstance(extensions, list):
            raise DistutilsSetupError(
                "'ext_modules' option must be a list of Extension instances"
            )

        for i, ext in enumerate(extensions):
            if isinstance(ext, Extension):
                # OK! (assume type-checking done by Extension constructor)
                continue

            if not isinstance(ext, tuple) or len(ext) != 2:
                raise DistutilsSetupError(
                    "each element of 'ext_modules' option must be an "
                    "Extension instance or 2-tuple"
                )

            ext_name, build_info = ext

            log.warn(
                "old-style (ext_name, build_info) tuple found in "
                "ext_modules for extension '%s' "
                "-- please convert to Extension instance",
                ext_name,
            )

            if not (isinstance(ext_name, str) and extension_name_re.match(ext_name)):
                raise DistutilsSetupError(
                    "first element of each tuple in 'ext_modules' "
                    "must be the extension name (a string)"
                )

            if not isinstance(build_info, dict):
                raise DistutilsSetupError(
                    "second element of each tuple in 'ext_modules' "
                    "must be a dictionary (build info)"
                )

            # OK, the (ext_name, build_info) dict is type-safe: convert it
            # to an Extension instance.
            ext = Extension(ext_name, build_info['sources'])

            # Easy stuff: one-to-one mapping from dict elements to
            # instance attributes.
            for key in (
                'include_dirs',
                'library_dirs',
                'libraries',
                'extra_objects',
                'extra_compile_args',
                'extra_link_args',
            ):
                val = build_info.get(key)
                if val is not None:
                    setattr(ext, key, val)

            # Medium-easy stuff: same syntax/semantics, different names.
            ext.runtime_library_dirs = build_info.get('rpath')
            if 'def_file' in build_info:
                log.warn("'def_file' element of build info dict no longer supported")

            # Non-trivial stuff: 'macros' split into 'define_macros'
            # and 'undef_macros'.
            macros = build_info.get('macros')
            if macros:
                ext.define_macros = []
                ext.undef_macros = []
                for macro in macros:
                    if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
                        raise DistutilsSetupError(
                            "'macros' element of build info dict "
                            "must be 1- or 2-tuple"
                        )
                    if len(macro) == 1:
                        ext.undef_macros.append(macro[0])
                    elif len(macro) == 2:
                        ext.define_macros.append(macro)

            extensions[i] = ext

    def get_source_files(self):
        self.check_extensions_list(self.extensions)
        filenames = []

        # Wouldn't it be neat if we knew the names of header files too...
        for ext in self.extensions:
            filenames.extend(ext.sources)
        return filenames

    def get_outputs(self):
        # Sanity check the 'extensions' list -- can't assume this is being
        # done in the same run as a 'build_extensions()' call (in fact, we
        # can probably assume that it *isn't*!).
        self.check_extensions_list(self.extensions)

        # And build the list of output (built) filenames. Note that this
        # ignores the 'inplace' flag, and assumes everything goes in the
        # "build" tree.
        outputs = []
        for ext in self.extensions:
            outputs.append(self.get_ext_fullpath(ext.name))
        return outputs

    def build_extensions(self):
        # First, sanity-check the 'extensions' list
        self.check_extensions_list(self.extensions)
        if self.parallel:
            self._build_extensions_parallel()
        else:
            self._build_extensions_serial()

    def _build_extensions_parallel(self):
        workers = self.parallel
        if self.parallel is True:
            workers = os.cpu_count()  # may return None
        try:
            from concurrent.futures import ThreadPoolExecutor
        except ImportError:
            workers = None

        if workers is None:
            self._build_extensions_serial()
            return

        with ThreadPoolExecutor(max_workers=workers) as executor:
            futures = [
                executor.submit(self.build_extension, ext) for ext in self.extensions
            ]
            for ext, fut in zip(self.extensions, futures):
                with self._filter_build_errors(ext):
                    fut.result()

    def _build_extensions_serial(self):
        for ext in self.extensions:
            with self._filter_build_errors(ext):
                self.build_extension(ext)

    @contextlib.contextmanager
    def _filter_build_errors(self, ext):
        try:
            yield
        except (CCompilerError, DistutilsError, CompileError) as e:
            if not ext.optional:
                raise
            self.warn('building extension "{}" failed: {}'.format(ext.name, e))

    def build_extension(self, ext):
        sources = ext.sources
        if sources is None or not isinstance(sources, (list, tuple)):
            raise DistutilsSetupError(
                "in 'ext_modules' option (extension '%s'), "
                "'sources' must be present and must be "
                "a list of source filenames" % ext.name
            )
        # sort to make the resulting .so file build reproducible
        sources = sorted(sources)

        ext_path = self.get_ext_fullpath(ext.name)
        depends = sources + ext.depends
        if not (self.force or newer_group(depends, ext_path, 'newer')):
            log.debug("skipping '%s' extension (up-to-date)", ext.name)
            return
        else:
            log.info("building '%s' extension", ext.name)

        # First, scan the sources for SWIG definition files (.i), run
        # SWIG on 'em to create .c files, and modify the sources list
        # accordingly.
        sources = self.swig_sources(sources, ext)

        # Next, compile the source code to object files.

        # XXX not honouring 'define_macros' or 'undef_macros' -- the
        # CCompiler API needs to change to accommodate this, and I
        # want to do one thing at a time!

        # Two possible sources for extra compiler arguments:
        #   - 'extra_compile_args' in Extension object
        #   - CFLAGS environment variable (not particularly
        #     elegant, but people seem to expect it and I
        #     guess it's useful)
        # The environment variable should take precedence, and
        # any sensible compiler will give precedence to later
        # command line args. Hence we combine them in order:
        extra_args = ext.extra_compile_args or []

        macros = ext.define_macros[:]
        for undef in ext.undef_macros:
            macros.append((undef,))

        objects = self.compiler.compile(
            sources,
            output_dir=self.build_temp,
            macros=macros,
            include_dirs=ext.include_dirs,
            debug=self.debug,
            extra_postargs=extra_args,
            depends=ext.depends,
        )

        # XXX outdated variable, kept here in case third-party code
        # needs it.
        self._built_objects = objects[:]

        # Now link the object files together into a "shared object" --
        # of course, first we have to figure out all the other things
        # that go into the mix.
        if ext.extra_objects:
            objects.extend(ext.extra_objects)
        extra_args = ext.extra_link_args or []

        # Detect target language, if not provided
        language = ext.language or self.compiler.detect_language(sources)

        self.compiler.link_shared_object(
            objects,
            ext_path,
            libraries=self.get_libraries(ext),
            library_dirs=ext.library_dirs,
            runtime_library_dirs=ext.runtime_library_dirs,
            extra_postargs=extra_args,
            export_symbols=self.get_export_symbols(ext),
            debug=self.debug,
            build_temp=self.build_temp,
            target_lang=language,
        )

    def swig_sources(self, sources, extension):
        """Walk the list of source files in 'sources', looking for SWIG
        interface (.i) files. Run SWIG on all that are found, and
        return a modified 'sources' list with SWIG source files replaced
        by the generated C (or C++) files.
        """
        new_sources = []
        swig_sources = []
        swig_targets = {}

        # XXX this drops generated C/C++ files into the source tree, which
        # is fine for developers who want to distribute the generated
        # source -- but there should be an option to put SWIG output in
        # the temp dir.

        if self.swig_cpp:
            log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")

        if (
            self.swig_cpp
            or ('-c++' in self.swig_opts)
            or ('-c++' in extension.swig_opts)
        ):
            target_ext = '.cpp'
        else:
            target_ext = '.c'

        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == ".i":  # SWIG interface file
                new_sources.append(base + '_wrap' + target_ext)
                swig_sources.append(source)
                swig_targets[source] = new_sources[-1]
            else:
                new_sources.append(source)

        if not swig_sources:
            return new_sources

        swig = self.swig or self.find_swig()
        swig_cmd = [swig, "-python"]
        swig_cmd.extend(self.swig_opts)
        if self.swig_cpp:
            swig_cmd.append("-c++")

        # Do not override commandline arguments
        if not self.swig_opts:
            for o in extension.swig_opts:
                swig_cmd.append(o)

        for source in swig_sources:
            target = swig_targets[source]
            log.info("swigging %s to %s", source, target)
            self.spawn(swig_cmd + ["-o", target, source])

        return new_sources

    def find_swig(self):
        """Return the name of the SWIG executable. On Unix, this is
        just "swig" -- it should be in the PATH. Tries a bit harder on
        Windows.
        """
        if os.name == "posix":
            return "swig"
        elif os.name == "nt":
            # Look for SWIG in its standard installation directory on
            # Windows (or so I presume!). If we find it there, great;
            # if not, act like Unix and assume it's in the PATH.
            for vers in ("1.3", "1.2", "1.1"):
                fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
                if os.path.isfile(fn):
                    return fn
            else:
                return "swig.exe"
        else:
            raise DistutilsPlatformError(
                "I don't know how to find (much less run) SWIG "
                "on platform '%s'" % os.name
            )

    # -- Name generators -----------------------------------------------
    # (extension names, filenames, whatever)
    def get_ext_fullpath(self, ext_name):
        """Returns the path of the filename for a given extension.

        The file is located in `build_lib` or directly in the package
        (inplace option).
        """
        fullname = self.get_ext_fullname(ext_name)
        modpath = fullname.split('.')
        filename = self.get_ext_filename(modpath[-1])

        if not self.inplace:
            # no further work needed
            # returning:
            #   build_dir/package/path/filename
            filename = os.path.join(*modpath[:-1] + [filename])
            return os.path.join(self.build_lib, filename)

        # the inplace option requires to find the package directory
        # using the build_py command for that
        package = '.'.join(modpath[0:-1])
        build_py = self.get_finalized_command('build_py')
        package_dir = os.path.abspath(build_py.get_package_dir(package))

        # returning
        #   package_dir/filename
        return os.path.join(package_dir, filename)

    def get_ext_fullname(self, ext_name):
        """Returns the fullname of a given extension name.

        Adds the `package.` prefix"""
        if self.package is None:
            return ext_name
        else:
            return self.package + '.' + ext_name

    def get_ext_filename(self, ext_name):
        r"""Convert the name of an extension (eg. "foo.bar") into the name
        of the file from which it will be loaded (eg. "foo/bar.so", or
        "foo\bar.pyd").
        """
        from distutils.sysconfig import get_config_var

        ext_path = ext_name.split('.')
        ext_suffix = get_config_var('EXT_SUFFIX')
        return os.path.join(*ext_path) + ext_suffix

    def get_export_symbols(self, ext):
        """Return the list of symbols that a shared extension has to
        export. This either uses 'ext.export_symbols' or, if it's not
        provided, "PyInit_" + module_name. Only relevant on Windows, where
        the .pyd file (DLL) must export the module "PyInit_" function.
        """
        name = ext.name.split('.')[-1]
        try:
            # Unicode module name support as defined in PEP-489
            # https://www.python.org/dev/peps/pep-0489/#export-hook-name
            name.encode('ascii')
        except UnicodeEncodeError:
            suffix = 'U_' + name.encode('punycode').replace(b'-', b'_').decode('ascii')
        else:
            suffix = "_" + name

        initfunc_name = "PyInit" + suffix
        if initfunc_name not in ext.export_symbols:
            ext.export_symbols.append(initfunc_name)
        return ext.export_symbols

    def get_libraries(self, ext):  # noqa: C901
        """Return the list of libraries to link against when building a
        shared extension. On most platforms, this is just 'ext.libraries';
        on Windows, we add the Python library (eg. python20.dll).
        """
        # The python library is always needed on Windows. For MSVC, this
        # is redundant, since the library is mentioned in a pragma in
        # pyconfig.h that MSVC groks. The other Windows compilers all seem
        # to need it mentioned explicitly, though, so that's what we do.
        # Append '_d' to the python import library on debug builds.
        if sys.platform == "win32":
            from distutils._msvccompiler import MSVCCompiler

            if not isinstance(self.compiler, MSVCCompiler):
                template = "python%d%d"
                if self.debug:
                    template = template + '_d'
                pythonlib = template % (
                    sys.hexversion >> 24,
                    (sys.hexversion >> 16) & 0xFF,
                )
                # don't extend ext.libraries, it may be shared with other
                # extensions, it is a reference to the original list
                return ext.libraries + [pythonlib]
        else:
            # On Android only the main executable and LD_PRELOADs are considered
            # to be RTLD_GLOBAL, all the dependencies of the main executable
            # remain RTLD_LOCAL and so the shared libraries must be linked with
            # libpython when python is built with a shared python library (issue
            # bpo-21536).
            # On Cygwin (and if required, other POSIX-like platforms based on
            # Windows like MinGW) it is simply necessary that all symbols in
            # shared libraries are resolved at link time.
            from distutils.sysconfig import get_config_var

            link_libpython = False
            if get_config_var('Py_ENABLE_SHARED'):
                # A native build on an Android device or on Cygwin
                if hasattr(sys, 'getandroidapilevel'):
                    link_libpython = True
                elif sys.platform == 'cygwin':
                    link_libpython = True
                elif '_PYTHON_HOST_PLATFORM' in os.environ:
                    # We are cross-compiling for one of the relevant platforms
                    if get_config_var('ANDROID_API_LEVEL') != 0:
                        link_libpython = True
                    elif get_config_var('MACHDEP') == 'cygwin':
                        link_libpython = True

            if link_libpython:
                ldversion = get_config_var('LDVERSION')
                return ext.libraries + ['python' + ldversion]

        return ext.libraries + py37compat.pythonlib()
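

# --- Illustrative sketch, not part of the deleted file ----------------------
# A hypothetical setup.py that drives the build_ext command above; the module
# name and C source file are invented for illustration.
from setuptools import setup, Extension

setup(
    name='hello',
    version='0.1',
    ext_modules=[Extension('hello', sources=['hello.c'])],
)
# Typically invoked as:  python setup.py build_ext --inplace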
 
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/setuptools/_distutils/command/upload.py DELETED
@@ -1,205 +0,0 @@
1
- """
2
- distutils.command.upload
3
-
4
- Implements the Distutils 'upload' subcommand (upload package to a package
5
- index).
6
- """
7
-
8
- import os
9
- import io
10
- import hashlib
11
- from base64 import standard_b64encode
12
- from urllib.request import urlopen, Request, HTTPError
13
- from urllib.parse import urlparse
14
- from distutils.errors import DistutilsError, DistutilsOptionError
15
- from distutils.core import PyPIRCCommand
16
- from distutils.spawn import spawn
17
- from distutils import log
18
-
19
-
20
- # PyPI Warehouse supports MD5, SHA256, and Blake2 (blake2-256)
21
- # https://bugs.python.org/issue40698
22
- _FILE_CONTENT_DIGESTS = {
23
- "md5_digest": getattr(hashlib, "md5", None),
24
- "sha256_digest": getattr(hashlib, "sha256", None),
25
- "blake2_256_digest": getattr(hashlib, "blake2b", None),
26
- }
27
-
28
-
29
- class upload(PyPIRCCommand):
30
-
31
- description = "upload binary package to PyPI"
32
-
33
- user_options = PyPIRCCommand.user_options + [
34
- ('sign', 's', 'sign files to upload using gpg'),
35
- ('identity=', 'i', 'GPG identity used to sign files'),
36
- ]
37
-
38
- boolean_options = PyPIRCCommand.boolean_options + ['sign']
39
-
40
- def initialize_options(self):
41
- PyPIRCCommand.initialize_options(self)
42
- self.username = ''
43
- self.password = ''
44
- self.show_response = 0
45
- self.sign = False
46
- self.identity = None
47
-
48
- def finalize_options(self):
49
- PyPIRCCommand.finalize_options(self)
50
- if self.identity and not self.sign:
51
-             raise DistutilsOptionError("Must use --sign for --identity to have meaning")
-         config = self._read_pypirc()
-         if config != {}:
-             self.username = config['username']
-             self.password = config['password']
-             self.repository = config['repository']
-             self.realm = config['realm']
-
-         # getting the password from the distribution
-         # if previously set by the register command
-         if not self.password and self.distribution.password:
-             self.password = self.distribution.password
-
-     def run(self):
-         if not self.distribution.dist_files:
-             msg = (
-                 "Must create and upload files in one command "
-                 "(e.g. setup.py sdist upload)"
-             )
-             raise DistutilsOptionError(msg)
-         for command, pyversion, filename in self.distribution.dist_files:
-             self.upload_file(command, pyversion, filename)
-
-     def upload_file(self, command, pyversion, filename):  # noqa: C901
-         # Makes sure the repository URL is compliant
-         schema, netloc, url, params, query, fragments = urlparse(self.repository)
-         if params or query or fragments:
-             raise AssertionError("Incompatible url %s" % self.repository)
-
-         if schema not in ('http', 'https'):
-             raise AssertionError("unsupported schema " + schema)
-
-         # Sign if requested
-         if self.sign:
-             gpg_args = ["gpg", "--detach-sign", "-a", filename]
-             if self.identity:
-                 gpg_args[2:2] = ["--local-user", self.identity]
-             spawn(gpg_args, dry_run=self.dry_run)
-
-         # Fill in the data - send all the meta-data in case we need to
-         # register a new release
-         f = open(filename, 'rb')
-         try:
-             content = f.read()
-         finally:
-             f.close()
-
-         meta = self.distribution.metadata
-         data = {
-             # action
-             ':action': 'file_upload',
-             'protocol_version': '1',
-             # identify release
-             'name': meta.get_name(),
-             'version': meta.get_version(),
-             # file content
-             'content': (os.path.basename(filename), content),
-             'filetype': command,
-             'pyversion': pyversion,
-             # additional meta-data
-             'metadata_version': '1.0',
-             'summary': meta.get_description(),
-             'home_page': meta.get_url(),
-             'author': meta.get_contact(),
-             'author_email': meta.get_contact_email(),
-             'license': meta.get_licence(),
-             'description': meta.get_long_description(),
-             'keywords': meta.get_keywords(),
-             'platform': meta.get_platforms(),
-             'classifiers': meta.get_classifiers(),
-             'download_url': meta.get_download_url(),
-             # PEP 314
-             'provides': meta.get_provides(),
-             'requires': meta.get_requires(),
-             'obsoletes': meta.get_obsoletes(),
-         }
-
-         data['comment'] = ''
-
-         # file content digests
-         for digest_name, digest_cons in _FILE_CONTENT_DIGESTS.items():
-             if digest_cons is None:
-                 continue
-             try:
-                 data[digest_name] = digest_cons(content).hexdigest()
-             except ValueError:
-                 # hash digest not available or blocked by security policy
-                 pass
-
-         if self.sign:
-             with open(filename + ".asc", "rb") as f:
-                 data['gpg_signature'] = (os.path.basename(filename) + ".asc", f.read())
-
-         # set up the authentication
-         user_pass = (self.username + ":" + self.password).encode('ascii')
-         # The exact encoding of the authentication string is debated.
-         # Anyway PyPI only accepts ascii for both username and password.
-         auth = "Basic " + standard_b64encode(user_pass).decode('ascii')
-
-         # Build up the MIME payload for the POST data
-         boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
-         sep_boundary = b'\r\n--' + boundary.encode('ascii')
-         end_boundary = sep_boundary + b'--\r\n'
-         body = io.BytesIO()
-         for key, value in data.items():
-             title = '\r\nContent-Disposition: form-data; name="%s"' % key
-             # handle multiple entries for the same name
-             if not isinstance(value, list):
-                 value = [value]
-             for value in value:
-                 if type(value) is tuple:
-                     title += '; filename="%s"' % value[0]
-                     value = value[1]
-                 else:
-                     value = str(value).encode('utf-8')
-                 body.write(sep_boundary)
-                 body.write(title.encode('utf-8'))
-                 body.write(b"\r\n\r\n")
-                 body.write(value)
-         body.write(end_boundary)
-         body = body.getvalue()
-
-         msg = "Submitting {} to {}".format(filename, self.repository)
-         self.announce(msg, log.INFO)
-
-         # build the Request
-         headers = {
-             'Content-type': 'multipart/form-data; boundary=%s' % boundary,
-             'Content-length': str(len(body)),
-             'Authorization': auth,
-         }
-
-         request = Request(self.repository, data=body, headers=headers)
-         # send the data
-         try:
-             result = urlopen(request)
-             status = result.getcode()
-             reason = result.msg
-         except HTTPError as e:
-             status = e.code
-             reason = e.msg
-         except OSError as e:
-             self.announce(str(e), log.ERROR)
-             raise
-
-         if status == 200:
-             self.announce('Server response ({}): {}'.format(status, reason), log.INFO)
-             if self.show_response:
-                 text = self._read_pypi_response(result)
-                 msg = '\n'.join(('-' * 75, text, '-' * 75))
-                 self.announce(msg, log.INFO)
-         else:
-             msg = 'Upload failed ({}): {}'.format(status, reason)
-             self.announce(msg, log.ERROR)
-             raise DistutilsError(msg)
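
The heart of `upload_file` above is the hand-rolled multipart/form-data encoding plus the Basic auth header. Below is a minimal standalone sketch of those two steps; `encode_form` is a hypothetical helper written for illustration (it is not part of distutils), and the field names, boundary, and credentials are placeholders.

import io
from base64 import standard_b64encode

def encode_form(data, boundary):
    # Mirrors the encoding loop in upload_file: each field becomes a
    # Content-Disposition part separated by the boundary marker.
    sep = b'\r\n--' + boundary.encode('ascii')
    body = io.BytesIO()
    for key, value in data.items():
        title = '\r\nContent-Disposition: form-data; name="%s"' % key
        if isinstance(value, tuple):  # (filename, raw bytes) pairs
            title += '; filename="%s"' % value[0]
            value = value[1]
        else:
            value = str(value).encode('utf-8')
        body.write(sep)
        body.write(title.encode('utf-8'))
        body.write(b'\r\n\r\n')
        body.write(value)
    body.write(sep + b'--\r\n')  # closing boundary
    return body.getvalue()

# Placeholder credentials; PyPI historically accepted ASCII only.
auth = "Basic " + standard_b64encode(b"user:secret").decode('ascii')
payload = encode_form({':action': 'file_upload', 'content': ('demo.tar.gz', b'...')},
                      'XxXxBOUNDARYxXxX')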
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/detectron2/config/defaults.py DELETED
@@ -1,635 +0,0 @@
- # Copyright (c) Facebook, Inc. and its affiliates.
- from .config import CfgNode as CN
-
- # NOTE: given the new config system
- # (https://detectron2.readthedocs.io/en/latest/tutorials/lazyconfigs.html),
- # we will stop adding new functionalities to default CfgNode.
-
- # -----------------------------------------------------------------------------
- # Convention about Training / Test specific parameters
- # -----------------------------------------------------------------------------
- # Whenever an argument can be either used for training or for testing, the
- # corresponding name will be post-fixed by a _TRAIN for a training parameter,
- # or _TEST for a test-specific parameter.
- # For example, the number of images during training will be
- # IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
- # IMAGES_PER_BATCH_TEST
-
- # -----------------------------------------------------------------------------
- # Config definition
- # -----------------------------------------------------------------------------
-
- _C = CN()
-
- # The version number, to upgrade from old configs to new ones if any
- # changes happen. It's recommended to keep a VERSION in your config file.
- _C.VERSION = 2
-
- _C.MODEL = CN()
- _C.MODEL.LOAD_PROPOSALS = False
- _C.MODEL.MASK_ON = False
- _C.MODEL.KEYPOINT_ON = False
- _C.MODEL.DEVICE = "cuda"
- _C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
-
- # Path (a file path, or URL like detectron2://.., https://..) to a checkpoint file
- # to be loaded to the model. You can find available models in the model zoo.
- _C.MODEL.WEIGHTS = ""
-
- # Values to be used for image normalization (BGR order, since INPUT.FORMAT defaults to BGR).
- # To train on images of different number of channels, just set different mean & std.
- # Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]
- _C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]
- # When using pre-trained models in Detectron1 or any MSRA models,
- # std has been absorbed into its conv1 weights, so the std needs to be set 1.
- # Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)
- _C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]
-
-
- # -----------------------------------------------------------------------------
- # INPUT
- # -----------------------------------------------------------------------------
- _C.INPUT = CN()
- # By default, {MIN,MAX}_SIZE options are used in transforms.ResizeShortestEdge.
- # Please refer to ResizeShortestEdge for detailed definition.
- # Size of the smallest side of the image during training
- _C.INPUT.MIN_SIZE_TRAIN = (800,)
- # Sample size of smallest side by choice or random selection from range given by
- # INPUT.MIN_SIZE_TRAIN
- _C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice"
- # Maximum size of the side of the image during training
- _C.INPUT.MAX_SIZE_TRAIN = 1333
- # Size of the smallest side of the image during testing. Set to zero to disable resize in testing.
- _C.INPUT.MIN_SIZE_TEST = 800
- # Maximum size of the side of the image during testing
- _C.INPUT.MAX_SIZE_TEST = 1333
- # Mode for flipping images used in data augmentation during training
- # choose one of ["horizontal", "vertical", "none"]
- _C.INPUT.RANDOM_FLIP = "horizontal"
-
- # `True` if cropping is used for data augmentation during training
- _C.INPUT.CROP = CN({"ENABLED": False})
- # Cropping type. See documentation of `detectron2.data.transforms.RandomCrop` for explanation.
- _C.INPUT.CROP.TYPE = "relative_range"
- # Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range" and in number of
- # pixels if CROP.TYPE is "absolute"
- _C.INPUT.CROP.SIZE = [0.9, 0.9]
-
-
- # Whether the model needs RGB, YUV, HSV etc.
- # Should be one of the modes defined here, as we use PIL to read the image:
- # https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes
- # with BGR being the one exception. One can set image format to BGR, we will
- # internally use RGB for conversion and flip the channels over
- _C.INPUT.FORMAT = "BGR"
- # The ground truth mask format that the model will use.
- # Mask R-CNN supports either "polygon" or "bitmask" as ground truth.
- _C.INPUT.MASK_FORMAT = "polygon"  # alternative: "bitmask"
-
-
- # -----------------------------------------------------------------------------
- # Dataset
- # -----------------------------------------------------------------------------
- _C.DATASETS = CN()
- # List of the dataset names for training. Must be registered in DatasetCatalog
- # Samples from these datasets will be merged and used as one dataset.
- _C.DATASETS.TRAIN = ()
- # List of the pre-computed proposal files for training, which must be consistent
- # with datasets listed in DATASETS.TRAIN.
- _C.DATASETS.PROPOSAL_FILES_TRAIN = ()
- # Number of top scoring precomputed proposals to keep for training
- _C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000
- # List of the dataset names for testing. Must be registered in DatasetCatalog
- _C.DATASETS.TEST = ()
- # List of the pre-computed proposal files for test, which must be consistent
- # with datasets listed in DATASETS.TEST.
- _C.DATASETS.PROPOSAL_FILES_TEST = ()
- # Number of top scoring precomputed proposals to keep for test
- _C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000
-
- # -----------------------------------------------------------------------------
- # DataLoader
- # -----------------------------------------------------------------------------
- _C.DATALOADER = CN()
- # Number of data loading threads
- _C.DATALOADER.NUM_WORKERS = 4
- # If True, each batch should contain only images for which the aspect ratio
- # is compatible. This groups portrait images together, and landscape images
- # are not batched with portrait images.
- _C.DATALOADER.ASPECT_RATIO_GROUPING = True
- # Options: TrainingSampler, RepeatFactorTrainingSampler
- _C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler"
- # Repeat threshold for RepeatFactorTrainingSampler
- _C.DATALOADER.REPEAT_THRESHOLD = 0.0
- # If True, when working on datasets that have instance annotations, the
- # training dataloader will filter out images without associated annotations
- _C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True
-
- # ---------------------------------------------------------------------------- #
- # Backbone options
- # ---------------------------------------------------------------------------- #
- _C.MODEL.BACKBONE = CN()
-
- _C.MODEL.BACKBONE.NAME = "build_resnet_backbone"
- # Freeze the first several stages so they are not trained.
- # There are 5 stages in ResNet. The first is a convolution, and the following
- # stages are each group of residual blocks.
- _C.MODEL.BACKBONE.FREEZE_AT = 2
-
-
- # ---------------------------------------------------------------------------- #
- # FPN options
- # ---------------------------------------------------------------------------- #
- _C.MODEL.FPN = CN()
- # Names of the input feature maps to be used by FPN
- # They must have contiguous power of 2 strides
- # e.g., ["res2", "res3", "res4", "res5"]
- _C.MODEL.FPN.IN_FEATURES = []
- _C.MODEL.FPN.OUT_CHANNELS = 256
-
- # Options: "" (no norm), "GN"
- _C.MODEL.FPN.NORM = ""
-
- # Types for fusing the FPN top-down and lateral features. Can be either "sum" or "avg"
- _C.MODEL.FPN.FUSE_TYPE = "sum"
-
-
- # ---------------------------------------------------------------------------- #
- # Proposal generator options
- # ---------------------------------------------------------------------------- #
- _C.MODEL.PROPOSAL_GENERATOR = CN()
- # Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals"
- _C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN"
- # Proposal height and width both need to be greater than MIN_SIZE
- # (at the scale used during training or inference)
- _C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0
-
-
- # ---------------------------------------------------------------------------- #
- # Anchor generator options
- # ---------------------------------------------------------------------------- #
- _C.MODEL.ANCHOR_GENERATOR = CN()
- # The generator can be any name in the ANCHOR_GENERATOR registry
- _C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator"
- # Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.
- # Format: list[list[float]]. SIZES[i] specifies the list of sizes to use for
- # IN_FEATURES[i]; len(SIZES) must be equal to len(IN_FEATURES) or 1.
- # When len(SIZES) == 1, SIZES[0] is used for all IN_FEATURES.
- _C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]]
- # Anchor aspect ratios. For each area given in `SIZES`, anchors with different aspect
- # ratios are generated by an anchor generator.
- # Format: list[list[float]]. ASPECT_RATIOS[i] specifies the list of aspect ratios (H/W)
- # to use for IN_FEATURES[i]; len(ASPECT_RATIOS) == len(IN_FEATURES) must be true,
- # or len(ASPECT_RATIOS) == 1 is true and aspect ratio list ASPECT_RATIOS[0] is used
- # for all IN_FEATURES.
- _C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]
- # Anchor angles.
- # list[list[float]], the angle in degrees, for each input feature map.
- # ANGLES[i] specifies the list of angles for IN_FEATURES[i].
- _C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]]
- # Relative offset between the center of the first anchor and the top-left corner of the image
- # Value has to be in [0, 1). Recommend to use 0.5, which means half stride.
- # The value is not expected to affect model accuracy.
- _C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0
-
- # ---------------------------------------------------------------------------- #
- # RPN options
- # ---------------------------------------------------------------------------- #
- _C.MODEL.RPN = CN()
- _C.MODEL.RPN.HEAD_NAME = "StandardRPNHead"  # used by RPN_HEAD_REGISTRY
-
- # Names of the input feature maps to be used by RPN
- # e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN
- _C.MODEL.RPN.IN_FEATURES = ["res4"]
- # Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels
- # Set to -1 or a large value, e.g. 100000, to disable pruning anchors
- _C.MODEL.RPN.BOUNDARY_THRESH = -1
- # IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD]
- # Minimum overlap required between an anchor and ground-truth box for the
- # (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
- # ==> positive RPN example: 1)
- # Maximum overlap allowed between an anchor and ground-truth box for the
- # (anchor, gt box) pair to be a negative example (IoU < BG_IOU_THRESHOLD
- # ==> negative RPN example: 0)
- # Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD)
- # are ignored (-1)
- _C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7]
- _C.MODEL.RPN.IOU_LABELS = [0, -1, 1]
- # Number of regions per image used to train RPN
- _C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
- # Target fraction of foreground (positive) examples per RPN minibatch
- _C.MODEL.RPN.POSITIVE_FRACTION = 0.5
- # Options are: "smooth_l1", "giou", "diou", "ciou"
- _C.MODEL.RPN.BBOX_REG_LOSS_TYPE = "smooth_l1"
- _C.MODEL.RPN.BBOX_REG_LOSS_WEIGHT = 1.0
- # Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets
- _C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
- # The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
- _C.MODEL.RPN.SMOOTH_L1_BETA = 0.0
- _C.MODEL.RPN.LOSS_WEIGHT = 1.0
- # Number of top scoring RPN proposals to keep before applying NMS
- # When FPN is used, this is *per FPN level* (not total)
- _C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000
- _C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000
- # Number of top scoring RPN proposals to keep after applying NMS
- # When FPN is used, this limit is applied per level and then again to the union
- # of proposals from all levels
- # NOTE: When FPN is used, the meaning of this config is different from Detectron1.
- # It means per-batch topk in Detectron1, but per-image topk here.
- # See the "find_top_rpn_proposals" function for details.
- _C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000
- _C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000
- # NMS threshold used on RPN proposals
- _C.MODEL.RPN.NMS_THRESH = 0.7
- # Set this to -1 to use the same number of output channels as input channels.
- _C.MODEL.RPN.CONV_DIMS = [-1]
-
- # ---------------------------------------------------------------------------- #
- # ROI HEADS options
- # ---------------------------------------------------------------------------- #
- _C.MODEL.ROI_HEADS = CN()
- _C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads"
- # Number of foreground classes
- _C.MODEL.ROI_HEADS.NUM_CLASSES = 80
- # Names of the input feature maps to be used by ROI heads
- # Currently all heads (box, mask, ...) use the same input feature map list
- # e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN
- _C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"]
- # IOU overlap ratios [IOU_THRESHOLD]
- # Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD)
- # Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD)
- _C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5]
- _C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1]
- # RoI minibatch size *per image* (number of regions of interest [ROIs]) during training
- # Total number of RoIs per training minibatch =
- #   ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH
- # E.g., a common configuration is: 512 * 16 = 8192
- _C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
- # Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
- _C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
-
- # Only used in test mode
-
- # Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
- # balance obtaining high recall with not having too many low precision
- # detections that will slow down inference post processing steps (like NMS)
- # A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down
- # inference.
- _C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05
- # Overlap threshold used for non-maximum suppression (suppress boxes with
- # IoU >= this threshold)
- _C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5
- # If True, augment proposals with ground-truth boxes before sampling proposals to
- # train ROI heads.
- _C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True
-
- # ---------------------------------------------------------------------------- #
- # Box Head
- # ---------------------------------------------------------------------------- #
- _C.MODEL.ROI_BOX_HEAD = CN()
- # C4 doesn't use the head name option
- # Options for non-C4 models: FastRCNNConvFCHead,
- _C.MODEL.ROI_BOX_HEAD.NAME = ""
- # Options are: "smooth_l1", "giou", "diou", "ciou"
- _C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE = "smooth_l1"
- # The final scaling coefficient on the box regression loss, used to balance the magnitude of its
- # gradients with other losses in the model. See also `MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT`.
- _C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT = 1.0
- # Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
- # These are empirically chosen to approximately lead to unit variance targets
- _C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0)
- # The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
- _C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0
- _C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
- _C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
- # Type of pooling operation applied to the incoming feature map for each RoI
- _C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2"
-
- _C.MODEL.ROI_BOX_HEAD.NUM_FC = 0
- # Hidden layer dimension for FC layers in the RoI box head
- _C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024
- _C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0
- # Channel dimension for Conv layers in the RoI box head
- _C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256
- # Normalization method for the convolution layers.
- # Options: "" (no norm), "GN", "SyncBN".
- _C.MODEL.ROI_BOX_HEAD.NORM = ""
- # Whether to use class-agnostic bbox regression
- _C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False
- # If true, RoI heads use bounding boxes predicted by the box head rather than proposal boxes.
- _C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False
-
- # ---------------------------------------------------------------------------- #
- # Cascaded Box Head
- # ---------------------------------------------------------------------------- #
- _C.MODEL.ROI_BOX_CASCADE_HEAD = CN()
- # The number of cascade stages is implicitly defined by the length of the following two configs.
- _C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = (
-     (10.0, 10.0, 5.0, 5.0),
-     (20.0, 20.0, 10.0, 10.0),
-     (30.0, 30.0, 15.0, 15.0),
- )
- _C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7)
-
-
- # ---------------------------------------------------------------------------- #
- # Mask Head
- # ---------------------------------------------------------------------------- #
- _C.MODEL.ROI_MASK_HEAD = CN()
- _C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead"
- _C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
- _C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
- _C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0  # The number of convs in the mask head
- _C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256
- # Normalization method for the convolution layers.
- # Options: "" (no norm), "GN", "SyncBN".
- _C.MODEL.ROI_MASK_HEAD.NORM = ""
- # Whether to use class-agnostic mask prediction
- _C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False
- # Type of pooling operation applied to the incoming feature map for each RoI
- _C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2"
-
-
- # ---------------------------------------------------------------------------- #
- # Keypoint Head
- # ---------------------------------------------------------------------------- #
- _C.MODEL.ROI_KEYPOINT_HEAD = CN()
- _C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead"
- _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14
- _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0
- _C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8))
- _C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17  # 17 is the number of keypoints in COCO.
-
- # Images with too few (or no) keypoints are excluded from training.
- _C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1
- # Normalize by the total number of visible keypoints in the minibatch if True.
- # Otherwise, normalize by the total number of keypoints that could ever exist
- # in the minibatch.
- # The keypoint softmax loss is only calculated on visible keypoints.
- # Since the number of visible keypoints can vary significantly between
- # minibatches, this has the effect of up-weighting the importance of
- # minibatches with few visible keypoints. (Imagine the extreme case of
- # only one visible keypoint versus N: in the case of N, each one
- # contributes 1/N to the gradient compared to the single keypoint
- # determining the gradient direction). Instead, we can normalize the
- # loss by the total number of keypoints, if it were the case that all
- # keypoints were visible in a full minibatch. (Returning to the example,
- # this means that the one visible keypoint contributes as much as each
- # of the N keypoints.)
- _C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True
- # Multi-task loss weight to use for keypoints
- # Recommended values:
- #   - use 1.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is True
- #   - use 4.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is False
- _C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0
- # Type of pooling operation applied to the incoming feature map for each RoI
- _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2"
-
- # ---------------------------------------------------------------------------- #
- # Semantic Segmentation Head
- # ---------------------------------------------------------------------------- #
- _C.MODEL.SEM_SEG_HEAD = CN()
- _C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead"
- _C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"]
- # Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for
- # the corresponding pixel.
- _C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255
- # Number of classes in the semantic segmentation head
- _C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54
- # Number of channels in the 3x3 convs inside semantic-FPN heads.
- _C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128
- # Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride.
- _C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4
- # Normalization method for the convolution layers. Options: "" (no norm), "GN".
- _C.MODEL.SEM_SEG_HEAD.NORM = "GN"
- _C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0
-
- _C.MODEL.PANOPTIC_FPN = CN()
- # Scaling of all losses from instance detection / segmentation head.
- _C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0
-
- # options when combining instance & semantic segmentation outputs
- _C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True})  # "COMBINE.ENABLED" is deprecated & not used
- _C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5
- _C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096
- _C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5
-
-
- # ---------------------------------------------------------------------------- #
- # RetinaNet Head
- # ---------------------------------------------------------------------------- #
- _C.MODEL.RETINANET = CN()
-
- # This is the number of foreground classes.
- _C.MODEL.RETINANET.NUM_CLASSES = 80
-
- _C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
-
- # Convolutions to use in the cls and bbox tower
- # NOTE: this doesn't include the last conv for logits
- _C.MODEL.RETINANET.NUM_CONVS = 4
-
- # IoU overlap ratio [bg, fg] for labeling anchors.
- # Anchors with < bg are labeled negative (0)
- # Anchors with >= bg and < fg are ignored (-1)
- # Anchors with >= fg are labeled positive (1)
- _C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5]
- _C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1]
-
- # Prior prob for rare case (i.e. foreground) at the beginning of training.
- # This is used to set the bias for the logits layer of the classifier subnet.
- # This improves training stability in the case of heavy class imbalance.
- _C.MODEL.RETINANET.PRIOR_PROB = 0.01
-
- # Inference cls score threshold, only anchors with score > INFERENCE_TH are
- # considered for inference (to improve speed)
- _C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05
- # Select topk candidates before NMS
- _C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000
- _C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5
-
- # Weights on (dx, dy, dw, dh) for normalizing Retinanet anchor regression targets
- _C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
-
- # Loss parameters
- _C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0
- _C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25
- _C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1
- # Options are: "smooth_l1", "giou", "diou", "ciou"
- _C.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1"
-
- # One of BN, SyncBN, FrozenBN, GN
- # Only supports GN until unshared norm is implemented
- _C.MODEL.RETINANET.NORM = ""
-
-
- # ---------------------------------------------------------------------------- #
- # ResNe[X]t options (ResNets = {ResNet, ResNeXt})
- # Note that parts of a resnet may be used for both the backbone and the head
- # These options apply to both
- # ---------------------------------------------------------------------------- #
- _C.MODEL.RESNETS = CN()
-
- _C.MODEL.RESNETS.DEPTH = 50
- _C.MODEL.RESNETS.OUT_FEATURES = ["res4"]  # res4 for C4 backbone, res2..5 for FPN backbone
-
- # Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
- _C.MODEL.RESNETS.NUM_GROUPS = 1
-
- # Options: FrozenBN, GN, "SyncBN", "BN"
- _C.MODEL.RESNETS.NORM = "FrozenBN"
-
- # Baseline width of each group.
- # Scaling this parameter will scale the width of all bottleneck layers.
- _C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
-
- # Place the stride 2 conv on the 1x1 filter
- # Use True only for the original MSRA ResNet; use False for C2 and Torch models
- _C.MODEL.RESNETS.STRIDE_IN_1X1 = True
-
- # Apply dilation in stage "res5"
- _C.MODEL.RESNETS.RES5_DILATION = 1
-
- # Output width of res2. Scaling this parameter will scale the width of all 1x1 convs in ResNet
- # For R18 and R34, this needs to be set to 64
- _C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
- _C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
-
- # Apply Deformable Convolution in stages
- # Specify whether to apply deform_conv on Res2, Res3, Res4, Res5
- _C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False]
- # Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168);
- # Use False for DeformableV1.
- _C.MODEL.RESNETS.DEFORM_MODULATED = False
- # Number of groups in deformable conv.
- _C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1
-
-
- # ---------------------------------------------------------------------------- #
- # Solver
- # ---------------------------------------------------------------------------- #
- _C.SOLVER = CN()
-
- # Options: WarmupMultiStepLR, WarmupCosineLR.
- # See detectron2/solver/build.py for definition.
- _C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR"
-
- _C.SOLVER.MAX_ITER = 40000
-
- _C.SOLVER.BASE_LR = 0.001
-
- _C.SOLVER.MOMENTUM = 0.9
-
- _C.SOLVER.NESTEROV = False
-
- _C.SOLVER.WEIGHT_DECAY = 0.0001
- # The weight decay that's applied to parameters of normalization layers
- # (typically the affine transformation)
- _C.SOLVER.WEIGHT_DECAY_NORM = 0.0
-
- _C.SOLVER.GAMMA = 0.1
- # The iteration number to decrease learning rate by GAMMA.
- _C.SOLVER.STEPS = (30000,)
-
- _C.SOLVER.WARMUP_FACTOR = 1.0 / 1000
- _C.SOLVER.WARMUP_ITERS = 1000
- _C.SOLVER.WARMUP_METHOD = "linear"
-
- # Save a checkpoint after every this number of iterations
- _C.SOLVER.CHECKPOINT_PERIOD = 5000
-
- # Number of images per batch across all machines. This is also the number
- # of training images per step (i.e. per iteration). If we use 16 GPUs
- # and IMS_PER_BATCH = 32, each GPU will see 2 images per batch.
- # May be adjusted automatically if REFERENCE_WORLD_SIZE is set.
- _C.SOLVER.IMS_PER_BATCH = 16
-
- # The reference number of workers (GPUs) this config is meant to train with.
- # It takes no effect when set to 0.
- # With a non-zero value, it will be used by DefaultTrainer to compute a desired
- # per-worker batch size, and then scale the other related configs (total batch size,
- # learning rate, etc) to match the per-worker batch size.
- # See documentation of `DefaultTrainer.auto_scale_workers` for details:
- _C.SOLVER.REFERENCE_WORLD_SIZE = 0
-
- # Detectron v1 (and previous detection code) used a 2x higher LR and 0 WD for
- # biases. This is not useful (at least for recent models). You should avoid
- # changing these and they exist only to reproduce Detectron v1 training if
- # desired.
- _C.SOLVER.BIAS_LR_FACTOR = 1.0
- _C.SOLVER.WEIGHT_DECAY_BIAS = None  # None means following WEIGHT_DECAY
-
- # Gradient clipping
- _C.SOLVER.CLIP_GRADIENTS = CN({"ENABLED": False})
- # Type of gradient clipping, currently 2 values are supported:
- # - "value": the absolute values of elements of each gradient are clipped
- # - "norm": the norm of the gradient for each parameter is clipped thus
- #   affecting all elements in the parameter
- _C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value"
- # Maximum absolute value used for clipping gradients
- _C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0
- # Floating point number p for L-p norm to be used with the "norm"
- # gradient clipping type; for L-inf, please specify .inf
- _C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0
-
- # Enable automatic mixed precision for training
- # Note that this does not change the model's inference behavior.
- # To use AMP in inference, run inference under autocast()
- _C.SOLVER.AMP = CN({"ENABLED": False})
-
- # ---------------------------------------------------------------------------- #
- # Specific test options
- # ---------------------------------------------------------------------------- #
- _C.TEST = CN()
- # For end-to-end tests to verify the expected accuracy.
- # Each item is [task, metric, value, tolerance]
- # e.g.: [['bbox', 'AP', 38.5, 0.2]]
- _C.TEST.EXPECTED_RESULTS = []
- # The period (in terms of steps) to evaluate the model during training.
- # Set to 0 to disable.
- _C.TEST.EVAL_PERIOD = 0
- # The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval
- # When empty, it will use the defaults in COCO.
- # Otherwise it should be a list[float] with the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
- _C.TEST.KEYPOINT_OKS_SIGMAS = []
- # Maximum number of detections to return per image during inference (100 is
- # based on the limit established for the COCO dataset).
- _C.TEST.DETECTIONS_PER_IMAGE = 100
-
- _C.TEST.AUG = CN({"ENABLED": False})
- _C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200)
- _C.TEST.AUG.MAX_SIZE = 4000
- _C.TEST.AUG.FLIP = True
-
- _C.TEST.PRECISE_BN = CN({"ENABLED": False})
- _C.TEST.PRECISE_BN.NUM_ITER = 200
-
- # ---------------------------------------------------------------------------- #
- # Misc options
- # ---------------------------------------------------------------------------- #
- # Directory where output files are written
- _C.OUTPUT_DIR = "./output"
- # Set seed to negative to fully randomize everything.
- # Set seed to positive to use a fixed seed. Note that a fixed seed increases
- # reproducibility but does not guarantee fully deterministic behavior.
- # Disabling all parallelism further increases reproducibility.
- _C.SEED = -1
- # Benchmark different cudnn algorithms.
- # If input images have very different sizes, this option will have large overhead
- # for about 10k iterations. It usually hurts total time, but can benefit for certain models.
- # If input images have the same or similar sizes, benchmark is often helpful.
- _C.CUDNN_BENCHMARK = False
- # The period (in terms of steps) for minibatch visualization at train time.
- # Set to 0 to disable.
- _C.VIS_PERIOD = 0
-
- # global config is for quick hack purposes.
- # You can set them in command line or config files,
- # and access it with:
- #
- #   from detectron2.config import global_cfg
- #   print(global_cfg.HACK)
- #
- # Do not commit any configs into it.
- _C.GLOBAL = CN()
- _C.GLOBAL.HACK = 1.0
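
Every default above is reachable through detectron2's `get_cfg()`, which hands back a copy of this `_C` tree. A short usage sketch follows; the YAML path and override values are placeholders, but the calls (`get_cfg`, `merge_from_file`, `merge_from_list`, `freeze`) are the standard yacs-style config API.

from detectron2.config import get_cfg

cfg = get_cfg()                                   # a copy of the _C tree above
cfg.merge_from_file("my_experiment.yaml")         # placeholder config file
cfg.merge_from_list(["SOLVER.BASE_LR", "0.02"])   # CLI-style key/value overrides
cfg.freeze()                                      # make the node read-only
print(cfg.MODEL.ROI_HEADS.NUM_CLASSES)            # prints 80 unless overridden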
spaces/Awiny/Image2Paragraph/models/grit_src/third_party/CenterNet2/tools/visualize_data.py DELETED
@@ -1,94 +0,0 @@
- #!/usr/bin/env python
- # Copyright (c) Facebook, Inc. and its affiliates.
- import argparse
- import os
- from itertools import chain
- import cv2
- import tqdm
-
- from detectron2.config import get_cfg
- from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_train_loader
- from detectron2.data import detection_utils as utils
- from detectron2.data.build import filter_images_with_few_keypoints
- from detectron2.utils.logger import setup_logger
- from detectron2.utils.visualizer import Visualizer
-
-
- def setup(args):
-     cfg = get_cfg()
-     if args.config_file:
-         cfg.merge_from_file(args.config_file)
-     cfg.merge_from_list(args.opts)
-     cfg.DATALOADER.NUM_WORKERS = 0
-     cfg.freeze()
-     return cfg
-
-
- def parse_args(in_args=None):
-     parser = argparse.ArgumentParser(description="Visualize ground-truth data")
-     parser.add_argument(
-         "--source",
-         choices=["annotation", "dataloader"],
-         required=True,
-         help="visualize the annotations or the data loader (with pre-processing)",
-     )
-     parser.add_argument("--config-file", metavar="FILE", help="path to config file")
-     parser.add_argument("--output-dir", default="./", help="path to output directory")
-     parser.add_argument("--show", action="store_true", help="show output in a window")
-     parser.add_argument(
-         "opts",
-         help="Modify config options using the command-line",
-         default=None,
-         nargs=argparse.REMAINDER,
-     )
-     return parser.parse_args(in_args)
-
-
- if __name__ == "__main__":
-     args = parse_args()
-     logger = setup_logger()
-     logger.info("Arguments: " + str(args))
-     cfg = setup(args)
-
-     dirname = args.output_dir
-     os.makedirs(dirname, exist_ok=True)
-     metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
-
-     def output(vis, fname):
-         if args.show:
-             print(fname)
-             cv2.imshow("window", vis.get_image()[:, :, ::-1])
-             cv2.waitKey()
-         else:
-             filepath = os.path.join(dirname, fname)
-             print("Saving to {} ...".format(filepath))
-             vis.save(filepath)
-
-     scale = 1.0
-     if args.source == "dataloader":
-         train_data_loader = build_detection_train_loader(cfg)
-         for batch in train_data_loader:
-             for per_image in batch:
-                 # Pytorch tensor is in (C, H, W) format
-                 img = per_image["image"].permute(1, 2, 0).cpu().detach().numpy()
-                 img = utils.convert_image_to_rgb(img, cfg.INPUT.FORMAT)
-
-                 visualizer = Visualizer(img, metadata=metadata, scale=scale)
-                 target_fields = per_image["instances"].get_fields()
-                 labels = [metadata.thing_classes[i] for i in target_fields["gt_classes"]]
-                 vis = visualizer.overlay_instances(
-                     labels=labels,
-                     boxes=target_fields.get("gt_boxes", None),
-                     masks=target_fields.get("gt_masks", None),
-                     keypoints=target_fields.get("gt_keypoints", None),
-                 )
-                 output(vis, str(per_image["image_id"]) + ".jpg")
-     else:
-         dicts = list(chain.from_iterable([DatasetCatalog.get(k) for k in cfg.DATASETS.TRAIN]))
-         if cfg.MODEL.KEYPOINT_ON:
-             dicts = filter_images_with_few_keypoints(dicts, 1)
-         for dic in tqdm.tqdm(dicts):
-             img = utils.read_image(dic["file_name"], "RGB")
-             visualizer = Visualizer(img, metadata=metadata, scale=scale)
-             vis = visualizer.draw_dataset_dict(dic)
-             output(vis, os.path.basename(dic["file_name"]))
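
The script is normally invoked from the shell, e.g. `python visualize_data.py --source annotation --config-file <cfg> --output-dir vis/`. A hedged sketch of driving the same entry points from Python, assuming the module is importable and the config path is a placeholder:

from visualize_data import parse_args, setup

# parse_args accepts an explicit argv list via its in_args parameter.
args = parse_args(
    ["--source", "annotation", "--config-file", "my_cfg.yaml", "--output-dir", "vis/"]
)
cfg = setup(args)  # setup() forces NUM_WORKERS to 0, so loading runs in-process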
spaces/B2gan/LLM_Can_See/app.py DELETED
@@ -1,87 +0,0 @@
- import gradio as gr
-
- from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
- from transformers import CLIPProcessor, CLIPModel
- from transformers import pipeline
-
- import torch
- from PIL import Image
-
- import openai
- import ai_functions
-
- import re
- import os
-
- HF_TOKEN = os.environ.get("HF_TOKEN")
-
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
- model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
- feature_extractor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
- tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
-
- clip = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
- processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
-
- clip = clip.to(device)
-
- llm = None
-
- model.to(device)
-
- max_length = 16
- num_beams = 4
- gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
-
- def extract_list_items(text):
-     # Split a numbered list ("1. ...", "2) ...", "3- ...") into its items.
-     pattern = r"\d+[.\)-](?=\D)"
-     return re.split(pattern, text)[1:]
-
- def process_image_and_text(image, text, model_choice, api_key=None):
-     global llm
-     if model_choice != "llama 7b":
-         openai.api_key = api_key
-
-     pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values
-     pixel_values = pixel_values.to(device)
-
-     output_ids = model.generate(pixel_values, **gen_kwargs)
-
-     preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
-     preds = [pred.strip() for pred in preds][0]
-
-     if model_choice == "llama 7b":
-         result = ai_functions.ai_function(text, preds, llm)
-     else:
-         result = ai_functions.ai_function(text, preds, model_choice)
-
-     prompt = extract_list_items(result)
-
-     inputs = processor(text=prompt, images=image, return_tensors="pt", padding=True)
-     inputs = {name: tensor.to(device) for name, tensor in inputs.items()}
-
-     outputs = clip(**inputs)
-     logits_per_image = outputs.logits_per_image
-     probs = logits_per_image.softmax(dim=1)
-
-     return prompt[torch.argmax(torch.FloatTensor(probs.cpu()))]
-
- model_choices = gr.Dropdown(choices=["gpt-4", "gpt-3.5-turbo"], label="Model")
- api_key_input = gr.Textbox(label="OpenAI API Key")
-
- with gr.Blocks() as demo:
-     gr.Markdown("# LLM Can See\n\nThis is an integration project using the Large Language Model(LLM), vit-gpt2, and CLIP, with the aim of enabling LLM to interpret images and answer related questions.")
-
-
-     with gr.Row():
-         gr.Interface(
-             fn=process_image_and_text,
-             inputs=["image", "text", model_choices, api_key_input],
-             outputs=["text"],
-             allow_flagging="never"
-         )
-     gr.Markdown("## How to Use\n\n1. **Upload an image**: Choose an image you want the model to interpret.\n2. **Enter a question**: Input your question for the model in the text box.\n3. **Select a model**: Select the model you wish to use for the answer. The current supported models are \"gpt-4\" and \"gpt-3.5-turbo\". Due to memory limitations, the \"llama2\" model is not currently supported.\n\n## Notes\n\nThe current prompt has been preset to \"assist the visually impaired\", so the questions answered by the model will be particularly biased towards this setting. Please note that this may affect the style and content of the model's answers.\n\n")
-
- demo.launch()
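
The `extract_list_items` helper above is what turns a numbered LLM answer into candidate captions for CLIP to score. A quick demonstration on a made-up string (the captions are illustrative only):

import re

pattern = r"\d+[.\)-](?=\D)"
text = "1. a red door 2) a wooden bench 3- a street lamp"
print(re.split(pattern, text)[1:])
# [' a red door ', ' a wooden bench ', ' a street lamp']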
spaces/Bart92/RVC_HF/julius/filters.py DELETED
@@ -1,258 +0,0 @@
- # File under the MIT license, see https://github.com/adefossez/julius/LICENSE for details.
- # Author: adefossez, 2021
- """
- FIR windowed sinc highpass and bandpass filters.
- Those are convenience wrappers around the filters defined in `julius.lowpass`.
- """
-
- from typing import Sequence, Optional
-
- import torch
-
- # Import all lowpass filters for consistency.
- from .lowpass import lowpass_filter, lowpass_filters, LowPassFilter, LowPassFilters  # noqa
- from .utils import simple_repr
-
-
- class HighPassFilters(torch.nn.Module):
-     """
-     Bank of high pass filters. See `julius.lowpass.LowPassFilters` for more
-     details on the implementation.
-
-     Args:
-         cutoffs (list[float]): list of cutoff frequencies, in [0, 0.5] expressed as `f/f_s` where
-             f_s is the samplerate and `f` is the cutoff frequency.
-             The upper limit is 0.5, because a signal sampled at `f_s` contains only
-             frequencies under `f_s / 2`.
-         stride (int): how much to decimate the output. Probably not a good idea
-             to do so with a high pass filter though...
-         pad (bool): if True, appropriately pad the input with zero over the edge. If `stride=1`,
-             the output will have the same length as the input.
-         zeros (float): Number of zero crossings to keep.
-             Controls the receptive field of the Finite Impulse Response filter.
-             For filters with low cutoff frequency, e.g. 40Hz at 44.1kHz,
-             it is a bad idea to set this to a high value.
-             This is likely appropriate for most use. Lower values
-             will result in a faster filter, but with a slower attenuation around the
-             cutoff frequency.
-         fft (bool or None): if True, uses `julius.fftconv` rather than PyTorch convolutions.
-             If False, uses PyTorch convolutions. If None, either one will be chosen automatically
-             depending on the effective filter size.
-
-
-     ..warning::
-         All the filters will use the same filter size, aligned on the lowest
-         frequency provided. If you combine a lot of filters with very diverse frequencies, it might
-         be more efficient to split them over multiple modules with similar frequencies.
-
-     Shape:
-
-         - Input: `[*, T]`
-         - Output: `[F, *, T']`, with `T'=T` if `pad` is True and `stride` is 1, and
-           `F` is the number of cutoff frequencies.
-
-     >>> highpass = HighPassFilters([1/4])
-     >>> x = torch.randn(4, 12, 21, 1024)
-     >>> list(highpass(x).shape)
-     [1, 4, 12, 21, 1024]
-     """
-
-     def __init__(self, cutoffs: Sequence[float], stride: int = 1, pad: bool = True,
-                  zeros: float = 8, fft: Optional[bool] = None):
-         super().__init__()
-         self._lowpasses = LowPassFilters(cutoffs, stride, pad, zeros, fft)
-
-     @property
-     def cutoffs(self):
-         return self._lowpasses.cutoffs
-
-     @property
-     def stride(self):
-         return self._lowpasses.stride
-
-     @property
-     def pad(self):
-         return self._lowpasses.pad
-
-     @property
-     def zeros(self):
-         return self._lowpasses.zeros
-
-     @property
-     def fft(self):
-         return self._lowpasses.fft
-
-     def forward(self, input):
-         lows = self._lowpasses(input)
-
-         # We need to extract the right portion of the input in case
-         # pad is False or stride > 1
-         if self.pad:
-             start, end = 0, input.shape[-1]
-         else:
-             start = self._lowpasses.half_size
-             end = -start
-         input = input[..., start:end:self.stride]
-         highs = input - lows
-         return highs
-
-     def __repr__(self):
-         return simple_repr(self)
-
-
- class HighPassFilter(torch.nn.Module):
-     """
-     Same as `HighPassFilters` but applies a single high pass filter.
-
-     Shape:
-
-         - Input: `[*, T]`
-         - Output: `[*, T']`, with `T'=T` if `pad` is True and `stride` is 1.
-
-     >>> highpass = HighPassFilter(1/4, stride=1)
-     >>> x = torch.randn(4, 124)
-     >>> list(highpass(x).shape)
-     [4, 124]
-     """
-
-     def __init__(self, cutoff: float, stride: int = 1, pad: bool = True,
-                  zeros: float = 8, fft: Optional[bool] = None):
-         super().__init__()
-         self._highpasses = HighPassFilters([cutoff], stride, pad, zeros, fft)
-
-     @property
-     def cutoff(self):
-         return self._highpasses.cutoffs[0]
-
-     @property
-     def stride(self):
-         return self._highpasses.stride
-
-     @property
-     def pad(self):
-         return self._highpasses.pad
-
-     @property
-     def zeros(self):
-         return self._highpasses.zeros
-
-     @property
-     def fft(self):
-         return self._highpasses.fft
-
-     def forward(self, input):
-         return self._highpasses(input)[0]
-
-     def __repr__(self):
-         return simple_repr(self)
-
-
- def highpass_filters(input: torch.Tensor, cutoffs: Sequence[float],
-                      stride: int = 1, pad: bool = True,
-                      zeros: float = 8, fft: Optional[bool] = None):
-     """
-     Functional version of `HighPassFilters`, refer to this class for more information.
-     """
-     return HighPassFilters(cutoffs, stride, pad, zeros, fft).to(input)(input)
-
-
- def highpass_filter(input: torch.Tensor, cutoff: float,
-                     stride: int = 1, pad: bool = True,
-                     zeros: float = 8, fft: Optional[bool] = None):
-     """
-     Functional version of `HighPassFilter`, refer to this class for more information.
-     Output will not have a dimension inserted in the front.
-     """
-     return highpass_filters(input, [cutoff], stride, pad, zeros, fft)[0]
-
-
- class BandPassFilter(torch.nn.Module):
-     """
-     Single band pass filter, implemented as the difference of two lowpass filters.
-
-     Args:
-         cutoff_low (float): lower cutoff frequency, in [0, 0.5] expressed as `f/f_s` where
-             f_s is the samplerate and `f` is the cutoff frequency.
-             The upper limit is 0.5, because a signal sampled at `f_s` contains only
-             frequencies under `f_s / 2`.
-         cutoff_high (float): higher cutoff frequency, in [0, 0.5] expressed as `f/f_s`.
-             This must be higher than cutoff_low. Note that due to the fact
-             that filters are not perfect, the output will be non zero even if
-             cutoff_high == cutoff_low.
-         stride (int): how much to decimate the output.
-         pad (bool): if True, appropriately pad the input with zero over the edge. If `stride=1`,
-             the output will have the same length as the input.
-         zeros (float): Number of zero crossings to keep.
-             Controls the receptive field of the Finite Impulse Response filter.
-             For filters with low cutoff frequency, e.g. 40Hz at 44.1kHz,
-             it is a bad idea to set this to a high value.
-             This is likely appropriate for most use. Lower values
-             will result in a faster filter, but with a slower attenuation around the
-             cutoff frequency.
-         fft (bool or None): if True, uses `julius.fftconv` rather than PyTorch convolutions.
-             If False, uses PyTorch convolutions. If None, either one will be chosen automatically
-             depending on the effective filter size.
-
-
-     Shape:
-
-         - Input: `[*, T]`
-         - Output: `[*, T']`, with `T'=T` if `pad` is True and `stride` is 1.
-
-     ..Note:: There is no BandPassFilters (bank of bandpasses) because its
-         signification would be the same as `julius.bands.SplitBands`.
-
-     >>> bandpass = BandPassFilter(1/4, 1/3)
-     >>> x = torch.randn(4, 12, 21, 1024)
-     >>> list(bandpass(x).shape)
-     [4, 12, 21, 1024]
-     """
-
-     def __init__(self, cutoff_low: float, cutoff_high: float, stride: int = 1, pad: bool = True,
-                  zeros: float = 8, fft: Optional[bool] = None):
-         super().__init__()
-         if cutoff_low > cutoff_high:
-             raise ValueError(f"Lower cutoff {cutoff_low} should be less than "
-                              f"higher cutoff {cutoff_high}.")
-         self._lowpasses = LowPassFilters([cutoff_low, cutoff_high], stride, pad, zeros, fft)
-
-     @property
-     def cutoff_low(self):
-         return self._lowpasses.cutoffs[0]
-
-     @property
-     def cutoff_high(self):
-         return self._lowpasses.cutoffs[1]
-
-     @property
-     def stride(self):
-         return self._lowpasses.stride
-
-     @property
-     def pad(self):
-         return self._lowpasses.pad
-
-     @property
-     def zeros(self):
-         return self._lowpasses.zeros
-
-     @property
-     def fft(self):
-         return self._lowpasses.fft
-
-     def forward(self, input):
-         lows = self._lowpasses(input)
-         return lows[1] - lows[0]
-
-     def __repr__(self):
-         return simple_repr(self)
-
-
- def bandpass_filter(input: torch.Tensor, cutoff_low: float, cutoff_high: float,
-                     stride: int = 1, pad: bool = True,
-                     zeros: float = 8, fft: Optional[bool] = None):
-     """
-     Functional version of `BandPassFilter`, refer to this class for more information.
-     Output will not have a dimension inserted in the front.
-     """
-     return BandPassFilter(cutoff_low, cutoff_high, stride, pad, zeros, fft).to(input)(input)
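
As `BandPassFilter.forward` makes explicit, the band pass is just the difference of two low passes built with a shared kernel length. A small sketch checking that identity on random input, assuming julius is installed and the defaults `pad=True`, `stride=1` are used:

import torch
from julius.lowpass import LowPassFilters
from julius.filters import BandPassFilter

x = torch.randn(4, 1024)
band = BandPassFilter(1/8, 1/4)(x)
# LowPassFilters with the same cutoffs shares one kernel length,
# exactly as the BandPassFilter constructor does internally.
lows = LowPassFilters([1/8, 1/4])(x)
print(torch.allclose(band, lows[1] - lows[0]))  # expected: True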
spaces/Benson/text-generation/Examples/Blitzkrieg 3.md DELETED
@@ -1,63 +0,0 @@
-
- <h1>Blitzkrieg 3: A WW2 RTS Game with Neural Network AI</h1>
- <p>If you are a fan of strategic mastery and historical warfare, you may want to check out Blitzkrieg 3, a WW2 RTS game featuring a unique neural network AI that can challenge your tactical skills. In this article, we will give you an overview of what Blitzkrieg 3 is, how to play it, and what makes it stand out from other RTS games.</p>
- <h2>What is Blitzkrieg 3?</h2>
- <p>Blitzkrieg 3 is a WW2 RTS game developed and published by Nival, the creators of the previous Blitzkrieg games. It was released on June 2, 2017, after being in Steam Early Access since May 2015. It is the third and final title in the Blitzkrieg series, which is based on the events of World War II.</p>
- <h2>Blitzkrieg 3</h2><br /><p><b><b>Download</b> &#128279; <a href="https://bltlly.com/2v6L3A">https://bltlly.com/2v6L3A</a></b></p><br /><br />
- <h3>The game's history and development</h3>
- <p>The first trailer for Blitzkrieg 3 was published on YouTube on August 13, 2013, mixing live-action footage with concept art and actual in-game sequences and renders. The pre-order campaign launched in November 2014, and the game reached Steam Early Access in May 2015. During development, Nival involved the player community in shaping the direction of the project. The game was officially released on June 2, 2017, and shut down on December 14, 2022.</p>
- <h3>The game's main features and modes</h3>
- <p>Blitzkrieg 3 is a WW2 RTS game that offers a variety of features and modes for different types of players. Some of the main features and modes are:</p>
- <ul>
- <li>A single-player campaign covering the main period of World War II, from the invasion of Poland in 1939 to the capture of Berlin in 1945. Each of the three campaigns offers a unique mix of PvE, PvP and PvAI missions. In total, the game features more than 60 historical missions with over 200 authentic combat units.</li>
-
- <li>A neural network AI called Boris that plays at the level of a top player without using any hidden information about the enemy. Boris regularly analyzes the game session and makes neural-network-based predictions of the enemy's behavior. This approach allows it to devise sophisticated counter-strategies and bring them to life, making it challenging and unpredictable.</li>
- <li>Historical accuracy and authenticity that recreates the performance characteristics, tactics and appearance of real WW2 combat units and commanders. The game also features legendary battles such as Stalingrad, D-Day and El Alamein.</li>
- </ul>
- <h2>How do you play Blitzkrieg 3?</h2>
- <p>Blitzkrieg 3 is a WW2 RTS game that demands strategic thinking, tactical planning and quick decision-making. Depending on your preference, you can choose between the single-player campaign and the multiplayer mode.</p>
- <h3>The single-player campaign</h3>
- <p>The single-player campaign of Blitzkrieg 3 consists of three campaigns, each representing one of the major factions of World War II: the Axis, the Allies and the USSR. Each campaign has its own story, missions, objectives and challenges. The campaigns are:</p>
- <h4>The Axis campaign</h4>
- <p>The Axis campaign follows the German war machine as it invades and conquers Europe and North Africa. You will command the Wehrmacht and the Afrika Korps, led by famous generals such as Erwin Rommel, Heinz Guderian and Erich von Manstein. You will face resistance from the Polish, French, British and Soviet armies, as well as partisan movements in Yugoslavia and Greece. You will also witness the rise and fall of the Third Reich, from the Blitzkrieg to the Battle of Berlin.</p>
- <h4>The Allies campaign</h4>
-
- <h4>The USSR campaign</h4>
- <p>The USSR campaign follows the Soviet Red Army as it defends its homeland and pushes back the Nazi invaders. You will command Soviet troops, led by legendary marshals such as Georgy Zhukov, Konstantin Rokossovsky and Ivan Konev. You will take part in the brutal battles of Moscow, Stalingrad, Kursk, Leningrad and Sevastopol. You will also witness the heroism and resilience of the Soviet people, from the siege of Leningrad to the assault on the Reichstag.</p>
- <p></p>
- <h3>The multiplayer mode</h3>
- <p>The multiplayer mode of Blitzkrieg 3 lets you play with the Neural Network AI, with friends and with other players online. You can choose between two modes: Skirmish and Assault.</p>
- <h4>The Skirmish mode</h4>
- <p>Skirmish is a classic RTS mode where you can play 1x1, 2x2 or 3x3 matches against other players or AI opponents. You can choose from different maps, settings and factions. The objective is to destroy all enemy units and buildings or to capture all strategic points on the map. You can also customize your army composition, tactics and commander abilities.</p>
- <h4>The Assault mode</h4>
- <p>Assault is a unique RTS mode where one player attacks and the other defends their fortifications. The attacker has limited time to break through the defenses and capture the defender's headquarters. The defender has to use their resources wisely to build and upgrade fortifications and repel the attacker's assaults. The mode is asynchronous, meaning you can attack or defend at any time without waiting for your opponent to be online.</p> <h2>What makes Blitzkrieg 3 unique?</h2>
- <p>Blitzkrieg 3 is a WW2 RTS game that stands out from other RTS games for two main reasons: its neural network AI called Boris, and its historical accuracy and authenticity.</p>
- <h3>The neural network AI called Boris</h3>
-
- <h4>How does Boris work?</h4>
- <p>Boris works using a combination of machine learning, deep learning and reinforcement learning techniques. It learns from its own experience, from other players' feedback and from data collected by Nival. It also uses a neural network to generate and evaluate possible actions and outcomes. It can adjust its difficulty level, army composition, tactics and commander abilities according to the situation and the opponent. It can also learn from its mistakes and improve over time.</p>
- <h4>How do you challenge Boris?</h4>
- <p>If you want to challenge Boris, you can do so in two ways: by playing against it in Skirmish mode or by attacking its fortifications in Assault mode. In Skirmish mode, you can choose the map, settings and faction for both yourself and Boris. You can also set Boris's difficulty level, from easy to impossible. In Assault mode, you can choose one of Boris's fortifications to attack; these are randomly generated from its neural network. You can also see how other players have fared against Boris and compare your results with theirs.</p>
41
- <h3>La exactitud histórica y la autenticidad del juego</h3>
42
- <p>Otro aspecto que hace que Blitzkrieg 3 sea único es su exactitud y autenticidad histórica. El juego recrea las características de rendimiento, tácticas y apariencia de unidades de combate reales y comandantes de la Segunda Guerra Mundial. El juego también cuenta con batallas legendarias como Stalingrado, Día D y El Alamein. </p>
43
- <h4>Las misiones y batallas históricas</h4>
44
-
45
- <h4>Las unidades y comandantes de combate auténticos</h4>
46
- <p>El juego cuenta con más de 200 unidades de combate auténticas de la Segunda Guerra Mundial, incluyendo tanques, infantería, artillería, aviones y barcos. Cada unidad tiene sus propias características de rendimiento, como velocidad, blindaje, potencia de fuego, rango y precisión que se basan en datos históricos reales. El juego también cuenta con más de 20 comandantes reales de la Segunda Guerra Mundial, como Erwin Rommel, Bernard Montgomery y Georgy Zhukov. Cada comandante tiene su propio retrato, biografía, habilidades y habilidades que influyen en el desempeño y comportamiento de sus unidades en el campo de batalla. </p>
47
- <h2>Conclusión y preguntas frecuentes</h2>
48
- <p>Blitzkrieg 3 es un juego RTS de la Segunda Guerra Mundial que ofrece una IA de red neuronal única que puede desafiar tus habilidades tácticas, así como una precisión y autenticidad histórica que recrea los eventos y escenarios de la Segunda Guerra Mundial. Ya sea que prefieras jugar solo o con otros en línea, Blitzkrieg 3 te proporcionará horas de dominio estratégico y guerra histórica. </p>
49
- <p>Aquí hay algunas preguntas frecuentes sobre Blitzkrieg 3:</p>
50
- <ul>
51
- <li><b>Q: ¿Todavía se puede jugar a Blitzkrieg 3? </b></li>
52
- <li>A: Sí, Blitzkrieg 3 todavía se puede jugar en línea a través de Steam o sin conexión a través de GOG.com. Sin embargo, los servidores oficiales se cerraron el 14 de diciembre de 2022 debido a la baja base de jugadores y los altos costos de mantenimiento. </li>
53
- <li><b>Q: ¿Cuánto cuesta Blitzkrieg 3? </b></li>
54
- <li>A: Blitzkrieg 3 cuesta $19.99 USD en Steam o $14.99 USD en GOG.com. También tiene dos DLC: Deluxe Edition Upgrade ($9.99 USD) que agrega cuatro misiones exclusivas, cuatro comandantes legendarios y cuatro unidades de combate exclusivas; y Digital Deluxe Edition Upgrade ($19.99 USD) que agrega el DLC anterior más cuatro misiones exclusivas, cuatro comandantes más legendarios y cuatro unidades de combate más exclusivas. </li>
55
- <li><b>Q: ¿Cuáles son los requisitos del sistema para Blitzkrieg 3?</b></li>
56
-
57
- <li><b>Q: ¿Cómo puedo aprender más sobre Blitzkrieg 3?</b></li>
58
- <li>A: Puedes aprender más sobre Blitzkrieg 3 visitando su sitio web oficial , su página de Steam , su página GOG.com , su página de Facebook , su página de Twitter , o su canal de YouTube . También puede leer comentarios, guías y consejos de otros jugadores y críticos en línea. </li>
59
- <li><b>Q: ¿Cómo puedo contactar a los desarrolladores de Blitzkrieg 3?</b></li>
60
- <li>A: Puede ponerse en contacto con los desarrolladores de Blitzkrieg 3 enviando un correo electrónico a [email protected] o rellenando el formulario de contacto en su sitio web . También puedes unirte a su servidor Discord o a su comunidad de Steam para chatear con ellos y otros jugadores. </li>
61
- </ul></p> 64aa2da5cf<br />
62
- <br />
63
- <br />
spaces/Benson/text-generation/Examples/Colorear El Color Del Libro Por Nmero Mod Apk.md DELETED
@@ -1,50 +0,0 @@
1
-
2
- <h1>Coloring Book Color by Number Mod APK: A Fun and Relaxing Way to Unleash Your Creativity</h1>
3
- <p>Do you love coloring books but hate the hassle of buying them, carrying them around, and finding a place to color in them? Would you like to have access to thousands of beautiful pictures that you can color anytime, anywhere, and however you like? If you answered yes to either of these questions, then you will love Coloring Book Color by Number Mod APK.</p>
4
- <h2>coloring book color by number mod apk</h2><br /><p><b><b>Download File</b> &#9675; <a href="https://bltlly.com/2v6K1x">https://bltlly.com/2v6K1x</a></b></p><br /><br />
5
- <h2>What is Coloring Book Color by Number Mod APK?</h2>
6
- <p>Coloring Book Color by Number Mod APK is a modified version of the popular coloring app Coloring Book Color by Number. This app lets you color hundreds of amazing pictures by following the numbers on each pixel. You can choose from various categories such as animals, flowers, mandalas, landscapes, cartoons, and more. You can also customize your coloring experience by changing the size, shape, and color of the pixels, as well as by using different coloring modes and tools.</p>
7
- <h3>Features of Coloring Book Color by Number Mod APK</h3>
8
- <h4>- Thousands of pictures to choose from</h4>
9
- <p>One of the best features of Coloring Book Color by Number Mod APK is that it offers a huge collection of pictures that you can color for free. You can browse through different categories and themes, or use the search feature to find your favorite pictures. You can also save your progress and resume your coloring at any time.</p>
10
- <h4>- Easy and intuitive coloring interface</h4>
11
- <p>Another great feature of Coloring Book Color by Number Mod APK is its simple, user-friendly interface that makes coloring easy and fun. You just tap the pixels that match the numbers at the bottom of the screen and watch your picture come to life. You can also zoom in and out, move the picture, undo and redo your actions, and use hints if you get stuck.</p>
12
- <p></p>
13
- <h4>- Different coloring modes and tools</h4>
14
-
15
- <h4>- Share your artwork with friends and family</h4>
16
- <p>Once you finish coloring a picture, you can share it with your friends and family via social media or messaging apps. You can also save it to your device's gallery or set it as your wallpaper. You can likewise view other users' artwork in the app gallery and rate it.</p>
17
- <h3>How do you download and install Coloring Book Color by Number Mod APK?</h3>
18
- <h4>- Step 1: Download the APK file from a trusted source</h4>
19
- <p>The first step to enjoying Coloring Book Color by Number Mod APK is to download the APK file from a reliable source. You can use this link to download the latest version of the app.</p>
20
- <h4>- Step 2: Enable unknown sources on your device</h4>
21
- <p>The next step is to enable unknown sources on your device. This will allow you to install apps that do not come from the official app store. To do this, go to your device settings, then to security, and turn on the option to allow unknown sources. You may need to confirm this action in a pop-up message.</p>
22
- <h4>- Step 3: Install the APK file and enjoy the app</h4>
23
- <p>The final step is to install the APK file and enjoy the app. To do this, find the downloaded file in your device's storage, then tap it to start the installation process. You may need to grant the app some permissions, such as access to your storage, camera, and microphone. Once the installation is complete, you can open the app and start coloring.</p>
24
- <h3>Benefits of using Coloring Book Color by Number Mod APK</h3>
25
- <h4>- No ads or in-app purchases</h4>
26
- <p>One of the main benefits of using Coloring Book Color by Number Mod APK is that it removes all the annoying ads and in-app purchases that the original app has. This means you can enjoy coloring without interruptions or distractions. You can also save money and time by not having to buy coins or subscriptions to unlock more pictures and features.</p>
27
-
28
- <p>Another benefit of using Coloring Book Color by Number Mod APK is that it gives you unlimited access to all the pictures and features the app offers. You can color any picture you want, from any category or theme, without limitations or restrictions. You can also use all the coloring modes and tools the app offers and tailor your coloring experience to your liking.</p>
29
- <h4>- Improve your concentration and mood</h4>
30
- <p>A third benefit of using Coloring Book Color by Number Mod APK is that it can help improve your concentration and mood. Coloring is a relaxing, therapeutic activity that can reduce stress, anxiety, and boredom. It can also boost your creativity, confidence, and self-esteem. By coloring by numbers, you can also train your brain to focus and pay attention to detail.</p>
31
- <h4>- Express your personality and style</h4>
32
- <p>A fourth benefit of using Coloring Book Color by Number Mod APK is that it can help you express your personality and style. Coloring is an art form that can reflect your emotions, preferences, and tastes. You can choose colors that suit your mood, or experiment with different combinations and contrasts. You can also show your artwork to others and receive comments and compliments.</p>
33
- <h2>Conclusion</h2>
34
-
35
- <h3>Frequently asked questions</h3>
36
- <p>Here are some frequently asked questions about Coloring Book Color by Number Mod APK:</p>
37
- <ol>
38
- <li>Is Coloring Book Color by Number Mod APK safe to use?</li>
39
- <p>Yes, Coloring Book Color by Number Mod APK is safe to use as long as you download it from a reliable source. However, you should always be careful when installing apps from unknown sources, as they may contain viruses or malware that can harm your device.</p>
40
- <li>What are the requirements for using Coloring Book Color by Number Mod APK?</li>
41
- <p>To use Coloring Book Color by Number Mod APK, you need an Android device running Android 4.1 or higher, with at least 100 MB of free storage space.</p>
42
- <li>Can I use Coloring Book Color by Number Mod APK offline?</li>
43
- <p>Yes, you can use Coloring Book Color by Number Mod APK without an Internet connection. However, you may need an Internet connection to download new pictures or share your artwork online.</p>
44
- <li>Can I update Coloring Book Color by Number Mod APK?</li>
45
- <p>No, you cannot update Coloring Book Color by Number Mod APK through the official app store, since it is a modified version of the original app. However, you can check for new versions of the mod APK at the source where you downloaded it.</p>
46
- <li>Can I request new pictures for Coloring Book Color by Number Mod APK?</li>
47
- <p>Yes, you can request new pictures for Coloring Book Color by Number Mod APK by contacting the app's developers. You can find their contact information in the app settings or on their website. You can also suggest new categories or themes you would like to see in the app.</p>
48
- <p>I hope you enjoyed reading this article and learned something new. If you have any questions or comments, feel free to leave a comment below. Thank you for your time and attention.</p>
49
- <br />
50
- <br />
spaces/Big-Web/MMSD/env/Lib/site-packages/botocore/paginate.py DELETED
@@ -1,720 +0,0 @@
1
- # Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License"). You
4
- # may not use this file except in compliance with the License. A copy of
5
- # the License is located at
6
- #
7
- # http://aws.amazon.com/apache2.0/
8
- #
9
- # or in the "license" file accompanying this file. This file is
10
- # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11
- # ANY KIND, either express or implied. See the License for the specific
12
- # language governing permissions and limitations under the License.
13
-
14
- import base64
15
- import json
16
- import logging
17
- from itertools import tee
18
-
19
- import jmespath
20
-
21
- from botocore.exceptions import PaginationError
22
- from botocore.utils import merge_dicts, set_value_from_jmespath
23
-
24
- log = logging.getLogger(__name__)
25
-
26
-
27
- class TokenEncoder:
28
- """Encodes dictionaries into opaque strings.
29
-
30
- This for the most part json dumps + base64 encoding, but also supports
31
- having bytes in the dictionary in addition to the types that json can
32
- handle by default.
33
-
34
- This is intended for use in encoding pagination tokens, which in some
35
- cases can be complex structures and / or contain bytes.
36
- """
37
-
38
- def encode(self, token):
39
- """Encodes a dictionary to an opaque string.
40
-
41
- :type token: dict
42
- :param token: A dictionary containing pagination information,
43
- particularly the service pagination token(s) but also other boto
44
- metadata.
45
-
46
- :rtype: str
47
- :returns: An opaque string
48
- """
49
- try:
50
- # Try just using json dumps first to avoid having to traverse
51
- # and encode the dict. In 99.9999% of cases this will work.
52
- json_string = json.dumps(token)
53
- except (TypeError, UnicodeDecodeError):
54
- # If normal dumping failed, go through and base64 encode all bytes.
55
- encoded_token, encoded_keys = self._encode(token, [])
56
-
57
- # Save the list of all the encoded key paths. We can safely
58
- # assume that no service will ever use this key.
59
- encoded_token['boto_encoded_keys'] = encoded_keys
60
-
61
- # Now that the bytes are all encoded, dump the json.
62
- json_string = json.dumps(encoded_token)
63
-
64
- # base64 encode the json string to produce an opaque token string.
65
- return base64.b64encode(json_string.encode('utf-8')).decode('utf-8')
66
-
67
- def _encode(self, data, path):
68
- """Encode bytes in given data, keeping track of the path traversed."""
69
- if isinstance(data, dict):
70
- return self._encode_dict(data, path)
71
- elif isinstance(data, list):
72
- return self._encode_list(data, path)
73
- elif isinstance(data, bytes):
74
- return self._encode_bytes(data, path)
75
- else:
76
- return data, []
77
-
78
- def _encode_list(self, data, path):
79
- """Encode any bytes in a list, noting the index of what is encoded."""
80
- new_data = []
81
- encoded = []
82
- for i, value in enumerate(data):
83
- new_path = path + [i]
84
- new_value, new_encoded = self._encode(value, new_path)
85
- new_data.append(new_value)
86
- encoded.extend(new_encoded)
87
- return new_data, encoded
88
-
89
- def _encode_dict(self, data, path):
90
- """Encode any bytes in a dict, noting the index of what is encoded."""
91
- new_data = {}
92
- encoded = []
93
- for key, value in data.items():
94
- new_path = path + [key]
95
- new_value, new_encoded = self._encode(value, new_path)
96
- new_data[key] = new_value
97
- encoded.extend(new_encoded)
98
- return new_data, encoded
99
-
100
- def _encode_bytes(self, data, path):
101
- """Base64 encode a byte string."""
102
- return base64.b64encode(data).decode('utf-8'), [path]
103
-
104
-
105
- class TokenDecoder:
106
- """Decodes token strings back into dictionaries.
107
-
108
- This performs the inverse operation to the TokenEncoder, accepting
109
- opaque strings and decoding them into a useable form.
110
- """
111
-
112
- def decode(self, token):
113
- """Decodes an opaque string to a dictionary.
114
-
115
- :type token: str
116
- :param token: A token string given by the botocore pagination
117
- interface.
118
-
119
- :rtype: dict
120
- :returns: A dictionary containing pagination information,
121
- particularly the service pagination token(s) but also other boto
122
- metadata.
123
- """
124
- json_string = base64.b64decode(token.encode('utf-8')).decode('utf-8')
125
- decoded_token = json.loads(json_string)
126
-
127
- # Remove the encoding metadata as it is read since it will no longer
128
- # be needed.
129
- encoded_keys = decoded_token.pop('boto_encoded_keys', None)
130
- if encoded_keys is None:
131
- return decoded_token
132
- else:
133
- return self._decode(decoded_token, encoded_keys)
134
-
135
- def _decode(self, token, encoded_keys):
136
- """Find each encoded value and decode it."""
137
- for key in encoded_keys:
138
- encoded = self._path_get(token, key)
139
- decoded = base64.b64decode(encoded.encode('utf-8'))
140
- self._path_set(token, key, decoded)
141
- return token
142
-
143
- def _path_get(self, data, path):
144
- """Return the nested data at the given path.
145
-
146
- For instance:
147
- data = {'foo': ['bar', 'baz']}
148
- path = ['foo', 0]
149
- ==> 'bar'
150
- """
151
- # jmespath isn't used here because it would be difficult to actually
152
- # create the jmespath query when taking all of the unknowns of key
153
- # structure into account. Gross though this is, it is simple and not
154
- # very error prone.
155
- d = data
156
- for step in path:
157
- d = d[step]
158
- return d
159
-
160
- def _path_set(self, data, path, value):
161
- """Set the value of a key in the given data.
162
-
163
- Example:
164
- data = {'foo': ['bar', 'baz']}
165
- path = ['foo', 1]
166
- value = 'bin'
167
- ==> data = {'foo': ['bar', 'bin']}
168
- """
169
- container = self._path_get(data, path[:-1])
170
- container[path[-1]] = value
171
-
172
-
173
- class PaginatorModel:
174
- def __init__(self, paginator_config):
175
- self._paginator_config = paginator_config['pagination']
176
-
177
- def get_paginator(self, operation_name):
178
- try:
179
- single_paginator_config = self._paginator_config[operation_name]
180
- except KeyError:
181
- raise ValueError(
182
- "Paginator for operation does not exist: %s" % operation_name
183
- )
184
- return single_paginator_config
185
-
186
-
187
- class PageIterator:
188
- """An iterable object to pagiante API results.
189
- Please note it is NOT a python iterator.
190
- Use ``iter`` to wrap this as a generator.
191
- """
192
-
193
- def __init__(
194
- self,
195
- method,
196
- input_token,
197
- output_token,
198
- more_results,
199
- result_keys,
200
- non_aggregate_keys,
201
- limit_key,
202
- max_items,
203
- starting_token,
204
- page_size,
205
- op_kwargs,
206
- ):
207
- self._method = method
208
- self._input_token = input_token
209
- self._output_token = output_token
210
- self._more_results = more_results
211
- self._result_keys = result_keys
212
- self._max_items = max_items
213
- self._limit_key = limit_key
214
- self._starting_token = starting_token
215
- self._page_size = page_size
216
- self._op_kwargs = op_kwargs
217
- self._resume_token = None
218
- self._non_aggregate_key_exprs = non_aggregate_keys
219
- self._non_aggregate_part = {}
220
- self._token_encoder = TokenEncoder()
221
- self._token_decoder = TokenDecoder()
222
-
223
- @property
224
- def result_keys(self):
225
- return self._result_keys
226
-
227
- @property
228
- def resume_token(self):
229
- """Token to specify to resume pagination."""
230
- return self._resume_token
231
-
232
- @resume_token.setter
233
- def resume_token(self, value):
234
- if not isinstance(value, dict):
235
- raise ValueError("Bad starting token: %s" % value)
236
-
237
- if 'boto_truncate_amount' in value:
238
- token_keys = sorted(self._input_token + ['boto_truncate_amount'])
239
- else:
240
- token_keys = sorted(self._input_token)
241
- dict_keys = sorted(value.keys())
242
-
243
- if token_keys == dict_keys:
244
- self._resume_token = self._token_encoder.encode(value)
245
- else:
246
- raise ValueError("Bad starting token: %s" % value)
247
-
248
- @property
249
- def non_aggregate_part(self):
250
- return self._non_aggregate_part
251
-
252
- def __iter__(self):
253
- current_kwargs = self._op_kwargs
254
- previous_next_token = None
255
- next_token = {key: None for key in self._input_token}
256
- if self._starting_token is not None:
257
- # If the starting token exists, populate the next_token with the
258
- # values inside it. This ensures that we have the service's
259
- # pagination token on hand if we need to truncate after the
260
- # first response.
261
- next_token = self._parse_starting_token()[0]
262
- # The number of items from result_key we've seen so far.
263
- total_items = 0
264
- first_request = True
265
- primary_result_key = self.result_keys[0]
266
- starting_truncation = 0
267
- self._inject_starting_params(current_kwargs)
268
- while True:
269
- response = self._make_request(current_kwargs)
270
- parsed = self._extract_parsed_response(response)
271
- if first_request:
272
- # The first request is handled differently. We could
273
- # possibly have a resume/starting token that tells us where
274
- # to index into the retrieved page.
275
- if self._starting_token is not None:
276
- starting_truncation = self._handle_first_request(
277
- parsed, primary_result_key, starting_truncation
278
- )
279
- first_request = False
280
- self._record_non_aggregate_key_values(parsed)
281
- else:
282
- # If this isn't the first request, we have already sliced into
283
- # the first request and had to make additional requests after.
284
- # We no longer need to add this to truncation.
285
- starting_truncation = 0
286
- current_response = primary_result_key.search(parsed)
287
- if current_response is None:
288
- current_response = []
289
- num_current_response = len(current_response)
290
- truncate_amount = 0
291
- if self._max_items is not None:
292
- truncate_amount = (
293
- total_items + num_current_response - self._max_items
294
- )
295
- if truncate_amount > 0:
296
- self._truncate_response(
297
- parsed,
298
- primary_result_key,
299
- truncate_amount,
300
- starting_truncation,
301
- next_token,
302
- )
303
- yield response
304
- break
305
- else:
306
- yield response
307
- total_items += num_current_response
308
- next_token = self._get_next_token(parsed)
309
- if all(t is None for t in next_token.values()):
310
- break
311
- if (
312
- self._max_items is not None
313
- and total_items == self._max_items
314
- ):
315
- # We're on a page boundary so we can set the current
316
- # next token to be the resume token.
317
- self.resume_token = next_token
318
- break
319
- if (
320
- previous_next_token is not None
321
- and previous_next_token == next_token
322
- ):
323
- message = (
324
- f"The same next token was received "
325
- f"twice: {next_token}"
326
- )
327
- raise PaginationError(message=message)
328
- self._inject_token_into_kwargs(current_kwargs, next_token)
329
- previous_next_token = next_token
330
-
331
- def search(self, expression):
332
- """Applies a JMESPath expression to a paginator
333
-
334
- Each page of results is searched using the provided JMESPath
335
- expression. If the result is not a list, it is yielded
336
- directly. If the result is a list, each element in the result
337
- is yielded individually (essentially implementing a flatmap in
338
- which the JMESPath search is the mapping function).
339
-
340
- :type expression: str
341
- :param expression: JMESPath expression to apply to each page.
342
-
343
- :return: Returns an iterator that yields the individual
344
- elements of applying a JMESPath expression to each page of
345
- results.
346
- """
347
- compiled = jmespath.compile(expression)
348
- for page in self:
349
- results = compiled.search(page)
350
- if isinstance(results, list):
351
- yield from results
352
- else:
353
- # Yield result directly if it is not a list.
354
- yield results
355
-
356
- def _make_request(self, current_kwargs):
357
- return self._method(**current_kwargs)
358
-
359
- def _extract_parsed_response(self, response):
360
- return response
361
-
362
- def _record_non_aggregate_key_values(self, response):
363
- non_aggregate_keys = {}
364
- for expression in self._non_aggregate_key_exprs:
365
- result = expression.search(response)
366
- set_value_from_jmespath(
367
- non_aggregate_keys, expression.expression, result
368
- )
369
- self._non_aggregate_part = non_aggregate_keys
370
-
371
- def _inject_starting_params(self, op_kwargs):
372
- # If the user has specified a starting token we need to
373
- # inject that into the operation's kwargs.
374
- if self._starting_token is not None:
375
- # Don't need to do anything special if there is no starting
376
- # token specified.
377
- next_token = self._parse_starting_token()[0]
378
- self._inject_token_into_kwargs(op_kwargs, next_token)
379
- if self._page_size is not None:
380
- # Pass the page size as the parameter name for limiting
381
- # page size, also known as the limit_key.
382
- op_kwargs[self._limit_key] = self._page_size
383
-
384
- def _inject_token_into_kwargs(self, op_kwargs, next_token):
385
- for name, token in next_token.items():
386
- if (token is not None) and (token != 'None'):
387
- op_kwargs[name] = token
388
- elif name in op_kwargs:
389
- del op_kwargs[name]
390
-
391
- def _handle_first_request(
392
- self, parsed, primary_result_key, starting_truncation
393
- ):
394
- # If the payload is an array or string, we need to slice into it
395
- # and only return the truncated amount.
396
- starting_truncation = self._parse_starting_token()[1]
397
- all_data = primary_result_key.search(parsed)
398
- if isinstance(all_data, (list, str)):
399
- data = all_data[starting_truncation:]
400
- else:
401
- data = None
402
- set_value_from_jmespath(parsed, primary_result_key.expression, data)
403
- # We also need to truncate any secondary result keys
404
- # because they were not truncated in the previous last
405
- # response.
406
- for token in self.result_keys:
407
- if token == primary_result_key:
408
- continue
409
- sample = token.search(parsed)
410
- if isinstance(sample, list):
411
- empty_value = []
412
- elif isinstance(sample, str):
413
- empty_value = ''
414
- elif isinstance(sample, (int, float)):
415
- empty_value = 0
416
- else:
417
- empty_value = None
418
- set_value_from_jmespath(parsed, token.expression, empty_value)
419
- return starting_truncation
420
-
421
- def _truncate_response(
422
- self,
423
- parsed,
424
- primary_result_key,
425
- truncate_amount,
426
- starting_truncation,
427
- next_token,
428
- ):
429
- original = primary_result_key.search(parsed)
430
- if original is None:
431
- original = []
432
- amount_to_keep = len(original) - truncate_amount
433
- truncated = original[:amount_to_keep]
434
- set_value_from_jmespath(
435
- parsed, primary_result_key.expression, truncated
436
- )
437
- # The issue here is that even though we know how much we've truncated
438
- # we need to account for this globally including any starting
439
- # left truncation. For example:
440
- # Raw response: [0,1,2,3]
441
- # Starting index: 1
442
- # Max items: 1
443
- # Starting left truncation: [1, 2, 3]
444
- # End right truncation for max items: [1]
445
- # However, even though we only kept 1, this is post
446
- # left truncation so the next starting index should be 2, not 1
447
- # (left_truncation + amount_to_keep).
448
- next_token['boto_truncate_amount'] = (
449
- amount_to_keep + starting_truncation
450
- )
451
- self.resume_token = next_token
452
-
453
- def _get_next_token(self, parsed):
454
- if self._more_results is not None:
455
- if not self._more_results.search(parsed):
456
- return {}
457
- next_tokens = {}
458
- for output_token, input_key in zip(
459
- self._output_token, self._input_token
460
- ):
461
- next_token = output_token.search(parsed)
462
- # We do not want to include any empty strings as actual tokens.
463
- # Treat them as None.
464
- if next_token:
465
- next_tokens[input_key] = next_token
466
- else:
467
- next_tokens[input_key] = None
468
- return next_tokens
469
-
470
- def result_key_iters(self):
471
- teed_results = tee(self, len(self.result_keys))
472
- return [
473
- ResultKeyIterator(i, result_key)
474
- for i, result_key in zip(teed_results, self.result_keys)
475
- ]
476
-
477
- def build_full_result(self):
478
- complete_result = {}
479
- for response in self:
480
- page = response
481
- # We want to try to catch operation object pagination
482
- # and format correctly for those. They come in the form
483
- # of a tuple of two elements: (http_response, parsed_response).
484
- # We want the parsed_response as that is what the page iterator
485
- # uses. We can remove it though once operation objects are removed.
486
- if isinstance(response, tuple) and len(response) == 2:
487
- page = response[1]
488
- # We're incrementally building the full response page
489
- # by page. For each page in the response we need to
490
- # inject the necessary components from the page
491
- # into the complete_result.
492
- for result_expression in self.result_keys:
493
- # In order to incrementally update a result key
494
- # we need to search the existing value from complete_result,
495
- # then we need to search the _current_ page for the
496
- # current result key value. Then we append the current
497
- # value onto the existing value, and re-set that value
498
- # as the new value.
499
- result_value = result_expression.search(page)
500
- if result_value is None:
501
- continue
502
- existing_value = result_expression.search(complete_result)
503
- if existing_value is None:
504
- # Set the initial result
505
- set_value_from_jmespath(
506
- complete_result,
507
- result_expression.expression,
508
- result_value,
509
- )
510
- continue
511
- # Now both result_value and existing_value contain something
512
- if isinstance(result_value, list):
513
- existing_value.extend(result_value)
514
- elif isinstance(result_value, (int, float, str)):
515
- # Modify the existing result with the sum or concatenation
516
- set_value_from_jmespath(
517
- complete_result,
518
- result_expression.expression,
519
- existing_value + result_value,
520
- )
521
- merge_dicts(complete_result, self.non_aggregate_part)
522
- if self.resume_token is not None:
523
- complete_result['NextToken'] = self.resume_token
524
- return complete_result
525
-
526
- def _parse_starting_token(self):
527
- if self._starting_token is None:
528
- return None
529
-
530
- # The starting token is a dict passed as a base64 encoded string.
531
- next_token = self._starting_token
532
- try:
533
- next_token = self._token_decoder.decode(next_token)
534
- index = 0
535
- if 'boto_truncate_amount' in next_token:
536
- index = next_token.get('boto_truncate_amount')
537
- del next_token['boto_truncate_amount']
538
- except (ValueError, TypeError):
539
- next_token, index = self._parse_starting_token_deprecated()
540
- return next_token, index
541
-
542
- def _parse_starting_token_deprecated(self):
543
- """
544
- This handles parsing of old style starting tokens, and attempts to
545
- coerce them into the new style.
546
- """
547
- log.debug(
548
- "Attempting to fall back to old starting token parser. For "
549
- "token: %s" % self._starting_token
550
- )
551
- if self._starting_token is None:
552
- return None
553
-
554
- parts = self._starting_token.split('___')
555
- next_token = []
556
- index = 0
557
- if len(parts) == len(self._input_token) + 1:
558
- try:
559
- index = int(parts.pop())
560
- except ValueError:
561
- # This doesn't look like a valid old-style token, so we're
562
- # passing it along as an opaque service token.
563
- parts = [self._starting_token]
564
-
565
- for part in parts:
566
- if part == 'None':
567
- next_token.append(None)
568
- else:
569
- next_token.append(part)
570
- return self._convert_deprecated_starting_token(next_token), index
571
-
572
- def _convert_deprecated_starting_token(self, deprecated_token):
573
- """
574
- This attempts to convert a deprecated starting token into the new
575
- style.
576
- """
577
- len_deprecated_token = len(deprecated_token)
578
- len_input_token = len(self._input_token)
579
- if len_deprecated_token > len_input_token:
580
- raise ValueError("Bad starting token: %s" % self._starting_token)
581
- elif len_deprecated_token < len_input_token:
582
- log.debug(
583
- "Old format starting token does not contain all input "
584
- "tokens. Setting the rest, in order, as None."
585
- )
586
- for i in range(len_input_token - len_deprecated_token):
587
- deprecated_token.append(None)
588
- return dict(zip(self._input_token, deprecated_token))
589
-
590
-
591
- class Paginator:
592
- PAGE_ITERATOR_CLS = PageIterator
593
-
594
- def __init__(self, method, pagination_config, model):
595
- self._model = model
596
- self._method = method
597
- self._pagination_cfg = pagination_config
598
- self._output_token = self._get_output_tokens(self._pagination_cfg)
599
- self._input_token = self._get_input_tokens(self._pagination_cfg)
600
- self._more_results = self._get_more_results_token(self._pagination_cfg)
601
- self._non_aggregate_keys = self._get_non_aggregate_keys(
602
- self._pagination_cfg
603
- )
604
- self._result_keys = self._get_result_keys(self._pagination_cfg)
605
- self._limit_key = self._get_limit_key(self._pagination_cfg)
606
-
607
- @property
608
- def result_keys(self):
609
- return self._result_keys
610
-
611
- def _get_non_aggregate_keys(self, config):
612
- keys = []
613
- for key in config.get('non_aggregate_keys', []):
614
- keys.append(jmespath.compile(key))
615
- return keys
616
-
617
- def _get_output_tokens(self, config):
618
- output = []
619
- output_token = config['output_token']
620
- if not isinstance(output_token, list):
621
- output_token = [output_token]
622
- for config in output_token:
623
- output.append(jmespath.compile(config))
624
- return output
625
-
626
- def _get_input_tokens(self, config):
627
- input_token = self._pagination_cfg['input_token']
628
- if not isinstance(input_token, list):
629
- input_token = [input_token]
630
- return input_token
631
-
632
- def _get_more_results_token(self, config):
633
- more_results = config.get('more_results')
634
- if more_results is not None:
635
- return jmespath.compile(more_results)
636
-
637
- def _get_result_keys(self, config):
638
- result_key = config.get('result_key')
639
- if result_key is not None:
640
- if not isinstance(result_key, list):
641
- result_key = [result_key]
642
- result_key = [jmespath.compile(rk) for rk in result_key]
643
- return result_key
644
-
645
- def _get_limit_key(self, config):
646
- return config.get('limit_key')
647
-
648
- def paginate(self, **kwargs):
649
- """Create paginator object for an operation.
650
-
651
- This returns an iterable object. Iterating over
652
- this object will yield a single page of a response
653
- at a time.
654
-
655
- """
656
- page_params = self._extract_paging_params(kwargs)
657
- return self.PAGE_ITERATOR_CLS(
658
- self._method,
659
- self._input_token,
660
- self._output_token,
661
- self._more_results,
662
- self._result_keys,
663
- self._non_aggregate_keys,
664
- self._limit_key,
665
- page_params['MaxItems'],
666
- page_params['StartingToken'],
667
- page_params['PageSize'],
668
- kwargs,
669
- )
670
-
671
- def _extract_paging_params(self, kwargs):
672
- pagination_config = kwargs.pop('PaginationConfig', {})
673
- max_items = pagination_config.get('MaxItems', None)
674
- if max_items is not None:
675
- max_items = int(max_items)
676
- page_size = pagination_config.get('PageSize', None)
677
- if page_size is not None:
678
- if self._limit_key is None:
679
- raise PaginationError(
680
- message="PageSize parameter is not supported for the "
681
- "pagination interface for this operation."
682
- )
683
- input_members = self._model.input_shape.members
684
- limit_key_shape = input_members.get(self._limit_key)
685
- if limit_key_shape.type_name == 'string':
686
- if not isinstance(page_size, str):
687
- page_size = str(page_size)
688
- else:
689
- page_size = int(page_size)
690
- return {
691
- 'MaxItems': max_items,
692
- 'StartingToken': pagination_config.get('StartingToken', None),
693
- 'PageSize': page_size,
694
- }
695
-
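# A hedged usage sketch: in practice this class is reached through a boto3
# client's get_paginator() rather than constructed directly. The bucket name
# below is hypothetical, and credentials are assumed to be configured.
if __name__ == '__main__':
    import boto3  # runtime import; boto3 builds on top of botocore

    s3 = boto3.client('s3')
    paginator = s3.get_paginator('list_objects_v2')
    pages = paginator.paginate(
        Bucket='example-bucket',
        PaginationConfig={'MaxItems': 100, 'PageSize': 25},
    )
    # search() flattens the JMESPath result across every page.
    for key in pages.search('Contents[].Key'):
        print(key)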
696
-
697
- class ResultKeyIterator:
698
- """Iterates over the results of paginated responses.
699
-
700
- Each iterator is associated with a single result key.
701
- Iterating over this object will give you each element in
702
- the result key list.
703
-
704
- :param pages_iterator: An iterator that will give you
705
- pages of results (a ``PageIterator`` class).
706
- :param result_key: The JMESPath expression representing
707
- the result key.
708
-
709
- """
710
-
711
- def __init__(self, pages_iterator, result_key):
712
- self._pages_iterator = pages_iterator
713
- self.result_key = result_key
714
-
715
- def __iter__(self):
716
- for page in self._pages_iterator:
717
- results = self.result_key.search(page)
718
- if results is None:
719
- results = []
720
- yield from results
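# Minimal illustration of ResultKeyIterator with a stand-in pages iterator;
# real code would pass a PageIterator (see result_key_iters() above). The
# page data below is hypothetical.
if __name__ == '__main__':
    _fake_pages = iter([{'Users': ['alice', 'bob']}, {'Users': ['carol']}])
    _users = ResultKeyIterator(_fake_pages, jmespath.compile('Users'))
    assert list(_users) == ['alice', 'bob', 'carol']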
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_distutils/dir_util.py DELETED
@@ -1,243 +0,0 @@
1
- """distutils.dir_util
2
-
3
- Utility functions for manipulating directories and directory trees."""
4
-
5
- import os
6
- import errno
7
- from distutils.errors import DistutilsInternalError, DistutilsFileError
8
- from distutils import log
9
-
10
- # cache used by mkpath() -- in addition to cheapening redundant calls,
11
- # eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
12
- _path_created = {}
13
-
14
-
15
- def mkpath(name, mode=0o777, verbose=1, dry_run=0): # noqa: C901
16
- """Create a directory and any missing ancestor directories.
17
-
18
- If the directory already exists (or if 'name' is the empty string, which
19
- means the current directory, which of course exists), then do nothing.
20
- Raise DistutilsFileError if unable to create some directory along the way
21
- (eg. some sub-path exists, but is a file rather than a directory).
22
- If 'verbose' is true, print a one-line summary of each mkdir to stdout.
23
- Return the list of directories actually created.
24
-
25
- os.makedirs is not used because:
26
-
27
- a) It's new to Python 1.5.2, and
28
- b) it blows up if the directory already exists (in which case it should
29
- silently succeed).
30
- """
31
-
32
- global _path_created
33
-
34
- # Detect a common bug -- name is None
35
- if not isinstance(name, str):
36
- raise DistutilsInternalError(
37
- "mkpath: 'name' must be a string (got {!r})".format(name)
38
- )
39
-
40
- # XXX what's the better way to handle verbosity? print as we create
41
- # each directory in the path (the current behaviour), or only announce
42
- # the creation of the whole path? (quite easy to do the latter since
43
- # we're not using a recursive algorithm)
44
-
45
- name = os.path.normpath(name)
46
- created_dirs = []
47
- if os.path.isdir(name) or name == '':
48
- return created_dirs
49
- if _path_created.get(os.path.abspath(name)):
50
- return created_dirs
51
-
52
- (head, tail) = os.path.split(name)
53
- tails = [tail] # stack of lone dirs to create
54
-
55
- while head and tail and not os.path.isdir(head):
56
- (head, tail) = os.path.split(head)
57
- tails.insert(0, tail) # push next higher dir onto stack
58
-
59
- # now 'head' contains the deepest directory that already exists
60
- # (that is, the child of 'head' in 'name' is the highest directory
61
- # that does *not* exist)
62
- for d in tails:
63
- # print "head = %s, d = %s: " % (head, d),
64
- head = os.path.join(head, d)
65
- abs_head = os.path.abspath(head)
66
-
67
- if _path_created.get(abs_head):
68
- continue
69
-
70
- if verbose >= 1:
71
- log.info("creating %s", head)
72
-
73
- if not dry_run:
74
- try:
75
- os.mkdir(head, mode)
76
- except OSError as exc:
77
- if not (exc.errno == errno.EEXIST and os.path.isdir(head)):
78
- raise DistutilsFileError(
79
- "could not create '{}': {}".format(head, exc.args[-1])
80
- )
81
- created_dirs.append(head)
82
-
83
- _path_created[abs_head] = 1
84
- return created_dirs
85
-
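# Quick sketch of mkpath (the path below is hypothetical): creating a nested
# directory returns every directory actually created, and repeating the call
# is a cheap no-op thanks to the _path_created cache.
if __name__ == '__main__':
    print(mkpath('build/tmp/deep', verbose=0))
    # e.g. ['build', 'build/tmp', 'build/tmp/deep'] on POSIX
    print(mkpath('build/tmp/deep', verbose=0))  # [] -- already created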
86
-
87
- def create_tree(base_dir, files, mode=0o777, verbose=1, dry_run=0):
88
- """Create all the empty directories under 'base_dir' needed to put 'files'
89
- there.
90
-
91
- 'base_dir' is just the name of a directory which doesn't necessarily
92
- exist yet; 'files' is a list of filenames to be interpreted relative to
93
- 'base_dir'. 'base_dir' + the directory portion of every file in 'files'
94
- will be created if it doesn't already exist. 'mode', 'verbose' and
95
- 'dry_run' flags are as for 'mkpath()'.
96
- """
97
- # First get the list of directories to create
98
- need_dir = set()
99
- for file in files:
100
- need_dir.add(os.path.join(base_dir, os.path.dirname(file)))
101
-
102
- # Now create them
103
- for dir in sorted(need_dir):
104
- mkpath(dir, mode, verbose=verbose, dry_run=dry_run)
105
-
106
-
107
- def copy_tree( # noqa: C901
108
- src,
109
- dst,
110
- preserve_mode=1,
111
- preserve_times=1,
112
- preserve_symlinks=0,
113
- update=0,
114
- verbose=1,
115
- dry_run=0,
116
- ):
117
- """Copy an entire directory tree 'src' to a new location 'dst'.
118
-
119
- Both 'src' and 'dst' must be directory names. If 'src' is not a
120
- directory, raise DistutilsFileError. If 'dst' does not exist, it is
121
- created with 'mkpath()'. The end result of the copy is that every
122
- file in 'src' is copied to 'dst', and directories under 'src' are
123
- recursively copied to 'dst'. Return the list of files that were
124
- copied or might have been copied, using their output name. The
125
- return value is unaffected by 'update' or 'dry_run': it is simply
126
- the list of all files under 'src', with the names changed to be
127
- under 'dst'.
128
-
129
- 'preserve_mode' and 'preserve_times' are the same as for
130
- 'copy_file'; note that they only apply to regular files, not to
131
- directories. If 'preserve_symlinks' is true, symlinks will be
132
- copied as symlinks (on platforms that support them!); otherwise
133
- (the default), the destination of the symlink will be copied.
134
- 'update' and 'verbose' are the same as for 'copy_file'.
135
- """
136
- from distutils.file_util import copy_file
137
-
138
- if not dry_run and not os.path.isdir(src):
139
- raise DistutilsFileError("cannot copy tree '%s': not a directory" % src)
140
- try:
141
- names = os.listdir(src)
142
- except OSError as e:
143
- if dry_run:
144
- names = []
145
- else:
146
- raise DistutilsFileError(
147
- "error listing files in '{}': {}".format(src, e.strerror)
148
- )
149
-
150
- if not dry_run:
151
- mkpath(dst, verbose=verbose)
152
-
153
- outputs = []
154
-
155
- for n in names:
156
- src_name = os.path.join(src, n)
157
- dst_name = os.path.join(dst, n)
158
-
159
- if n.startswith('.nfs'):
160
- # skip NFS rename files
161
- continue
162
-
163
- if preserve_symlinks and os.path.islink(src_name):
164
- link_dest = os.readlink(src_name)
165
- if verbose >= 1:
166
- log.info("linking %s -> %s", dst_name, link_dest)
167
- if not dry_run:
168
- os.symlink(link_dest, dst_name)
169
- outputs.append(dst_name)
170
-
171
- elif os.path.isdir(src_name):
172
- outputs.extend(
173
- copy_tree(
174
- src_name,
175
- dst_name,
176
- preserve_mode,
177
- preserve_times,
178
- preserve_symlinks,
179
- update,
180
- verbose=verbose,
181
- dry_run=dry_run,
182
- )
183
- )
184
- else:
185
- copy_file(
186
- src_name,
187
- dst_name,
188
- preserve_mode,
189
- preserve_times,
190
- update,
191
- verbose=verbose,
192
- dry_run=dry_run,
193
- )
194
- outputs.append(dst_name)
195
-
196
- return outputs
197
-
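# Hedged sketch of copy_tree, assuming a hypothetical existing 'assets'
# directory: mirror it into 'build/assets', keeping symlinks as symlinks,
# and collect the names of the copied files.
if __name__ == '__main__':
    copied = copy_tree('assets', 'build/assets',
                       preserve_symlinks=1, verbose=0)
    for name in copied:
        print(name)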
198
-
199
- def _build_cmdtuple(path, cmdtuples):
200
- """Helper for remove_tree()."""
201
- for f in os.listdir(path):
202
- real_f = os.path.join(path, f)
203
- if os.path.isdir(real_f) and not os.path.islink(real_f):
204
- _build_cmdtuple(real_f, cmdtuples)
205
- else:
206
- cmdtuples.append((os.remove, real_f))
207
- cmdtuples.append((os.rmdir, path))
208
-
209
-
210
- def remove_tree(directory, verbose=1, dry_run=0):
211
- """Recursively remove an entire directory tree.
212
-
213
- Any errors are ignored (apart from being reported to stdout if 'verbose'
214
- is true).
215
- """
216
- global _path_created
217
-
218
- if verbose >= 1:
219
- log.info("removing '%s' (and everything under it)", directory)
220
- if dry_run:
221
- return
222
- cmdtuples = []
223
- _build_cmdtuple(directory, cmdtuples)
224
- for cmd in cmdtuples:
225
- try:
226
- cmd[0](cmd[1])
227
- # remove dir from cache if it's already there
228
- abspath = os.path.abspath(cmd[1])
229
- if abspath in _path_created:
230
- del _path_created[abspath]
231
- except OSError as exc:
232
- log.warn("error removing %s: %s", directory, exc)
233
-
234
-
235
- def ensure_relative(path):
236
- """Take the full path 'path', and make it a relative path.
237
-
238
- This is useful to make 'path' the second argument to os.path.join().
239
- """
240
- drive, path = os.path.splitdrive(path)
241
- if path[0:1] == os.sep:
242
- path = drive + path[1:]
243
- return path
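# Behaviour sketch for ensure_relative: the leading separator is stripped so
# the result can safely be used as the second argument to os.path.join().
if __name__ == '__main__':
    if os.sep == '/':  # POSIX
        assert ensure_relative('/usr/local/lib') == 'usr/local/lib'
    # On Windows the drive letter is kept but the slash is dropped,
    # e.g. ensure_relative('C:\\foo') == 'C:foo'.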
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/more_itertools/recipes.py DELETED
@@ -1,620 +0,0 @@
1
- """Imported from the recipes section of the itertools documentation.
2
-
3
- All functions taken from the recipes section of the itertools library docs
4
- [1]_.
5
- Some backward-compatible usability improvements have been made.
6
-
7
- .. [1] http://docs.python.org/library/itertools.html#recipes
8
-
9
- """
10
- import warnings
11
- from collections import deque
12
- from itertools import (
13
- chain,
14
- combinations,
15
- count,
16
- cycle,
17
- groupby,
18
- islice,
19
- repeat,
20
- starmap,
21
- tee,
22
- zip_longest,
23
- )
24
- import operator
25
- from random import randrange, sample, choice
26
-
27
- __all__ = [
28
- 'all_equal',
29
- 'consume',
30
- 'convolve',
31
- 'dotproduct',
32
- 'first_true',
33
- 'flatten',
34
- 'grouper',
35
- 'iter_except',
36
- 'ncycles',
37
- 'nth',
38
- 'nth_combination',
39
- 'padnone',
40
- 'pad_none',
41
- 'pairwise',
42
- 'partition',
43
- 'powerset',
44
- 'prepend',
45
- 'quantify',
46
- 'random_combination_with_replacement',
47
- 'random_combination',
48
- 'random_permutation',
49
- 'random_product',
50
- 'repeatfunc',
51
- 'roundrobin',
52
- 'tabulate',
53
- 'tail',
54
- 'take',
55
- 'unique_everseen',
56
- 'unique_justseen',
57
- ]
58
-
59
-
60
- def take(n, iterable):
61
- """Return first *n* items of the iterable as a list.
62
-
63
- >>> take(3, range(10))
64
- [0, 1, 2]
65
-
66
- If there are fewer than *n* items in the iterable, all of them are
67
- returned.
68
-
69
- >>> take(10, range(3))
70
- [0, 1, 2]
71
-
72
- """
73
- return list(islice(iterable, n))
74
-
75
-
76
- def tabulate(function, start=0):
77
- """Return an iterator over the results of ``func(start)``,
78
- ``func(start + 1)``, ``func(start + 2)``...
79
-
80
- *func* should be a function that accepts one integer argument.
81
-
82
- If *start* is not specified it defaults to 0. It will be incremented each
83
- time the iterator is advanced.
84
-
85
- >>> square = lambda x: x ** 2
86
- >>> iterator = tabulate(square, -3)
87
- >>> take(4, iterator)
88
- [9, 4, 1, 0]
89
-
90
- """
91
- return map(function, count(start))
92
-
93
-
94
- def tail(n, iterable):
95
- """Return an iterator over the last *n* items of *iterable*.
96
-
97
- >>> t = tail(3, 'ABCDEFG')
98
- >>> list(t)
99
- ['E', 'F', 'G']
100
-
101
- """
102
- return iter(deque(iterable, maxlen=n))
103
-
104
-
105
- def consume(iterator, n=None):
106
- """Advance *iterable* by *n* steps. If *n* is ``None``, consume it
107
- entirely.
108
-
109
- Efficiently exhausts an iterator without returning values. Defaults to
110
- consuming the whole iterator, but an optional second argument may be
111
- provided to limit consumption.
112
-
113
- >>> i = (x for x in range(10))
114
- >>> next(i)
115
- 0
116
- >>> consume(i, 3)
117
- >>> next(i)
118
- 4
119
- >>> consume(i)
120
- >>> next(i)
121
- Traceback (most recent call last):
122
- File "<stdin>", line 1, in <module>
123
- StopIteration
124
-
125
- If the iterator has fewer items remaining than the provided limit, the
126
- whole iterator will be consumed.
127
-
128
- >>> i = (x for x in range(3))
129
- >>> consume(i, 5)
130
- >>> next(i)
131
- Traceback (most recent call last):
132
- File "<stdin>", line 1, in <module>
133
- StopIteration
134
-
135
- """
136
- # Use functions that consume iterators at C speed.
137
- if n is None:
138
- # feed the entire iterator into a zero-length deque
139
- deque(iterator, maxlen=0)
140
- else:
141
- # advance to the empty slice starting at position n
142
- next(islice(iterator, n, n), None)
143
-
144
-
145
- def nth(iterable, n, default=None):
146
- """Returns the nth item or a default value.
147
-
148
- >>> l = range(10)
149
- >>> nth(l, 3)
150
- 3
151
- >>> nth(l, 20, "zebra")
152
- 'zebra'
153
-
154
- """
155
- return next(islice(iterable, n, None), default)
156
-
157
-
158
- def all_equal(iterable):
159
- """
160
- Returns ``True`` if all the elements are equal to each other.
161
-
162
- >>> all_equal('aaaa')
163
- True
164
- >>> all_equal('aaab')
165
- False
166
-
167
- """
168
- g = groupby(iterable)
169
- return next(g, True) and not next(g, False)
170
-
171
-
172
- def quantify(iterable, pred=bool):
173
- """Return how many times the predicate is true.
174
-
175
- >>> quantify([True, False, True])
176
- 2
177
-
178
- """
179
- return sum(map(pred, iterable))
180
-
181
-
182
- def pad_none(iterable):
183
- """Returns the sequence of elements and then returns ``None`` indefinitely.
184
-
185
- >>> take(5, pad_none(range(3)))
186
- [0, 1, 2, None, None]
187
-
188
- Useful for emulating the behavior of the built-in :func:`map` function.
189
-
190
- See also :func:`padded`.
191
-
192
- """
193
- return chain(iterable, repeat(None))
194
-
195
-
196
- padnone = pad_none
197
-
198
-
199
- def ncycles(iterable, n):
200
- """Returns the sequence elements *n* times
201
-
202
- >>> list(ncycles(["a", "b"], 3))
203
- ['a', 'b', 'a', 'b', 'a', 'b']
204
-
205
- """
206
- return chain.from_iterable(repeat(tuple(iterable), n))
207
-
208
-
209
- def dotproduct(vec1, vec2):
210
- """Returns the dot product of the two iterables.
211
-
212
- >>> dotproduct([10, 10], [20, 20])
213
- 400
214
-
215
- """
216
- return sum(map(operator.mul, vec1, vec2))
217
-
218
-
219
- def flatten(listOfLists):
220
- """Return an iterator flattening one level of nesting in a list of lists.
221
-
222
- >>> list(flatten([[0, 1], [2, 3]]))
223
- [0, 1, 2, 3]
224
-
225
- See also :func:`collapse`, which can flatten multiple levels of nesting.
226
-
227
- """
228
- return chain.from_iterable(listOfLists)
229
-
230
-
231
- def repeatfunc(func, times=None, *args):
232
- """Call *func* with *args* repeatedly, returning an iterable over the
233
- results.
234
-
235
- If *times* is specified, the iterable will terminate after that many
236
- repetitions:
237
-
238
- >>> from operator import add
239
- >>> times = 4
240
- >>> args = 3, 5
241
- >>> list(repeatfunc(add, times, *args))
242
- [8, 8, 8, 8]
243
-
244
- If *times* is ``None`` the iterable will not terminate:
245
-
246
- >>> from random import randrange
247
- >>> times = None
248
- >>> args = 1, 11
249
- >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
250
- [2, 4, 8, 1, 8, 4]
251
-
252
- """
253
- if times is None:
254
- return starmap(func, repeat(args))
255
- return starmap(func, repeat(args, times))
256
-
257
-
258
- def _pairwise(iterable):
259
- """Returns an iterator of paired items, overlapping, from the original
260
-
261
- >>> take(4, pairwise(count()))
262
- [(0, 1), (1, 2), (2, 3), (3, 4)]
263
-
264
- On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
265
-
266
- """
267
- a, b = tee(iterable)
268
- next(b, None)
269
- yield from zip(a, b)
270
-
271
-
272
- try:
273
- from itertools import pairwise as itertools_pairwise
274
- except ImportError:
275
- pairwise = _pairwise
276
- else:
277
-
278
- def pairwise(iterable):
279
- yield from itertools_pairwise(iterable)
280
-
281
- pairwise.__doc__ = _pairwise.__doc__
282
-
283
-
284
- def grouper(iterable, n, fillvalue=None):
285
- """Collect data into fixed-length chunks or blocks.
286
-
287
- >>> list(grouper('ABCDEFG', 3, 'x'))
288
- [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
289
-
290
- """
291
- if isinstance(iterable, int):
292
- warnings.warn(
293
- "grouper expects iterable as first parameter", DeprecationWarning
294
- )
295
- n, iterable = iterable, n
296
- args = [iter(iterable)] * n
297
- return zip_longest(fillvalue=fillvalue, *args)
298
-
299
-
300
- def roundrobin(*iterables):
301
- """Yields an item from each iterable, alternating between them.
302
-
303
- >>> list(roundrobin('ABC', 'D', 'EF'))
304
- ['A', 'D', 'E', 'B', 'F', 'C']
305
-
306
- This function produces the same output as :func:`interleave_longest`, but
307
- may perform better for some inputs (in particular when the number of
308
- iterables is small).
309
-
310
- """
311
- # Recipe credited to George Sakkis
312
- pending = len(iterables)
313
- nexts = cycle(iter(it).__next__ for it in iterables)
314
- while pending:
315
- try:
316
- for next in nexts:
317
- yield next()
318
- except StopIteration:
319
- pending -= 1
320
- nexts = cycle(islice(nexts, pending))
321
-
322
-
323
- def partition(pred, iterable):
324
- """
325
- Returns a 2-tuple of iterables derived from the input iterable.
326
- The first yields the items that have ``pred(item) == False``.
327
- The second yields the items that have ``pred(item) == True``.
328
-
329
- >>> is_odd = lambda x: x % 2 != 0
330
- >>> iterable = range(10)
331
- >>> even_items, odd_items = partition(is_odd, iterable)
332
- >>> list(even_items), list(odd_items)
333
- ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
334
-
335
- If *pred* is None, :func:`bool` is used.
336
-
337
- >>> iterable = [0, 1, False, True, '', ' ']
338
- >>> false_items, true_items = partition(None, iterable)
339
- >>> list(false_items), list(true_items)
340
- ([0, False, ''], [1, True, ' '])
341
-
342
- """
343
- if pred is None:
344
- pred = bool
345
-
346
- evaluations = ((pred(x), x) for x in iterable)
347
- t1, t2 = tee(evaluations)
348
- return (
349
- (x for (cond, x) in t1 if not cond),
350
- (x for (cond, x) in t2 if cond),
351
- )
352
-
353
-
354
- def powerset(iterable):
355
- """Yields all possible subsets of the iterable.
356
-
357
- >>> list(powerset([1, 2, 3]))
358
- [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
359
-
360
- :func:`powerset` will operate on iterables that aren't :class:`set`
361
- instances, so repeated elements in the input will produce repeated elements
362
- in the output. Use :func:`unique_everseen` on the input to avoid generating
363
- duplicates:
364
-
365
- >>> seq = [1, 1, 0]
366
- >>> list(powerset(seq))
367
- [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
368
- >>> from more_itertools import unique_everseen
369
- >>> list(powerset(unique_everseen(seq)))
370
- [(), (1,), (0,), (1, 0)]
371
-
372
- """
373
- s = list(iterable)
374
- return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
375
-
376
-
377
- def unique_everseen(iterable, key=None):
378
- """
379
- Yield unique elements, preserving order.
380
-
381
- >>> list(unique_everseen('AAAABBBCCDAABBB'))
382
- ['A', 'B', 'C', 'D']
383
- >>> list(unique_everseen('ABBCcAD', str.lower))
384
- ['A', 'B', 'C', 'D']
385
-
386
- Sequences with a mix of hashable and unhashable items can be used.
387
- The function will be slower (i.e., `O(n^2)`) for unhashable items.
388
-
389
- Remember that ``list`` objects are unhashable - you can use the *key*
390
- parameter to transform the list to a tuple (which is hashable) to
391
- avoid a slowdown.
392
-
393
- >>> iterable = ([1, 2], [2, 3], [1, 2])
394
- >>> list(unique_everseen(iterable)) # Slow
395
- [[1, 2], [2, 3]]
396
- >>> list(unique_everseen(iterable, key=tuple)) # Faster
397
- [[1, 2], [2, 3]]
398
-
399
- Similary, you may want to convert unhashable ``set`` objects with
400
- ``key=frozenset``. For ``dict`` objects,
401
- ``key=lambda x: frozenset(x.items())`` can be used.
402
-
403
- """
404
- seenset = set()
405
- seenset_add = seenset.add
406
- seenlist = []
407
- seenlist_add = seenlist.append
408
- use_key = key is not None
409
-
410
- for element in iterable:
411
- k = key(element) if use_key else element
412
- try:
413
- if k not in seenset:
414
- seenset_add(k)
415
- yield element
416
- except TypeError:
417
- if k not in seenlist:
418
- seenlist_add(k)
419
- yield element
420
-
421
-
422
- def unique_justseen(iterable, key=None):
423
- """Yields elements in order, ignoring serial duplicates
424
-
425
- >>> list(unique_justseen('AAAABBBCCDAABBB'))
426
- ['A', 'B', 'C', 'D', 'A', 'B']
427
- >>> list(unique_justseen('ABBCcAD', str.lower))
428
- ['A', 'B', 'C', 'A', 'D']
429
-
430
- """
431
- return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
432
-
433
-
434
- def iter_except(func, exception, first=None):
435
- """Yields results from a function repeatedly until an exception is raised.
436
-
437
- Converts a call-until-exception interface to an iterator interface.
438
- Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
439
- to end the loop.
440
-
441
- >>> l = [0, 1, 2]
442
- >>> list(iter_except(l.pop, IndexError))
443
- [2, 1, 0]
444
-
445
- """
446
- try:
447
- if first is not None:
448
- yield first()
449
- while 1:
450
- yield func()
451
- except exception:
452
- pass
453
-
454
-
455
- def first_true(iterable, default=None, pred=None):
456
- """
457
- Returns the first true value in the iterable.
458
-
459
- If no true value is found, returns *default*
460
-
461
- If *pred* is not None, returns the first item for which
462
- ``pred(item) == True`` .
463
-
464
- >>> first_true(range(10))
465
- 1
466
- >>> first_true(range(10), pred=lambda x: x > 5)
467
- 6
468
- >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
469
- 'missing'
470
-
471
- """
472
- return next(filter(pred, iterable), default)
473
-
474
-
475
- def random_product(*args, repeat=1):
476
- """Draw an item at random from each of the input iterables.
477
-
478
- >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
479
- ('c', 3, 'Z')
480
-
481
- If *repeat* is provided as a keyword argument, that many items will be
482
- drawn from each iterable.
483
-
484
- >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
485
- ('a', 2, 'd', 3)
486
-
487
- This equivalent to taking a random selection from
488
- ``itertools.product(*args, **kwarg)``.
489
-
490
- """
491
- pools = [tuple(pool) for pool in args] * repeat
492
- return tuple(choice(pool) for pool in pools)
493
-
494
-
495
- def random_permutation(iterable, r=None):
496
- """Return a random *r* length permutation of the elements in *iterable*.
497
-
498
- If *r* is not specified or is ``None``, then *r* defaults to the length of
499
- *iterable*.
500
-
501
- >>> random_permutation(range(5)) # doctest:+SKIP
502
- (3, 4, 0, 1, 2)
503
-
504
- This equivalent to taking a random selection from
505
- ``itertools.permutations(iterable, r)``.
506
-
507
- """
508
- pool = tuple(iterable)
509
- r = len(pool) if r is None else r
510
- return tuple(sample(pool, r))
511
-
512
-
513
- def random_combination(iterable, r):
514
- """Return a random *r* length subsequence of the elements in *iterable*.
515
-
516
- >>> random_combination(range(5), 3) # doctest:+SKIP
517
- (2, 3, 4)
518
-
519
- This equivalent to taking a random selection from
520
- ``itertools.combinations(iterable, r)``.
521
-
522
- """
523
- pool = tuple(iterable)
524
- n = len(pool)
525
- indices = sorted(sample(range(n), r))
526
- return tuple(pool[i] for i in indices)
527
-
528
-
529
- def random_combination_with_replacement(iterable, r):
530
- """Return a random *r* length subsequence of elements in *iterable*,
531
- allowing individual elements to be repeated.
532
-
533
- >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
534
- (0, 0, 1, 2, 2)
535
-
536
- This equivalent to taking a random selection from
537
- ``itertools.combinations_with_replacement(iterable, r)``.
538
-
539
- """
540
- pool = tuple(iterable)
541
- n = len(pool)
542
- indices = sorted(randrange(n) for i in range(r))
543
- return tuple(pool[i] for i in indices)
544
-
545
-
546
- def nth_combination(iterable, r, index):
547
- """Equivalent to ``list(combinations(iterable, r))[index]``.
548
-
549
- The subsequences of *iterable* that are of length *r* can be ordered
550
- lexicographically. :func:`nth_combination` computes the subsequence at
551
- sort position *index* directly, without computing the previous
552
- subsequences.
553
-
554
- >>> nth_combination(range(5), 3, 5)
555
- (0, 3, 4)
556
-
557
- ``ValueError`` will be raised If *r* is negative or greater than the length
558
- of *iterable*.
559
- ``IndexError`` will be raised if the given *index* is invalid.
560
- """
561
- pool = tuple(iterable)
562
- n = len(pool)
563
- if (r < 0) or (r > n):
564
- raise ValueError
565
-
566
- c = 1
567
- k = min(r, n - r)
568
- for i in range(1, k + 1):
569
- c = c * (n - k + i) // i
570
-
571
- if index < 0:
572
- index += c
573
-
574
- if (index < 0) or (index >= c):
575
- raise IndexError
576
-
577
- result = []
578
- while r:
579
- c, n, r = c * r // n, n - 1, r - 1
580
- while index >= c:
581
- index -= c
582
- c, n = c * (n - r) // n, n - 1
583
- result.append(pool[-1 - n])
584
-
585
- return tuple(result)
586
-
587
-
588
- def prepend(value, iterator):
589
- """Yield *value*, followed by the elements in *iterator*.
590
-
591
- >>> value = '0'
592
- >>> iterator = ['1', '2', '3']
593
- >>> list(prepend(value, iterator))
594
- ['0', '1', '2', '3']
595
-
596
- To prepend multiple values, see :func:`itertools.chain`
597
- or :func:`value_chain`.
598
-
599
- """
600
- return chain([value], iterator)
601
-
602
-
603
- def convolve(signal, kernel):
604
- """Convolve the iterable *signal* with the iterable *kernel*.
605
-
606
- >>> signal = (1, 2, 3, 4, 5)
607
- >>> kernel = [3, 2, 1]
608
- >>> list(convolve(signal, kernel))
609
- [3, 8, 14, 20, 26, 14, 5]
610
-
611
- Note: the input arguments are not interchangeable, as the *kernel*
612
- is immediately consumed and stored.
613
-
614
- """
615
- kernel = tuple(kernel)[::-1]
616
- n = len(kernel)
617
- window = deque([0], maxlen=n) * n
618
- for x in chain(signal, repeat(0, n - 1)):
619
- window.append(x)
620
- yield sum(map(operator.mul, kernel, window))
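
A short sanity check ties a few of the recipes above together; this is a minimal sketch, assuming `more_itertools` is installed (the inputs are arbitrary):

from itertools import combinations
from more_itertools import consume, convolve, nth_combination

# convolving with a uniform kernel of weight 1/3 yields a moving average
print(list(convolve([1, 2, 3, 4, 5], [1 / 3, 1 / 3, 1 / 3])))

# nth_combination agrees with materializing every combination first
assert nth_combination(range(5), 3, 5) == list(combinations(range(5), 3))[5]

# consume advances an iterator in place without building a list
it = iter(range(10))
consume(it, 3)
print(next(it))  # 3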
spaces/Blessing/Asphalt-Pavement-Distresses-Detector/app.py DELETED
@@ -1,174 +0,0 @@
-import gradio as gr
-import cv2
-import requests
-import os
-import random
-
-from ultralytics import YOLO
-
-file_urls = [
-    'https://www.dropbox.com/scl/fi/5pavu4vvkprrtkwktvei7/DSC02373.JPG?rlkey=fpj636qtkf3vrqfxy45n2d9ii&dl=1',
-    'https://www.dropbox.com/scl/fi/56pbn4r3ohk85rchcvwdj/DSC02813.JPG?rlkey=jnbsidqtthk6p4ysld6o6kc4t&dl=1',
-    'https://www.dropbox.com/scl/fi/av9g5zbmrrzg9064zivat/image_2.jpg?rlkey=ldocvzz5lq98zffqf1lmhbhv1&dl=1',
-    'https://www.dropbox.com/scl/fi/izo2eqqnqzcsaxis1qrbx/IMG_7612.JPG?rlkey=6wfjaux44khtlx454ex0ng0hp&dl=1',
-    'https://www.dropbox.com/scl/fi/e6vgy1et6vjr61uypk5yu/VID-20230809-WA0021.mp4?rlkey=khv8rw074vezzlg8ob38bpmbx&dl=1'
-]
-
-def download_file(url, save_name):
-    if not os.path.exists(save_name):
-        file = requests.get(url)
-        open(save_name, 'wb').write(file.content)
-
-for i, url in enumerate(file_urls):
-    if 'mp4' in file_urls[i]:
-        download_file(file_urls[i], "video.mp4")
-    else:
-        download_file(file_urls[i], f"image_{i}.jpg")
-
-
-model = YOLO('best.pt')
-path = [['image_0.jpg'], ['image_1.jpg'], ['image_2.jpg'], ['image_3.jpg']]
-
-# path = [['IMG_7612.JPG'], ['IMG_7678.JPG'], ['all_33.jpg'], ['all_80.jpg'],
-#         ['DSC02813.JPG'], ['DSC02373.JPG']]
-
-# path = [['sc_1_0 (1) (1).JPG'], ['sc_1_0 (16) (1).JPG'],
-#         ['sc_1_0 (18) (1).JPG'], ['sc_1_0 (18).JPG']]
-
-video_path = [['video.mp4']]
-
-classes = ['alligator_cracking', 'longitudinal_cracking', 'potholes', 'ravelling']
-
-def show_preds_image(image_path):
-    image = cv2.imread(image_path)
-    outputs = model.predict(source=image_path, agnostic_nms=True, conf=0.25, iou=0.4, imgsz=640)
-    results = outputs[0].cpu().numpy()
-
-    re_boxes = results.boxes.data.tolist()
-
-    # class indices from the model are 0-based, so the color map is keyed 0-3
-    class_colors = {0: (95, 255, 54), 1: (242, 210, 100), 2: (96, 7, 70), 3: (221, 59, 41)}
-    random.seed(42)
-    # class_colors = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) for _ in range(4)]
-
-    for i, det in enumerate(results.boxes.xyxy):
-        x1, y1, x2, y2 = int(det[0]), int(det[1]), int(det[2]), int(det[3])
-
-        class_label = int(re_boxes[i][-1])
-        rectangle_color = class_colors.get(class_label)
-        text_color = rectangle_color
-        cv2.rectangle(
-            image,
-            (x1, y1),
-            (x2, y2),
-            color=rectangle_color,
-            thickness=3,
-            lineType=cv2.LINE_AA
-        )
-
-        text_position = (x1, y1 + 100)
-        conf = re_boxes[i][-2]
-        class_name = classes[class_label]
-        # class_label = class_name.split('_')[0] + '\n' + class_name.split('_')[1] if '_' in class_name else class_name
-        cv2.putText(image, classes[class_label] + f' = {round(conf, 2)}',
-                    text_position, cv2.FONT_HERSHEY_SIMPLEX, 1.5, text_color, 3)
-
-    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-
-inputs_image = [
-    gr.components.Image(type="filepath", label="Input Image"),
-]
-outputs_image = [
-    gr.components.Image(type="numpy", label="Output Image"),
-]
-interface_image = gr.Interface(
-    fn=show_preds_image,
-    inputs=inputs_image,
-    outputs=outputs_image,
-    title="Asphalt Road Pavement Distresses Detector",
-    examples=path,
-    cache_examples=False,
-    description='This is a demo app that takes in images or videos of asphalt pavement surfaces and \
-                 \n detects the following pavement distresses: \
-                 \n \
-                 \n Alligator cracking \
-                 \n Longitudinal cracking \
-                 \n Potholes \
-                 \n Ravelling \
-                 \n \
-                 \n This is specifically for inference and educational purposes. \
-                 \n \
-                 \n The model might occasionally give false outputs'
-)
-
-def show_preds_video(video_path):
-    cap = cv2.VideoCapture(video_path)
-    while cap.isOpened():
-        ret, frame = cap.read()
-        if ret:
-            frame_copy = frame.copy()
-            outputs = model.predict(source=frame, agnostic_nms=True, conf=0.25, iou=0.4, imgsz=640)
-            results = outputs[0].cpu().numpy()
-            re_boxes = results.boxes.data.tolist()
-
-            # class indices from the model are 0-based, so the color map is keyed 0-3
-            class_colors = {0: (95, 255, 54), 1: (242, 210, 100), 2: (96, 7, 70), 3: (221, 59, 41)}
-            random.seed(42)
-            # class_colors = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) for _ in range(4)]
-
-            for i, det in enumerate(results.boxes.xyxy):
-                x1, y1, x2, y2 = int(det[0]), int(det[1]), int(det[2]), int(det[3])
-
-                class_label = int(re_boxes[i][-1])
-                rectangle_color = class_colors.get(class_label)
-                text_color = rectangle_color
-
-                cv2.rectangle(
-                    frame_copy,
-                    (x1, y1),
-                    (x2, y2),
-                    color=rectangle_color,
-                    thickness=2,
-                    lineType=cv2.LINE_AA
-                )
-
-                text_position = (x1, y1 + 100)
-                conf = re_boxes[i][-2]
-                class_name = classes[class_label]
-                # class_label = class_name.split('_')[0] + '\n' + class_name.split('_')[1] if '_' in class_name else class_name
-                cv2.putText(frame_copy, classes[class_label] + f' = {round(conf, 2)}',
-                            text_position, cv2.FONT_HERSHEY_SIMPLEX, 1.5, text_color, 3)
-
-            yield cv2.cvtColor(frame_copy, cv2.COLOR_BGR2RGB)
-        else:
-            # stop once the video has been fully read
-            break
-
-inputs_video = [
-    gr.components.Video(type="filepath", label="Input Video"),
-]
-outputs_video = [
-    gr.components.Image(type="numpy", label="Output Video"),
-]
-interface_video = gr.Interface(
-    fn=show_preds_video,
-    inputs=inputs_video,
-    outputs=outputs_video,
-    title="Asphalt Road Pavement Distresses Detector",
-    examples=video_path,
-    cache_examples=False,
-    # live=True
-)
-gr.TabbedInterface(
-    [interface_image, interface_video],
-    tab_names=['Image inference', 'Video inference'],
-).queue().launch()
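
For reference, the image branch of the app above can be exercised without launching Gradio; this is a minimal sketch, assuming `best.pt` and the downloaded example file `image_0.jpg` are present in the working directory:

import cv2

# show_preds_image (defined in the app above) returns an RGB array,
# so convert back to BGR before writing with OpenCV
annotated = show_preds_image('image_0.jpg')
cv2.imwrite('annotated.jpg', cv2.cvtColor(annotated, cv2.COLOR_RGB2BGR))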
spaces/CVH-vn1210/make_hair/minigpt4/common/optims.py DELETED
@@ -1,119 +0,0 @@
-"""
- Copyright (c) 2022, salesforce.com, inc.
- All rights reserved.
- SPDX-License-Identifier: BSD-3-Clause
- For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
-"""
-
-import math
-
-from minigpt4.common.registry import registry
-
-
-@registry.register_lr_scheduler("linear_warmup_step_lr")
-class LinearWarmupStepLRScheduler:
-    def __init__(
-        self,
-        optimizer,
-        max_epoch,
-        min_lr,
-        init_lr,
-        decay_rate=1,
-        warmup_start_lr=-1,
-        warmup_steps=0,
-        **kwargs
-    ):
-        self.optimizer = optimizer
-
-        self.max_epoch = max_epoch
-        self.min_lr = min_lr
-
-        self.decay_rate = decay_rate
-
-        self.init_lr = init_lr
-        self.warmup_steps = warmup_steps
-        self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr
-
-    def step(self, cur_epoch, cur_step):
-        if cur_epoch == 0:
-            warmup_lr_schedule(
-                step=cur_step,
-                optimizer=self.optimizer,
-                max_step=self.warmup_steps,
-                init_lr=self.warmup_start_lr,
-                max_lr=self.init_lr,
-            )
-        else:
-            step_lr_schedule(
-                epoch=cur_epoch,
-                optimizer=self.optimizer,
-                init_lr=self.init_lr,
-                min_lr=self.min_lr,
-                decay_rate=self.decay_rate,
-            )
-
-
-@registry.register_lr_scheduler("linear_warmup_cosine_lr")
-class LinearWarmupCosineLRScheduler:
-    def __init__(
-        self,
-        optimizer,
-        max_epoch,
-        iters_per_epoch,
-        min_lr,
-        init_lr,
-        warmup_steps=0,
-        warmup_start_lr=-1,
-        **kwargs
-    ):
-        self.optimizer = optimizer
-
-        self.max_epoch = max_epoch
-        self.iters_per_epoch = iters_per_epoch
-        self.min_lr = min_lr
-
-        self.init_lr = init_lr
-        self.warmup_steps = warmup_steps
-        self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr
-
-    def step(self, cur_epoch, cur_step):
-        total_cur_step = cur_epoch * self.iters_per_epoch + cur_step
-        if total_cur_step < self.warmup_steps:
-            warmup_lr_schedule(
-                step=cur_step,
-                optimizer=self.optimizer,
-                max_step=self.warmup_steps,
-                init_lr=self.warmup_start_lr,
-                max_lr=self.init_lr,
-            )
-        else:
-            cosine_lr_schedule(
-                epoch=total_cur_step,
-                optimizer=self.optimizer,
-                max_epoch=self.max_epoch * self.iters_per_epoch,
-                init_lr=self.init_lr,
-                min_lr=self.min_lr,
-            )
-
-
-def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr):
-    """Decay the learning rate"""
-    lr = (init_lr - min_lr) * 0.5 * (
-        1.0 + math.cos(math.pi * epoch / max_epoch)
-    ) + min_lr
-    for param_group in optimizer.param_groups:
-        param_group["lr"] = lr
-
-
-def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr):
-    """Warmup the learning rate"""
-    lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max(max_step, 1))
-    for param_group in optimizer.param_groups:
-        param_group["lr"] = lr
-
-
-def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate):
-    """Decay the learning rate"""
-    lr = max(min_lr, init_lr * (decay_rate**epoch))
-    for param_group in optimizer.param_groups:
-        param_group["lr"] = lr
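
To see what the warmup-then-cosine schedule above actually produces, the two helpers can be driven directly; this is a minimal sketch with made-up hyperparameters and a dummy single-parameter optimizer (all numbers are placeholders, not values from the repo):

import math
import torch

init_lr, min_lr, warmup_start_lr = 1e-4, 1e-6, 1e-7  # placeholders
warmup_steps, iters_per_epoch, max_epoch = 100, 1000, 10  # placeholders

opt = torch.optim.Adam([torch.nn.Parameter(torch.zeros(1))], lr=init_lr)

for total_step in (0, 50, 100, 5000, 10000):
    if total_step < warmup_steps:
        # linear ramp from warmup_start_lr up to init_lr (warmup_lr_schedule)
        lr = min(init_lr, warmup_start_lr
                 + (init_lr - warmup_start_lr) * total_step / max(warmup_steps, 1))
    else:
        # half-cosine decay from init_lr down to min_lr (cosine_lr_schedule)
        lr = (init_lr - min_lr) * 0.5 * (
            1.0 + math.cos(math.pi * total_step / (max_epoch * iters_per_epoch))
        ) + min_lr
    for group in opt.param_groups:
        group['lr'] = lr
    print(total_step, f'{lr:.2e}')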
spaces/CVMX-jaca-tonos/Spanish-Audio-Transcription-to-Quechua-Translation/README.md DELETED
@@ -1,12 +0,0 @@
----
-title: Spanish Audio Transcription To Quechua Translation
-emoji: 📈
-colorFrom: purple
-colorTo: green
-sdk: gradio
-sdk_version: 2.9.4
-app_file: app.py
-pinned: false
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
spaces/CVPR/LIVE/thrust/thrust/detail/type_traits/pointer_traits.h DELETED
@@ -1,371 +0,0 @@
-/*
- *  Copyright 2008-2018 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/type_traits.h>
-#include <thrust/detail/type_traits/is_metafunction_defined.h>
-#include <thrust/detail/type_traits/has_nested_type.h>
-#include <thrust/iterator/iterator_traits.h>
-#include <cstddef>
-
-namespace thrust
-{
-namespace detail
-{
-
-template<typename Ptr> struct pointer_element;
-
-template<template<typename> class Ptr, typename Arg>
-struct pointer_element<Ptr<Arg> >
-{
-  typedef Arg type;
-};
-
-template<template<typename,typename> class Ptr, typename Arg1, typename Arg2>
-struct pointer_element<Ptr<Arg1,Arg2> >
-{
-  typedef Arg1 type;
-};
-
-template<template<typename,typename,typename> class Ptr, typename Arg1, typename Arg2, typename Arg3>
-struct pointer_element<Ptr<Arg1,Arg2,Arg3> >
-{
-  typedef Arg1 type;
-};
-
-template<template<typename,typename,typename,typename> class Ptr, typename Arg1, typename Arg2, typename Arg3, typename Arg4>
-struct pointer_element<Ptr<Arg1,Arg2,Arg3,Arg4> >
-{
-  typedef Arg1 type;
-};
-
-template<template<typename,typename,typename,typename,typename> class Ptr, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5>
-struct pointer_element<Ptr<Arg1,Arg2,Arg3,Arg4,Arg5> >
-{
-  typedef Arg1 type;
-};
-
-template<typename T>
-struct pointer_element<T*>
-{
-  typedef T type;
-};
-
-template<typename Ptr>
-struct pointer_difference
-{
-  typedef typename Ptr::difference_type type;
-};
-
-template<typename T>
-struct pointer_difference<T*>
-{
-  typedef std::ptrdiff_t type;
-};
-
-template<typename Ptr, typename T> struct rebind_pointer;
-
-template<typename T, typename U>
-struct rebind_pointer<T*,U>
-{
-  typedef U* type;
-};
-
-template<template<typename> class Ptr, typename Arg, typename T>
-struct rebind_pointer<Ptr<Arg>,T>
-{
-  typedef Ptr<T> type;
-};
-
-template<template<typename, typename> class Ptr, typename Arg1, typename Arg2, typename T>
-struct rebind_pointer<Ptr<Arg1,Arg2>,T>
-{
-  typedef Ptr<T,Arg2> type;
-};
-
-template<template<typename, typename, typename> class Ptr, typename Arg1, typename Arg2, typename Arg3, typename T>
-struct rebind_pointer<Ptr<Arg1,Arg2,Arg3>,T>
-{
-  typedef Ptr<T,Arg2,Arg3> type;
-};
-
-template<template<typename, typename, typename, typename> class Ptr, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename T>
-struct rebind_pointer<Ptr<Arg1,Arg2,Arg3,Arg4>,T>
-{
-  typedef Ptr<T,Arg2,Arg3,Arg4> type;
-};
-
-// XXX this should probably be renamed native_type or similar
-__THRUST_DEFINE_HAS_NESTED_TYPE(has_raw_pointer, raw_pointer)
-
-namespace pointer_traits_detail
-{
-
-template<typename Ptr, typename Enable = void> struct pointer_raw_pointer_impl {};
-
-template<typename T>
-struct pointer_raw_pointer_impl<T*>
-{
-  typedef T* type;
-};
-
-template<typename Ptr>
-struct pointer_raw_pointer_impl<Ptr, typename enable_if<has_raw_pointer<Ptr>::value>::type>
-{
-  typedef typename Ptr::raw_pointer type;
-};
-
-} // end pointer_traits_detail
-
-template<typename T>
-struct pointer_raw_pointer
-  : pointer_traits_detail::pointer_raw_pointer_impl<T>
-{};
-
-namespace pointer_traits_detail
-{
-
-template<typename Void>
-struct capture_address
-{
-  template<typename T>
-  __host__ __device__
-  capture_address(T &r)
-    : m_addr(&r)
-  {}
-
-  inline __host__ __device__
-  Void *operator&() const
-  {
-    return m_addr;
-  }
-
-  Void *m_addr;
-};
-
-// metafunction to compute the type of pointer_to's parameter below
-template<typename T>
-struct pointer_to_param
-  : thrust::detail::eval_if<
-      thrust::detail::is_void<T>::value,
-      thrust::detail::identity_<capture_address<T> >,
-      thrust::detail::add_reference<T>
-    >
-{};
-
-}
-
-template<typename Ptr>
-struct pointer_traits
-{
-  typedef Ptr pointer;
-  typedef typename Ptr::reference reference;
-  typedef typename pointer_element<Ptr>::type element_type;
-  typedef typename pointer_difference<Ptr>::type difference_type;
-
-  template<typename U>
-  struct rebind
-  {
-    typedef typename rebind_pointer<Ptr,U>::type other;
-  };
-
-  __host__ __device__
-  inline static pointer pointer_to(typename pointer_traits_detail::pointer_to_param<element_type>::type r)
-  {
-    // XXX this is supposed to be pointer::pointer_to(&r); (i.e., call a static member function of pointer called pointer_to)
-    //     assume that pointer has a constructor from raw pointer instead
-
-    return pointer(&r);
-  }
-
-  // thrust additions follow
-  typedef typename pointer_raw_pointer<Ptr>::type raw_pointer;
-
-  __host__ __device__
-  inline static raw_pointer get(pointer ptr)
-  {
-    return ptr.get();
-  }
-};
-
-template<typename T>
-struct pointer_traits<T*>
-{
-  typedef T* pointer;
-  typedef T& reference;
-  typedef T element_type;
-  typedef typename pointer_difference<T*>::type difference_type;
-
-  template<typename U>
-  struct rebind
-  {
-    typedef U* other;
-  };
-
-  __host__ __device__
-  inline static pointer pointer_to(typename pointer_traits_detail::pointer_to_param<element_type>::type r)
-  {
-    return &r;
-  }
-
-  // thrust additions follow
-  typedef typename pointer_raw_pointer<T*>::type raw_pointer;
-
-  __host__ __device__
-  inline static raw_pointer get(pointer ptr)
-  {
-    return ptr;
-  }
-};
-
-template<>
-struct pointer_traits<void*>
-{
-  typedef void* pointer;
-  typedef void reference;
-  typedef void element_type;
-  typedef pointer_difference<void*>::type difference_type;
-
-  template<typename U>
-  struct rebind
-  {
-    typedef U* other;
-  };
-
-  __host__ __device__
-  inline static pointer pointer_to(pointer_traits_detail::pointer_to_param<element_type>::type r)
-  {
-    return &r;
-  }
-
-  // thrust additions follow
-  typedef pointer_raw_pointer<void*>::type raw_pointer;
-
-  __host__ __device__
-  inline static raw_pointer get(pointer ptr)
-  {
-    return ptr;
-  }
-};
-
-template<>
-struct pointer_traits<const void*>
-{
-  typedef const void* pointer;
-  typedef const void reference;
-  typedef const void element_type;
-  typedef pointer_difference<const void*>::type difference_type;
-
-  template<typename U>
-  struct rebind
-  {
-    typedef U* other;
-  };
-
-  __host__ __device__
-  inline static pointer pointer_to(pointer_traits_detail::pointer_to_param<element_type>::type r)
-  {
-    return &r;
-  }
-
-  // thrust additions follow
-  typedef pointer_raw_pointer<const void*>::type raw_pointer;
-
-  __host__ __device__
-  inline static raw_pointer get(pointer ptr)
-  {
-    return ptr;
-  }
-};
-
-template<typename FromPtr, typename ToPtr>
-struct is_pointer_system_convertible
-  : thrust::detail::is_convertible<
-      typename iterator_system<FromPtr>::type,
-      typename iterator_system<ToPtr>::type
-    >
-{};
-
-template<typename FromPtr, typename ToPtr>
-struct is_pointer_convertible
-  : thrust::detail::and_<
-      thrust::detail::is_convertible<
-        typename pointer_element<FromPtr>::type *,
-        typename pointer_element<ToPtr>::type *
-      >,
-      is_pointer_system_convertible<FromPtr, ToPtr>
-    >
-{};
-
-template<typename FromPtr, typename ToPtr>
-struct is_void_pointer_system_convertible
-  : thrust::detail::and_<
-      thrust::detail::is_same<
-        typename pointer_element<FromPtr>::type,
-        void
-      >,
-      is_pointer_system_convertible<FromPtr, ToPtr>
-    >
-{};
-
-// this could be a lot better, but for our purposes, it's probably
-// sufficient just to check if pointer_raw_pointer<T> has meaning
-template<typename T>
-struct is_thrust_pointer
-  : is_metafunction_defined<pointer_raw_pointer<T> >
-{};
-
-// avoid inspecting traits of the arguments if they aren't known to be pointers
-template<typename FromPtr, typename ToPtr>
-struct lazy_is_pointer_convertible
-  : thrust::detail::eval_if<
-      is_thrust_pointer<FromPtr>::value && is_thrust_pointer<ToPtr>::value,
-      is_pointer_convertible<FromPtr,ToPtr>,
-      thrust::detail::identity_<thrust::detail::false_type>
-    >
-{};
-
-template<typename FromPtr, typename ToPtr>
-struct lazy_is_void_pointer_system_convertible
-  : thrust::detail::eval_if<
-      is_thrust_pointer<FromPtr>::value && is_thrust_pointer<ToPtr>::value,
-      is_void_pointer_system_convertible<FromPtr,ToPtr>,
-      thrust::detail::identity_<thrust::detail::false_type>
-    >
-{};
-
-template<typename FromPtr, typename ToPtr, typename T = void>
-struct enable_if_pointer_is_convertible
-  : thrust::detail::enable_if<
-      lazy_is_pointer_convertible<FromPtr,ToPtr>::type::value,
-      T
-    >
-{};
-
-template<typename FromPtr, typename ToPtr, typename T = void>
-struct enable_if_void_pointer_is_system_convertible
-  : thrust::detail::enable_if<
-      lazy_is_void_pointer_system_convertible<FromPtr,ToPtr>::type::value,
-      T
-    >
-{};
-
-
-} // end detail
-} // end thrust
-
spaces/CVPR/LIVE/thrust/thrust/find.h DELETED
@@ -1,385 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-
-/*! \file find.h
- *  \brief Locating values in (unsorted) ranges
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/detail/execution_policy.h>
-
-namespace thrust
-{
-
-
-/*! \addtogroup algorithms
- */
-
-/*! \addtogroup searching
- *  \ingroup algorithms
- *  \{
- */
-
-
-/*! \p find returns the first iterator \c i in the range
- *  <tt>[first, last)</tt> such that <tt>*i == value</tt>
- *  or \c last if no such iterator exists.
- *
- *  The algorithm's execution is parallelized as determined by \p exec.
- *
- *  \param exec The execution policy to use for parallelization.
- *  \param first Beginning of the sequence to search.
- *  \param last End of the sequence to search.
- *  \param value The value to find.
- *  \return The first iterator \c i such that <tt>*i == value</tt> or \c last.
- *
- *  \tparam DerivedPolicy The name of the derived execution policy.
- *  \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \p InputIterator's \c value_type is equality comparable to type \c T.
- *  \tparam T is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">EqualityComparable</a>.
- *
- *  \code
- *  #include <thrust/find.h>
- *  #include <thrust/device_vector.h>
- *  #include <thrust/execution_policy.h>
- *  ...
- *  thrust::device_vector<int> input(4);
- *
- *  input[0] = 0;
- *  input[1] = 5;
- *  input[2] = 3;
- *  input[3] = 7;
- *
- *  thrust::device_vector<int>::iterator iter;
- *
- *  iter = thrust::find(thrust::device, input.begin(), input.end(), 3); // returns input.begin() + 2
- *  iter = thrust::find(thrust::device, input.begin(), input.end(), 5); // returns input.begin() + 1
- *  iter = thrust::find(thrust::device, input.begin(), input.end(), 9); // returns input.end()
- *  \endcode
- *
- *  \see find_if
- *  \see mismatch
- */
-template<typename DerivedPolicy, typename InputIterator, typename T>
-__host__ __device__
-InputIterator find(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                   InputIterator first,
-                   InputIterator last,
-                   const T& value);
-
-
-/*! \p find returns the first iterator \c i in the range
- *  <tt>[first, last)</tt> such that <tt>*i == value</tt>
- *  or \c last if no such iterator exists.
- *
- *  \param first Beginning of the sequence to search.
- *  \param last End of the sequence to search.
- *  \param value The value to find.
- *  \return The first iterator \c i such that <tt>*i == value</tt> or \c last.
- *
- *  \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>
- *          and \p InputIterator's \c value_type is equality comparable to type \c T.
- *  \tparam T is a model of <a href="http://www.sgi.com/tech/stl/EqualityComparable.html">EqualityComparable</a>.
- *
- *  \code
- *  #include <thrust/find.h>
- *  #include <thrust/device_vector.h>
- *  ...
- *  thrust::device_vector<int> input(4);
- *
- *  input[0] = 0;
- *  input[1] = 5;
- *  input[2] = 3;
- *  input[3] = 7;
- *
- *  thrust::device_vector<int>::iterator iter;
- *
- *  iter = thrust::find(input.begin(), input.end(), 3); // returns input.begin() + 2
- *  iter = thrust::find(input.begin(), input.end(), 5); // returns input.begin() + 1
- *  iter = thrust::find(input.begin(), input.end(), 9); // returns input.end()
- *  \endcode
- *
- *  \see find_if
- *  \see mismatch
- */
-template <typename InputIterator, typename T>
-InputIterator find(InputIterator first,
-                   InputIterator last,
-                   const T& value);
-
-
-/*! \p find_if returns the first iterator \c i in the range
- *  <tt>[first, last)</tt> such that <tt>pred(*i)</tt> is \c true
- *  or \c last if no such iterator exists.
- *
- *  The algorithm's execution is parallelized as determined by \p exec.
- *
- *  \param exec The execution policy to use for parallelization.
- *  \param first Beginning of the sequence to search.
- *  \param last End of the sequence to search.
- *  \param pred A predicate used to test range elements.
- *  \return The first iterator \c i such that <tt>pred(*i)</tt> is \c true, or \c last.
- *
- *  \tparam DerivedPolicy The name of the derived execution policy.
- *  \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
- *  \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
- *
- *  \code
- *  #include <thrust/find.h>
- *  #include <thrust/device_vector.h>
- *  #include <thrust/execution_policy.h>
- *  ...
- *
- *  struct greater_than_four
- *  {
- *    __host__ __device__
- *    bool operator()(int x)
- *    {
- *      return x > 4;
- *    }
- *  };
- *
- *  struct greater_than_ten
- *  {
- *    __host__ __device__
- *    bool operator()(int x)
- *    {
- *      return x > 10;
- *    }
- *  };
- *
- *  ...
- *  thrust::device_vector<int> input(4);
- *
- *  input[0] = 0;
- *  input[1] = 5;
- *  input[2] = 3;
- *  input[3] = 7;
- *
- *  thrust::device_vector<int>::iterator iter;
- *
- *  iter = thrust::find_if(thrust::device, input.begin(), input.end(), greater_than_four()); // returns input.begin() + 1
- *
- *  iter = thrust::find_if(thrust::device, input.begin(), input.end(), greater_than_ten()); // returns input.end()
- *  \endcode
- *
- *  \see find
- *  \see find_if_not
- *  \see mismatch
- */
-template<typename DerivedPolicy, typename InputIterator, typename Predicate>
-__host__ __device__
-InputIterator find_if(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                      InputIterator first,
-                      InputIterator last,
-                      Predicate pred);
-
-
-/*! \p find_if returns the first iterator \c i in the range
- *  <tt>[first, last)</tt> such that <tt>pred(*i)</tt> is \c true
- *  or \c last if no such iterator exists.
- *
- *  \param first Beginning of the sequence to search.
- *  \param last End of the sequence to search.
- *  \param pred A predicate used to test range elements.
- *  \return The first iterator \c i such that <tt>pred(*i)</tt> is \c true, or \c last.
- *
- *  \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
- *  \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
- *
- *  \code
- *  #include <thrust/find.h>
- *  #include <thrust/device_vector.h>
- *
- *  struct greater_than_four
- *  {
- *    __host__ __device__
- *    bool operator()(int x)
- *    {
- *      return x > 4;
- *    }
- *  };
- *
- *  struct greater_than_ten
- *  {
- *    __host__ __device__
- *    bool operator()(int x)
- *    {
- *      return x > 10;
- *    }
- *  };
- *
- *  ...
- *  thrust::device_vector<int> input(4);
- *
- *  input[0] = 0;
- *  input[1] = 5;
- *  input[2] = 3;
- *  input[3] = 7;
- *
- *  thrust::device_vector<int>::iterator iter;
- *
- *  iter = thrust::find_if(input.begin(), input.end(), greater_than_four()); // returns input.begin() + 1
- *
- *  iter = thrust::find_if(input.begin(), input.end(), greater_than_ten()); // returns input.end()
- *  \endcode
- *
- *  \see find
- *  \see find_if_not
- *  \see mismatch
- */
-template <typename InputIterator, typename Predicate>
-InputIterator find_if(InputIterator first,
-                      InputIterator last,
-                      Predicate pred);
-
-
-/*! \p find_if_not returns the first iterator \c i in the range
- *  <tt>[first, last)</tt> such that <tt>pred(*i)</tt> is \c false
- *  or \c last if no such iterator exists.
- *
- *  The algorithm's execution is parallelized as determined by \p exec.
- *
- *  \param exec The execution policy to use for parallelization.
- *  \param first Beginning of the sequence to search.
- *  \param last End of the sequence to search.
- *  \param pred A predicate used to test range elements.
- *  \return The first iterator \c i such that <tt>pred(*i)</tt> is \c false, or \c last.
- *
- *  \tparam DerivedPolicy The name of the derived execution policy.
- *  \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
- *  \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
- *
- *  \code
- *  #include <thrust/find.h>
- *  #include <thrust/device_vector.h>
- *  #include <thrust/execution_policy.h>
- *  ...
- *
- *  struct greater_than_four
- *  {
- *    __host__ __device__
- *    bool operator()(int x)
- *    {
- *      return x > 4;
- *    }
- *  };
- *
- *  struct greater_than_ten
- *  {
- *    __host__ __device__
- *    bool operator()(int x)
- *    {
- *      return x > 10;
- *    }
- *  };
- *
- *  ...
- *  thrust::device_vector<int> input(4);
- *
- *  input[0] = 0;
- *  input[1] = 5;
- *  input[2] = 3;
- *  input[3] = 7;
- *
- *  thrust::device_vector<int>::iterator iter;
- *
- *  iter = thrust::find_if_not(thrust::device, input.begin(), input.end(), greater_than_four()); // returns input.begin()
- *
- *  iter = thrust::find_if_not(thrust::device, input.begin(), input.end(), greater_than_ten()); // returns input.begin()
- *  \endcode
- *
- *  \see find
- *  \see find_if
- *  \see mismatch
- */
-template<typename DerivedPolicy, typename InputIterator, typename Predicate>
-__host__ __device__
-InputIterator find_if_not(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                          InputIterator first,
-                          InputIterator last,
-                          Predicate pred);
-
-
-/*! \p find_if_not returns the first iterator \c i in the range
- *  <tt>[first, last)</tt> such that <tt>pred(*i)</tt> is \c false
- *  or \c last if no such iterator exists.
- *
- *  \param first Beginning of the sequence to search.
- *  \param last End of the sequence to search.
- *  \param pred A predicate used to test range elements.
- *  \return The first iterator \c i such that <tt>pred(*i)</tt> is \c false, or \c last.
- *
- *  \tparam InputIterator is a model of <a href="http://www.sgi.com/tech/stl/InputIterator.html">Input Iterator</a>.
- *  \tparam Predicate is a model of <a href="http://www.sgi.com/tech/stl/Predicate.html">Predicate</a>.
- *
- *  \code
- *  #include <thrust/find.h>
- *  #include <thrust/device_vector.h>
- *
- *  struct greater_than_four
- *  {
- *    __host__ __device__
- *    bool operator()(int x)
- *    {
- *      return x > 4;
- *    }
- *  };
- *
- *  struct greater_than_ten
- *  {
- *    __host__ __device__
- *    bool operator()(int x)
- *    {
- *      return x > 10;
- *    }
- *  };
- *
- *  ...
- *  thrust::device_vector<int> input(4);
- *
- *  input[0] = 0;
- *  input[1] = 5;
- *  input[2] = 3;
- *  input[3] = 7;
- *
- *  thrust::device_vector<int>::iterator iter;
- *
- *  iter = thrust::find_if_not(input.begin(), input.end(), greater_than_four()); // returns input.begin()
- *
- *  iter = thrust::find_if_not(input.begin(), input.end(), greater_than_ten()); // returns input.begin()
- *  \endcode
- *
- *  \see find
- *  \see find_if
- *  \see mismatch
- */
-template <typename InputIterator, typename Predicate>
-InputIterator find_if_not(InputIterator first,
-                          InputIterator last,
-                          Predicate pred);
-
-/*! \} // end searching
- */
-
-
-} // end namespace thrust
-
-#include <thrust/detail/find.inl>
-
spaces/CVPR/Text2Human/Text2Human/models/parsing_gen_model.py DELETED
@@ -1,220 +0,0 @@
-import logging
-import math
-from collections import OrderedDict
-
-import mmcv
-import numpy as np
-import torch
-from torchvision.utils import save_image
-
-from models.archs.fcn_arch import FCNHead
-from models.archs.shape_attr_embedding_arch import ShapeAttrEmbedding
-from models.archs.unet_arch import ShapeUNet
-from models.losses.accuracy import accuracy
-from models.losses.cross_entropy_loss import CrossEntropyLoss
-
-logger = logging.getLogger('base')
-
-
-class ParsingGenModel():
-    """Parsing generation model."""
-
-    def __init__(self, opt):
-        self.opt = opt
-        self.device = torch.device('cuda')
-        self.is_train = opt['is_train']
-
-        self.attr_embedder = ShapeAttrEmbedding(
-            dim=opt['embedder_dim'],
-            out_dim=opt['embedder_out_dim'],
-            cls_num_list=opt['attr_class_num']).to(self.device)
-        self.parsing_encoder = ShapeUNet(
-            in_channels=opt['encoder_in_channels']).to(self.device)
-        self.parsing_decoder = FCNHead(
-            in_channels=opt['fc_in_channels'],
-            in_index=opt['fc_in_index'],
-            channels=opt['fc_channels'],
-            num_convs=opt['fc_num_convs'],
-            concat_input=opt['fc_concat_input'],
-            dropout_ratio=opt['fc_dropout_ratio'],
-            num_classes=opt['fc_num_classes'],
-            align_corners=opt['fc_align_corners'],
-        ).to(self.device)
-
-        self.init_training_settings()
-
-        self.palette = [[0, 0, 0], [255, 250, 250], [220, 220, 220],
-                        [250, 235, 215], [255, 250, 205], [211, 211, 211],
-                        [70, 130, 180], [127, 255, 212], [0, 100, 0],
-                        [50, 205, 50], [255, 255, 0], [245, 222, 179],
-                        [255, 140, 0], [255, 0, 0], [16, 78, 139],
-                        [144, 238, 144], [50, 205, 174], [50, 155, 250],
-                        [160, 140, 88], [213, 140, 88], [90, 140, 90],
-                        [185, 210, 205], [130, 165, 180], [225, 141, 151]]
-
-    def init_training_settings(self):
-        optim_params = []
-        for v in self.attr_embedder.parameters():
-            if v.requires_grad:
-                optim_params.append(v)
-        for v in self.parsing_encoder.parameters():
-            if v.requires_grad:
-                optim_params.append(v)
-        for v in self.parsing_decoder.parameters():
-            if v.requires_grad:
-                optim_params.append(v)
-        # set up optimizers
-        self.optimizer = torch.optim.Adam(
-            optim_params,
-            self.opt['lr'],
-            weight_decay=self.opt['weight_decay'])
-        self.log_dict = OrderedDict()
-        self.entropy_loss = CrossEntropyLoss().to(self.device)
-
-    def feed_data(self, data):
-        self.pose = data['densepose'].to(self.device)
-        self.attr = data['attr'].to(self.device)
-        self.segm = data['segm'].to(self.device)
-
-    def optimize_parameters(self):
-        self.attr_embedder.train()
-        self.parsing_encoder.train()
-        self.parsing_decoder.train()
-
-        self.attr_embedding = self.attr_embedder(self.attr)
-        self.pose_enc = self.parsing_encoder(self.pose, self.attr_embedding)
-        self.seg_logits = self.parsing_decoder(self.pose_enc)
-
-        loss = self.entropy_loss(self.seg_logits, self.segm)
-
-        self.optimizer.zero_grad()
-        loss.backward()
-        self.optimizer.step()
-
-        self.log_dict['loss_total'] = loss
-
-    def get_vis(self, save_path):
-        img_cat = torch.cat([
-            self.pose,
-            self.segm,
-        ], dim=3).detach()
-        img_cat = (img_cat + 1) / 2
-
-        img_cat = img_cat.clamp_(0, 1)
-
-        save_image(img_cat, save_path, nrow=1, padding=4)
-
-    def inference(self, data_loader, save_dir):
-        self.attr_embedder.eval()
-        self.parsing_encoder.eval()
-        self.parsing_decoder.eval()
-
-        acc = 0
-        num = 0
-
-        for _, data in enumerate(data_loader):
-            pose = data['densepose'].to(self.device)
-            attr = data['attr'].to(self.device)
-            segm = data['segm'].to(self.device)
-            img_name = data['img_name']
-
-            num += pose.size(0)
-            with torch.no_grad():
-                attr_embedding = self.attr_embedder(attr)
-                pose_enc = self.parsing_encoder(pose, attr_embedding)
-                seg_logits = self.parsing_decoder(pose_enc)
-            seg_pred = seg_logits.argmax(dim=1)
-            acc += accuracy(seg_logits, segm)
-            palette_label = self.palette_result(segm.cpu().numpy())
-            palette_pred = self.palette_result(seg_pred.cpu().numpy())
-            pose_numpy = ((pose[0] + 1) / 2. * 255.).expand(
-                3,
-                pose[0].size(1),
-                pose[0].size(2),
-            ).cpu().numpy().clip(0, 255).astype(np.uint8).transpose(1, 2, 0)
-            concat_result = np.concatenate(
-                (pose_numpy, palette_pred, palette_label), axis=1)
-            mmcv.imwrite(concat_result, f'{save_dir}/{img_name[0]}')
-
-        self.attr_embedder.train()
-        self.parsing_encoder.train()
-        self.parsing_decoder.train()
-        return (acc / num).item()
-
-    def get_current_log(self):
-        return self.log_dict
-
-    def update_learning_rate(self, epoch):
-        """Update learning rate.
-
-        Args:
-            epoch (int): Current epoch.
-        """
-        lr = self.optimizer.param_groups[0]['lr']
-
-        if self.opt['lr_decay'] == 'step':
-            lr = self.opt['lr'] * (
-                self.opt['gamma']**(epoch // self.opt['step']))
-        elif self.opt['lr_decay'] == 'cos':
-            lr = self.opt['lr'] * (
-                1 + math.cos(math.pi * epoch / self.opt['num_epochs'])) / 2
-        elif self.opt['lr_decay'] == 'linear':
-            lr = self.opt['lr'] * (1 - epoch / self.opt['num_epochs'])
-        elif self.opt['lr_decay'] == 'linear2exp':
-            if epoch < self.opt['turning_point'] + 1:
-                # learning rate decays to 95% per epoch before
-                # the turning point (1 / 95% = 1.0526)
-                lr = self.opt['lr'] * (
-                    1 - epoch / int(self.opt['turning_point'] * 1.0526))
-            else:
-                lr *= self.opt['gamma']
-        elif self.opt['lr_decay'] == 'schedule':
-            if epoch in self.opt['schedule']:
-                lr *= self.opt['gamma']
-        else:
-            raise ValueError('Unknown lr mode {}'.format(self.opt['lr_decay']))
-        # set learning rate
-        for param_group in self.optimizer.param_groups:
-            param_group['lr'] = lr
-
-        return lr
-
-    def save_network(self, save_path):
-        """Save networks."""
-
-        save_dict = {}
-        save_dict['embedder'] = self.attr_embedder.state_dict()
-        save_dict['encoder'] = self.parsing_encoder.state_dict()
-        save_dict['decoder'] = self.parsing_decoder.state_dict()
-
-        torch.save(save_dict, save_path)
-
-    def load_network(self):
-        checkpoint = torch.load(self.opt['pretrained_parsing_gen'])
-
-        self.attr_embedder.load_state_dict(checkpoint['embedder'], strict=True)
-        self.attr_embedder.eval()
-
-        self.parsing_encoder.load_state_dict(
-            checkpoint['encoder'], strict=True)
-        self.parsing_encoder.eval()
-
-        self.parsing_decoder.load_state_dict(
-            checkpoint['decoder'], strict=True)
-        self.parsing_decoder.eval()
-
-    def palette_result(self, result):
-        seg = result[0]
-        palette = np.array(self.palette)
-        assert palette.shape[1] == 3
-        assert len(palette.shape) == 2
-        color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
-        for label, color in enumerate(palette):
-            color_seg[seg == label, :] = color
-        # convert to BGR
-        color_seg = color_seg[..., ::-1]
-        return color_seg
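
The label-to-color mapping in palette_result above is easy to verify on a toy input; this is a standalone sketch with a truncated three-color palette (the real model uses all 24 entries):

import numpy as np

palette = [[0, 0, 0], [255, 250, 250], [220, 220, 220]]  # first 3 of 24

def palette_result(result, palette):
    seg = result[0]  # (H, W) integer label map
    color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
    for label, color in enumerate(np.array(palette)):
        color_seg[seg == label, :] = color
    return color_seg[..., ::-1]  # RGB -> BGR, matching the model's convention

seg = np.array([[[0, 1], [2, 0]]])  # one 2x2 parsing map with 3 classes
print(palette_result(seg, palette).shape)  # (2, 2, 3)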
spaces/CVPR/lama-example/saicinpainting/training/losses/distance_weighting.py DELETED
@@ -1,126 +0,0 @@
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
- import torchvision
-
- from saicinpainting.training.losses.perceptual import IMAGENET_STD, IMAGENET_MEAN
-
-
- def dummy_distance_weighter(real_img, pred_img, mask):
-     return mask
-
-
- def get_gauss_kernel(kernel_size, width_factor=1):
-     coords = torch.stack(torch.meshgrid(torch.arange(kernel_size),
-                                         torch.arange(kernel_size)),
-                          dim=0).float()
-     diff = torch.exp(-((coords - kernel_size // 2) ** 2).sum(0) / kernel_size / width_factor)
-     diff /= diff.sum()
-     return diff
-
-
- class BlurMask(nn.Module):
-     def __init__(self, kernel_size=5, width_factor=1):
-         super().__init__()
-         self.filter = nn.Conv2d(1, 1, kernel_size, padding=kernel_size // 2, padding_mode='replicate', bias=False)
-         self.filter.weight.data.copy_(get_gauss_kernel(kernel_size, width_factor=width_factor))
-
-     def forward(self, real_img, pred_img, mask):
-         with torch.no_grad():
-             result = self.filter(mask) * mask
-             return result
-
-
- class EmulatedEDTMask(nn.Module):
-     def __init__(self, dilate_kernel_size=5, blur_kernel_size=5, width_factor=1):
-         super().__init__()
-         self.dilate_filter = nn.Conv2d(1, 1, dilate_kernel_size, padding=dilate_kernel_size // 2, padding_mode='replicate',
-                                        bias=False)
-         self.dilate_filter.weight.data.copy_(torch.ones(1, 1, dilate_kernel_size, dilate_kernel_size, dtype=torch.float))
-         self.blur_filter = nn.Conv2d(1, 1, blur_kernel_size, padding=blur_kernel_size // 2, padding_mode='replicate', bias=False)
-         self.blur_filter.weight.data.copy_(get_gauss_kernel(blur_kernel_size, width_factor=width_factor))
-
-     def forward(self, real_img, pred_img, mask):
-         with torch.no_grad():
-             known_mask = 1 - mask
-             dilated_known_mask = (self.dilate_filter(known_mask) > 1).float()
-             result = self.blur_filter(1 - dilated_known_mask) * mask
-             return result
-
-
- class PropagatePerceptualSim(nn.Module):
-     def __init__(self, level=2, max_iters=10, temperature=500, erode_mask_size=3):
-         super().__init__()
-         vgg = torchvision.models.vgg19(pretrained=True).features
-         vgg_avg_pooling = []
-
-         for weights in vgg.parameters():
-             weights.requires_grad = False
-
-         cur_level_i = 0
-         for module in vgg.modules():
-             if module.__class__.__name__ == 'Sequential':
-                 continue
-             elif module.__class__.__name__ == 'MaxPool2d':
-                 vgg_avg_pooling.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0))
-             else:
-                 vgg_avg_pooling.append(module)
-                 if module.__class__.__name__ == 'ReLU':
-                     cur_level_i += 1
-                     if cur_level_i == level:
-                         break
-
-         self.features = nn.Sequential(*vgg_avg_pooling)
-
-         self.max_iters = max_iters
-         self.temperature = temperature
-         self.do_erode = erode_mask_size > 0
-         if self.do_erode:
-             self.erode_mask = nn.Conv2d(1, 1, erode_mask_size, padding=erode_mask_size // 2, bias=False)
-             self.erode_mask.weight.data.fill_(1)
-
-     def forward(self, real_img, pred_img, mask):
-         with torch.no_grad():
-             real_img = (real_img - IMAGENET_MEAN.to(real_img)) / IMAGENET_STD.to(real_img)
-             real_feats = self.features(real_img)
-
-             vertical_sim = torch.exp(-(real_feats[:, :, 1:] - real_feats[:, :, :-1]).pow(2).sum(1, keepdim=True)
-                                      / self.temperature)
-             horizontal_sim = torch.exp(-(real_feats[:, :, :, 1:] - real_feats[:, :, :, :-1]).pow(2).sum(1, keepdim=True)
-                                        / self.temperature)
-
-             mask_scaled = F.interpolate(mask, size=real_feats.shape[-2:], mode='bilinear', align_corners=False)
-             if self.do_erode:
-                 mask_scaled = (self.erode_mask(mask_scaled) > 1).float()
-
-             cur_knowness = 1 - mask_scaled
-
-             for iter_i in range(self.max_iters):
-                 new_top_knowness = F.pad(cur_knowness[:, :, :-1] * vertical_sim, (0, 0, 1, 0), mode='replicate')
-                 new_bottom_knowness = F.pad(cur_knowness[:, :, 1:] * vertical_sim, (0, 0, 0, 1), mode='replicate')
-
-                 new_left_knowness = F.pad(cur_knowness[:, :, :, :-1] * horizontal_sim, (1, 0, 0, 0), mode='replicate')
-                 new_right_knowness = F.pad(cur_knowness[:, :, :, 1:] * horizontal_sim, (0, 1, 0, 0), mode='replicate')
-
-                 new_knowness = torch.stack([new_top_knowness, new_bottom_knowness,
-                                             new_left_knowness, new_right_knowness],
-                                            dim=0).max(0).values
-
-                 cur_knowness = torch.max(cur_knowness, new_knowness)
-
-             cur_knowness = F.interpolate(cur_knowness, size=mask.shape[-2:], mode='bilinear')
-             result = torch.min(mask, 1 - cur_knowness)
-
-         return result
-
-
- def make_mask_distance_weighter(kind='none', **kwargs):
-     if kind == 'none':
-         return dummy_distance_weighter
-     if kind == 'blur':
-         return BlurMask(**kwargs)
-     if kind == 'edt':
-         return EmulatedEDTMask(**kwargs)
-     if kind == 'pps':
-         return PropagatePerceptualSim(**kwargs)
-     raise ValueError(f'Unknown mask distance weighter kind {kind}')
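A quick smoke test for the factory at the end of this file, assuming the module is importable from the repo root (shapes only; the 1 x 1 x H x W mask layout is what the Conv2d filters expect):

import torch

from saicinpainting.training.losses.distance_weighting import make_mask_distance_weighter

weighter = make_mask_distance_weighter(kind='blur', kernel_size=5)
mask = torch.zeros(1, 1, 64, 64)
mask[:, :, 16:48, 16:48] = 1          # a square hole to be inpainted
weights = weighter(None, None, mask)  # BlurMask ignores its image arguments
print(weights.shape)                  # torch.Size([1, 1, 64, 64])
print(weights.max() <= 1)             # weights stay inside the hole, at most 1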
 
spaces/CarlosMF/AI-ORUS-License-v1.0.0/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: AI ORUS License V1.0.0
- emoji: 🤝
- colorFrom: purple
- colorTo: purple
- sdk: streamlit
- sdk_version: 1.17.0
- app_file: app.py
- pinned: false
- license: cc
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/DCandE/rvc-models/README.md DELETED
@@ -1,14 +0,0 @@
- ---
- title: Rvc Models
- emoji: 🎤
- colorFrom: red
- colorTo: blue
- sdk: gradio
- sdk_version: 3.27.0
- app_file: app.py
- pinned: false
- license: mit
- duplicated_from: ardha27/rvc-models
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/Column-2853eb31.css DELETED
@@ -1 +0,0 @@
- div.svelte-vt1mxs{display:flex;position:relative;flex-direction:column}div.svelte-vt1mxs>*,div.svelte-vt1mxs>.form>*{width:var(--size-full)}.gap.svelte-vt1mxs{gap:var(--layout-gap)}.hide.svelte-vt1mxs{display:none}.compact.svelte-vt1mxs>*,.compact.svelte-vt1mxs .box{border-radius:0}.compact.svelte-vt1mxs,.panel.svelte-vt1mxs{border:solid var(--panel-border-width) var(--panel-border-color);border-radius:var(--container-radius);background:var(--panel-background-fill);padding:var(--spacing-lg)}
 
 
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/themes/glass.py DELETED
@@ -1,99 +0,0 @@
- from __future__ import annotations
-
- from typing import Iterable
-
- from gradio.themes.base import Base
- from gradio.themes.utils import colors, fonts, sizes
-
-
- class Glass(Base):
-     def __init__(
-         self,
-         *,
-         primary_hue: colors.Color | str = colors.stone,
-         secondary_hue: colors.Color | str = colors.stone,
-         neutral_hue: colors.Color | str = colors.stone,
-         spacing_size: sizes.Size | str = sizes.spacing_sm,
-         radius_size: sizes.Size | str = sizes.radius_sm,
-         text_size: sizes.Size | str = sizes.text_sm,
-         font: fonts.Font
-         | str
-         | Iterable[fonts.Font | str] = (
-             "Optima",
-             "Candara",
-             "Noto Sans",
-             "source-sans-pro",
-             "sans-serif",
-         ),
-         font_mono: fonts.Font
-         | str
-         | Iterable[fonts.Font | str] = (
-             fonts.GoogleFont("IBM Plex Mono"),
-             "ui-monospace",
-             "Consolas",
-             "monospace",
-         ),
-     ):
-         super().__init__(
-             primary_hue=primary_hue,
-             secondary_hue=secondary_hue,
-             neutral_hue=neutral_hue,
-             spacing_size=spacing_size,
-             radius_size=radius_size,
-             text_size=text_size,
-             font=font,
-             font_mono=font_mono,
-         )
-         self.name = "glass"
-         super().set(
-             body_background_fill_dark="*primary_800",
-             background_fill_secondary_dark="*primary_800",
-             block_background_fill_dark="*primary_800",
-             button_primary_background_fill="linear-gradient(180deg, *primary_50 0%, *primary_200 50%, *primary_300 50%, *primary_200 100%)",
-             button_primary_background_fill_hover="linear-gradient(180deg, *primary_100 0%, *primary_200 50%, *primary_300 50%, *primary_200 100%)",
-             button_primary_background_fill_dark="linear-gradient(180deg, *primary_400 0%, *primary_500 50%, *primary_600 50%, *primary_500 100%)",
-             button_primary_background_fill_hover_dark="linear-gradient(180deg, *primary_400 0%, *primary_500 50%, *primary_600 50%, *primary_500 100%)",
-             button_secondary_background_fill="*button_primary_background_fill",
-             button_secondary_background_fill_hover="*button_primary_background_fill_hover",
-             button_secondary_background_fill_dark="*button_primary_background_fill",
-             button_secondary_background_fill_hover_dark="*button_primary_background_fill_hover",
-             button_cancel_background_fill="*button_primary_background_fill",
-             button_cancel_background_fill_hover="*button_primary_background_fill_hover",
-             button_cancel_background_fill_dark="*button_primary_background_fill",
-             button_cancel_background_fill_hover_dark="*button_primary_background_fill_hover",
-             button_cancel_border_color="*button_secondary_border_color",
-             button_cancel_border_color_dark="*button_secondary_border_color",
-             button_cancel_text_color="*button_secondary_text_color",
-             checkbox_border_width="0px",
-             checkbox_label_background_fill="*button_secondary_background_fill",
-             checkbox_label_background_fill_dark="*button_secondary_background_fill",
-             checkbox_label_background_fill_hover="*button_secondary_background_fill_hover",
-             checkbox_label_background_fill_hover_dark="*button_secondary_background_fill_hover",
-             checkbox_label_border_width="1px",
-             checkbox_background_color_dark="*primary_600",
-             button_border_width="1px",
-             button_shadow_active="*shadow_inset",
-             input_background_fill="linear-gradient(0deg, *secondary_50 0%, white 100%)",
-             input_background_fill_dark="*secondary_600",
-             input_border_color_focus_dark="*primary_400",
-             input_border_width="1px",
-             slider_color="*primary_400",
-             block_label_text_color="*primary_500",
-             block_title_text_color="*primary_500",
-             block_label_text_weight="600",
-             block_title_text_weight="600",
-             block_label_text_size="*text_md",
-             block_title_text_size="*text_md",
-             block_label_background_fill="*primary_200",
-             block_label_background_fill_dark="*primary_700",
-             block_border_width="0px",
-             block_border_width_dark="1px",
-             panel_border_width="1px",
-             border_color_primary_dark="*primary_500",
-             background_fill_primary_dark="*neutral_700",
-             background_fill_secondary="*primary_100",
-             block_background_fill="*primary_50",
-             block_shadow="*primary_400 0px 0px 3px 0px",
-             table_even_background_fill_dark="*neutral_700",
-             table_odd_background_fill_dark="*neutral_700",
-         )
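For reference, a theme class like this is used by passing an instance to a Gradio app; a minimal sketch (the component is only there to show the styling):

import gradio as gr
from gradio.themes import Glass

with gr.Blocks(theme=Glass()) as demo:
    gr.Textbox(label="Styled by the Glass theme")

demo.launch()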
 
spaces/DaleChen/AutoGPT/tests/integration/memory_tests.py DELETED
@@ -1,49 +0,0 @@
- import random
- import string
- import unittest
-
- from autogpt.config import Config
- from autogpt.memory.local import LocalCache
-
-
- class TestLocalCache(unittest.TestCase):
-     def random_string(self, length):
-         return "".join(random.choice(string.ascii_letters) for _ in range(length))
-
-     def setUp(self):
-         cfg = Config()
-         self.cache = LocalCache(cfg)
-         self.cache.clear()
-
-         # Add example texts to the cache
-         self.example_texts = [
-             "The quick brown fox jumps over the lazy dog",
-             "I love machine learning and natural language processing",
-             "The cake is a lie, but the pie is always true",
-             "ChatGPT is an advanced AI model for conversation",
-         ]
-
-         for text in self.example_texts:
-             self.cache.add(text)
-
-         # Add some random strings to test noise
-         for _ in range(5):
-             self.cache.add(self.random_string(10))
-
-     def test_get_relevant(self):
-         query = "I'm interested in artificial intelligence and NLP"
-         k = 3
-         relevant_texts = self.cache.get_relevant(query, k)
-
-         print(f"Top {k} relevant texts for the query '{query}':")
-         for i, text in enumerate(relevant_texts, start=1):
-             print(f"{i}. {text}")
-
-         self.assertEqual(len(relevant_texts), k)
-         self.assertIn(self.example_texts[1], relevant_texts)
-
-
- if __name__ == "__main__":
-     unittest.main()
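get_relevant is exercised above but its implementation is not shown in this diff. As a rough, purely illustrative sketch of what such a lookup typically does (cosine similarity over cached embeddings; this is not AutoGPT's actual code):

import numpy as np

def top_k_indices(query_vec: np.ndarray, corpus: np.ndarray, k: int) -> np.ndarray:
    # cosine similarity of the query against every cached embedding row
    sims = corpus @ query_vec / (
        np.linalg.norm(corpus, axis=1) * np.linalg.norm(query_vec) + 1e-9)
    return np.argsort(-sims)[:k]  # indices of the k most similar texts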
 
spaces/Detomo/Lighten_dark_image/README.md DELETED
@@ -1,11 +0,0 @@
- ---
- title: Lighten Dark Image
- emoji: 💡
- colorFrom: blue
- colorTo: white
- sdk: gradio
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
 
spaces/Deviliaan/sd_twist/app.py DELETED
@@ -1,162 +0,0 @@
- """
- Stable Diffusion Webui Version 1.5.1
- https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.5.1
- """
- commit_id = r"c5934fb6e3007795efcf9b71be6df1581f61e4cb"  # Version 1.5.1
- import os
- from sys import executable
- import subprocess
- import pathlib
- import gc
-
- def Gitclone(URI: str, ClonePath: pathlib.Path) -> int:
-     if ClonePath.exists():
-         return 0
-     while True:
-         i = subprocess.run([r"git", r"clone", str(URI), str(ClonePath)])
-         if i.returncode == 0:
-             del i
-             gc.collect()
-             return 0
-         else:
-             del i
-
- def DownLoad(URI: str, DownloadPath: pathlib.Path, DownLoadFileName: str) -> int:
-     if (DownloadPath / DownLoadFileName).is_file():
-         return 0
-     while True:
-         i = subprocess.run([r"aria2c", r"-c", r"-x", r"16", r"-s", r"16", r"-k", r"1M", r"-m", r"0", r"--enable-mmap=false", r"--console-log-level=error", r"-d", str(DownloadPath), r"-o", DownLoadFileName, URI])
-         if i.returncode == 0:
-             del i
-             gc.collect()
-             return 0
-         else:
-             del i
-
- user_home = pathlib.Path.home().resolve()
- os.chdir(str(user_home))
- # clone the stable-diffusion-webui repo
- print("cloning stable-diffusion-webui repo")
- Gitclone(r"https://github.com/AUTOMATIC1111/stable-diffusion-webui.git", user_home / r"stable-diffusion-webui")
- os.chdir(str(user_home / r"stable-diffusion-webui"))
- os.system("git reset --hard " + commit_id)
- # install extensions
- print("installing extensions")
- Gitclone(r"https://huggingface.co/embed/negative", user_home / r"stable-diffusion-webui" / r"embeddings" / r"negative")
- Gitclone(r"https://huggingface.co/embed/lora", user_home / r"stable-diffusion-webui" / r"models" / r"Lora" / r"positive")
- DownLoad(r"https://huggingface.co/embed/upscale/resolve/main/4x-UltraSharp.pth", user_home / r"stable-diffusion-webui" / r"models" / r"ESRGAN", r"4x-UltraSharp.pth")
- while True:
-     i = subprocess.run([r"wget", r"https://raw.githubusercontent.com/camenduru/stable-diffusion-webui-scripts/main/run_n_times.py", r"-O", str(user_home / r"stable-diffusion-webui" / r"scripts" / r"run_n_times.py")])
-     if i.returncode == 0:
-         del i
-         gc.collect()
-         break
-     else:
-         del i
- Gitclone(r"https://github.com/deforum-art/deforum-for-automatic1111-webui", user_home / r"stable-diffusion-webui" / r"extensions" / r"deforum-for-automatic1111-webui")
- Gitclone(r"https://github.com/AlUlkesh/stable-diffusion-webui-images-browser", user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-images-browser")
- Gitclone(r"https://github.com/camenduru/stable-diffusion-webui-huggingface", user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-huggingface")
- Gitclone(r"https://github.com/camenduru/sd-civitai-browser", user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-civitai-browser")
- Gitclone(r"https://github.com/kohya-ss/sd-webui-additional-networks", user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks")
- Gitclone(r"https://github.com/Mikubill/sd-webui-controlnet", user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-controlnet")
- Gitclone(r"https://github.com/fkunn1326/openpose-editor", user_home / r"stable-diffusion-webui" / r"extensions" / r"openpose-editor")
- Gitclone(r"https://github.com/jexom/sd-webui-depth-lib", user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-depth-lib")
- Gitclone(r"https://github.com/hnmr293/posex", user_home / r"stable-diffusion-webui" / r"extensions" / r"posex")
- Gitclone(r"https://github.com/nonnonstop/sd-webui-3d-open-pose-editor", user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-3d-open-pose-editor")
- # For Chinese localization, uncomment the next line
- #Gitclone(r"https://github.com/dtlnor/stable-diffusion-webui-localization-zh_CN.git", user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-localization-zh_CN")
- Gitclone(r"https://github.com/DominikDoom/a1111-sd-webui-tagcomplete.git", user_home / r"stable-diffusion-webui" / r"extensions" / r"a1111-sd-webui-tagcomplete")
- Gitclone(r"https://github.com/camenduru/sd-webui-tunnels", user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-tunnels")
- Gitclone(r"https://github.com/etherealxx/batchlinks-webui", user_home / r"stable-diffusion-webui" / r"extensions" / r"batchlinks-webui")
- Gitclone(r"https://github.com/catppuccin/stable-diffusion-webui", user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-catppuccin")
- Gitclone(r"https://github.com/AUTOMATIC1111/stable-diffusion-webui-rembg", user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-rembg")
- Gitclone(r"https://github.com/ashen-sensored/stable-diffusion-webui-two-shot", user_home / r"stable-diffusion-webui" / r"extensions" / r"stable-diffusion-webui-two-shot")
- #Gitclone(r"https://github.com/camenduru/sd_webui_stealth_pnginfo", user_home / r"stable-diffusion-webui" / r"extensions" / r"sd_webui_stealth_pnginfo")
- os.chdir(user_home / r"stable-diffusion-webui")
- # download ControlNet models
- print("extensions download done.\ndownloading ControlNet models")
- dList = [r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11e_sd15_ip2p_fp16.safetensors",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11e_sd15_shuffle_fp16.safetensors",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_canny_fp16.safetensors",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_inpaint_fp16.safetensors",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_lineart_fp16.safetensors",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_mlsd_fp16.safetensors",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_normalbae_fp16.safetensors",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_openpose_fp16.safetensors",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_scribble_fp16.safetensors",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_seg_fp16.safetensors",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15_softedge_fp16.safetensors",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/control_v11f1e_sd15_tile_fp16.safetensors",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11e_sd15_ip2p_fp16.yaml",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11e_sd15_shuffle_fp16.yaml",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_canny_fp16.yaml",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11f1p_sd15_depth_fp16.yaml",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_inpaint_fp16.yaml",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_lineart_fp16.yaml",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_mlsd_fp16.yaml",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_normalbae_fp16.yaml",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_openpose_fp16.yaml",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_scribble_fp16.yaml",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_seg_fp16.yaml",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15_softedge_fp16.yaml",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11p_sd15s2_lineart_anime_fp16.yaml",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/raw/main/control_v11f1e_sd15_tile_fp16.yaml",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_style_sd14v1.pth",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_sketch_sd14v1.pth",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_seg_sd14v1.pth",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_openpose_sd14v1.pth",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_keypose_sd14v1.pth",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_depth_sd14v1.pth",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_canny_sd14v1.pth",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_canny_sd15v2.pth",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_depth_sd15v2.pth",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_sketch_sd15v2.pth",
-          r"https://huggingface.co/ckpt/ControlNet-v1-1/resolve/main/t2iadapter_zoedepth_sd15v1.pth"]
- for url in dList:
-     DownLoad(url, user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-controlnet" / r"models", pathlib.Path(url).name)
- del dList
- # download models
- # you can change the model download addresses here
- print("ControlNet models download done.\ndownloading model")
- # Stable Diffusion checkpoint models
- # Anything v4.5
- DownLoad(r"https://huggingface.co/ckpt/anything-v4.0/resolve/main/anything-v4.5-pruned.ckpt", user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion", r"anything-v4.5-pruned.ckpt")
- DownLoad(r"https://huggingface.co/ckpt/anything-v4.0/resolve/main/anything-v4.0.vae.pt", user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion", r"anything-v4.0.vae.pt")
- # Counterfeit-V3.0
- DownLoad(r"https://huggingface.co/gsdf/Counterfeit-V3.0/resolve/main/Counterfeit-V3.0_fp16.safetensors", user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion", r"Counterfeit-V3.0_fp16.safetensors")
- # AbyssOrangeMix2 sfw
- DownLoad(r"https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix2/AbyssOrangeMix2_sfw.safetensors", user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion", r"AbyssOrangeMix2_sfw.safetensors")
- DownLoad(r"https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/VAEs/orangemix.vae.pt", user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion", r"orangemix.vae.pt")
- # MeinaPastelV5
- DownLoad(r"https://huggingface.co/Meina/MeinaPastel/resolve/main/MeinaPastelV5%20-%20Baked%20VAE.safetensors", user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion", r"MeinaPastelV5_BakedVAE.safetensors")
- DownLoad(r"https://huggingface.co/Meina/MeinaPastel/resolve/main/MeinaPastelV5%20-%20Without%20VAE.safetensors", user_home / r"stable-diffusion-webui" / r"models" / r"Stable-diffusion", r"MeinaPastelV5_WithoutVAE.safetensors")
-
- # Lora models
- # Better Light
- DownLoad(r"https://civitai.com/api/download/models/39885", user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models" / r"lora", r"Better_light.safetensors")
- DownLoad(r"https://civitai.com/api/download/models/39885", user_home / r"stable-diffusion-webui" / r"models" / r"lora", r"Better_light.safetensors")
- # LAS
- DownLoad(r"https://civitai.com/api/download/models/21065", user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models" / r"lora", r"LAS.safetensors")
- DownLoad(r"https://civitai.com/api/download/models/21065", user_home / r"stable-diffusion-webui" / r"models" / r"lora", r"LAS.safetensors")
- # Backlighting
- DownLoad(r"https://civitai.com/api/download/models/39164", user_home / r"stable-diffusion-webui" / r"extensions" / r"sd-webui-additional-networks" / r"models" / r"lora", r"backlighting.safetensors")
- DownLoad(r"https://civitai.com/api/download/models/39164", user_home / r"stable-diffusion-webui" / r"models" / r"lora", r"backlighting.safetensors")
- # GFPGAN models
- # detection Resnet50
- DownLoad(r"https://github.com/xinntao/facexlib/releases/download/v0.1.0/detection_Resnet50_Final.pth", user_home / r"stable-diffusion-webui" / r"models" / r"GFPGAN", r"detection_Resnet50_Final.pth")
- # parsing_parsenet
- DownLoad(r"https://github.com/xinntao/facexlib/releases/download/v0.2.2/parsing_parsenet.pth", user_home / r"stable-diffusion-webui" / r"models" / r"GFPGAN", r"parsing_parsenet.pth")
- # GFPGANv1.4
- DownLoad(r"https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth", user_home / r"stable-diffusion-webui" / r"models" / r"GFPGAN", r"GFPGANv1.4.pth")
- # start Stable Diffusion Webui
- print("Done\nStarting Webui...")
- os.chdir(user_home / r"stable-diffusion-webui")
- while True:
-     ret = subprocess.run([executable, user_home / r"stable-diffusion-webui" / r"launch.py", r"--precision", r"full", r"--no-half", r"--no-half-vae", r"--enable-insecure-extension-access", r"--medvram", r"--skip-torch-cuda-test", r"--enable-console-prompts", r"--ui-settings-file=" + str(pathlib.Path(__file__).parent / r"config.json")])
-     if ret.returncode == 0:
-         del ret
-         gc.collect()
-     else:
-         del ret
- del os, user_home, executable, subprocess
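The Gitclone and DownLoad helpers above retry forever on a non-zero exit code, which can hang the Space on a permanently broken URL. A bounded variant is usually safer; a sketch (the retry count and sample command are illustrative):

import subprocess

def run_with_retries(cmd: list[str], max_attempts: int = 3) -> None:
    # retry a command a bounded number of times instead of looping forever
    for attempt in range(1, max_attempts + 1):
        if subprocess.run(cmd).returncode == 0:
            return
        print(f"attempt {attempt}/{max_attempts} failed: {' '.join(cmd)}")
    raise RuntimeError(f"command failed after {max_attempts} attempts: {cmd}")

run_with_retries(["git", "clone", "https://github.com/AUTOMATIC1111/stable-diffusion-webui.git"])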
 
spaces/Dinoking/Guccio-AI-Designer/netdissect/proggan.py DELETED
@@ -1,299 +0,0 @@
- import torch, numpy, itertools
- import torch.nn as nn
- from collections import OrderedDict
-
-
- def print_network(net, verbose=False):
-     num_params = 0
-     for param in net.parameters():
-         num_params += param.numel()
-     if verbose:
-         print(net)
-     print('Total number of parameters: {:3.3f} M'.format(num_params / 1e6))
-
-
- def from_pth_file(filename):
-     '''
-     Instantiate from a pth file.
-     '''
-     state_dict = torch.load(filename)
-     if 'state_dict' in state_dict:
-         state_dict = state_dict['state_dict']
-     # Convert old version of parameter names
-     if 'features.0.conv.weight' in state_dict:
-         state_dict = state_dict_from_old_pt_dict(state_dict)
-     sizes = sizes_from_state_dict(state_dict)
-     result = ProgressiveGenerator(sizes=sizes)
-     result.load_state_dict(state_dict)
-     return result
-
- ###############################################################################
- # Modules
- ###############################################################################
-
- class ProgressiveGenerator(nn.Sequential):
-     def __init__(self, resolution=None, sizes=None, modify_sequence=None,
-                  output_tanh=False):
-         '''
-         A pytorch progressive GAN generator that can be converted directly
-         from either a tensorflow model or a theano model. It consists of
-         a sequence of convolutional layers, organized in pairs, with an
-         upsampling and reduction of channels at every other layer, followed
-         finally by an output layer that reduces it to an RGB [-1..1] image.
-
-         The network can be given more layers to increase the output
-         resolution. The sizes argument indicates the feature depth at
-         each upsampling, starting with the input z: [input-dim, 4x4-depth,
-         8x8-depth, 16x16-depth...]. The output dimension is 2 * 2**len(sizes).
-
-         Some default architectures can be selected by supplying the
-         resolution argument instead.
-
-         The optional modify_sequence function can be used to transform the
-         sequence of layers before the network is constructed.
-
-         If output_tanh is set to True, the network applies a tanh to clamp
-         the output to [-1,1] before output; otherwise the output is unclamped.
-         '''
-         assert (resolution is None) != (sizes is None)
-         if sizes is None:
-             sizes = {
-                 8: [512, 512, 512],
-                 16: [512, 512, 512, 512],
-                 32: [512, 512, 512, 512, 256],
-                 64: [512, 512, 512, 512, 256, 128],
-                 128: [512, 512, 512, 512, 256, 128, 64],
-                 256: [512, 512, 512, 512, 256, 128, 64, 32],
-                 1024: [512, 512, 512, 512, 512, 256, 128, 64, 32, 16]
-             }[resolution]
-         # Follow the schedule of upsampling given by sizes.
-         # layers are called: layer1, layer2, etc; then output_128x128
-         sequence = []
-         def add_d(layer, name=None):
-             if name is None:
-                 name = 'layer%d' % (len(sequence) + 1)
-             sequence.append((name, layer))
-         add_d(NormConvBlock(sizes[0], sizes[1], kernel_size=4, padding=3))
-         add_d(NormConvBlock(sizes[1], sizes[1], kernel_size=3, padding=1))
-         for i, (si, so) in enumerate(zip(sizes[1:-1], sizes[2:])):
-             add_d(NormUpscaleConvBlock(si, so, kernel_size=3, padding=1))
-             add_d(NormConvBlock(so, so, kernel_size=3, padding=1))
-         # Create an output layer. During training, the progressive GAN
-         # learns several such output layers for various resolutions; we
-         # just include the last (highest resolution) one.
-         dim = 4 * (2 ** (len(sequence) // 2 - 1))
-         add_d(OutputConvBlock(sizes[-1], tanh=output_tanh),
-               name='output_%dx%d' % (dim, dim))
-         # Allow the sequence to be modified
-         if modify_sequence is not None:
-             sequence = modify_sequence(sequence)
-         super().__init__(OrderedDict(sequence))
-
-     def forward(self, x):
-         # Convert vector input to 1x1 featuremap.
-         x = x.view(x.shape[0], x.shape[1], 1, 1)
-         return super().forward(x)
-
- class PixelNormLayer(nn.Module):
-     def __init__(self):
-         super(PixelNormLayer, self).__init__()
-
-     def forward(self, x):
-         return x / torch.sqrt(torch.mean(x**2, dim=1, keepdim=True) + 1e-8)
-
- class DoubleResolutionLayer(nn.Module):
-     def forward(self, x):
-         x = nn.functional.interpolate(x, scale_factor=2, mode='nearest')
-         return x
-
- class WScaleLayer(nn.Module):
-     def __init__(self, size, fan_in, gain=numpy.sqrt(2)):
-         super(WScaleLayer, self).__init__()
-         self.scale = gain / numpy.sqrt(fan_in)  # No longer a parameter
-         self.b = nn.Parameter(torch.randn(size))
-         self.size = size
-
-     def forward(self, x):
-         x_size = x.size()
-         x = x * self.scale + self.b.view(1, -1, 1, 1).expand(
-             x_size[0], self.size, x_size[2], x_size[3])
-         return x
-
- class NormConvBlock(nn.Module):
-     def __init__(self, in_channels, out_channels, kernel_size, padding):
-         super(NormConvBlock, self).__init__()
-         self.norm = PixelNormLayer()
-         self.conv = nn.Conv2d(
-             in_channels, out_channels, kernel_size, 1, padding, bias=False)
-         self.wscale = WScaleLayer(out_channels, in_channels,
-                                   gain=numpy.sqrt(2) / kernel_size)
-         self.relu = nn.LeakyReLU(inplace=True, negative_slope=0.2)
-
-     def forward(self, x):
-         x = self.norm(x)
-         x = self.conv(x)
-         x = self.relu(self.wscale(x))
-         return x
-
- class NormUpscaleConvBlock(nn.Module):
-     def __init__(self, in_channels, out_channels, kernel_size, padding):
-         super(NormUpscaleConvBlock, self).__init__()
-         self.norm = PixelNormLayer()
-         self.up = DoubleResolutionLayer()
-         self.conv = nn.Conv2d(
-             in_channels, out_channels, kernel_size, 1, padding, bias=False)
-         self.wscale = WScaleLayer(out_channels, in_channels,
-                                   gain=numpy.sqrt(2) / kernel_size)
-         self.relu = nn.LeakyReLU(inplace=True, negative_slope=0.2)
-
-     def forward(self, x):
-         x = self.norm(x)
-         x = self.up(x)
-         x = self.conv(x)
-         x = self.relu(self.wscale(x))
-         return x
-
- class OutputConvBlock(nn.Module):
-     def __init__(self, in_channels, tanh=False):
-         super().__init__()
-         self.norm = PixelNormLayer()
-         self.conv = nn.Conv2d(
-             in_channels, 3, kernel_size=1, padding=0, bias=False)
-         self.wscale = WScaleLayer(3, in_channels, gain=1)
-         self.clamp = nn.Hardtanh() if tanh else (lambda x: x)
-
-     def forward(self, x):
-         x = self.norm(x)
-         x = self.conv(x)
-         x = self.wscale(x)
-         x = self.clamp(x)
-         return x
-
- ###############################################################################
- # Conversion
- ###############################################################################
-
- def from_tf_parameters(parameters):
-     '''
-     Instantiate from tensorflow variables.
-     '''
-     state_dict = state_dict_from_tf_parameters(parameters)
-     sizes = sizes_from_state_dict(state_dict)
-     result = ProgressiveGenerator(sizes=sizes)
-     result.load_state_dict(state_dict)
-     return result
-
- def from_old_pt_dict(parameters):
-     '''
-     Instantiate from old pytorch state dict.
-     '''
-     state_dict = state_dict_from_old_pt_dict(parameters)
-     sizes = sizes_from_state_dict(state_dict)
-     result = ProgressiveGenerator(sizes=sizes)
-     result.load_state_dict(state_dict)
-     return result
-
- def sizes_from_state_dict(params):
-     '''
-     In a progressive GAN, the number of channels can change after each
-     upsampling. This function reads the state dict to figure the
-     number of upsamplings and the channel depth of each filter.
-     '''
-     sizes = []
-     for i in itertools.count():
-         pt_layername = 'layer%d' % (i + 1)
-         try:
-             weight = params['%s.conv.weight' % pt_layername]
-         except KeyError:
-             break
-         if i == 0:
-             sizes.append(weight.shape[1])
-         if i % 2 == 0:
-             sizes.append(weight.shape[0])
-     return sizes
-
- def state_dict_from_tf_parameters(parameters):
-     '''
-     Conversion from tensorflow parameters
-     '''
-     def torch_from_tf(data):
-         return torch.from_numpy(data.eval())
-
-     params = dict(parameters)
-     result = {}
-     sizes = []
-     for i in itertools.count():
-         resolution = 4 * (2 ** (i // 2))
-         # Translate parameter names. For example:
-         # 4x4/Dense/weight -> layer1.conv.weight
-         # 32x32/Conv0_up/weight -> layer7.conv.weight
-         # 32x32/Conv1/weight -> layer8.conv.weight
-         tf_layername = '%dx%d/%s' % (resolution, resolution,
-                                      'Dense' if i == 0 else 'Conv' if i == 1 else
-                                      'Conv0_up' if i % 2 == 0 else 'Conv1')
-         pt_layername = 'layer%d' % (i + 1)
-         # Stop looping when we run out of parameters.
-         try:
-             weight = torch_from_tf(params['%s/weight' % tf_layername])
-         except KeyError:
-             break
-         # Transpose convolution weights into pytorch format.
-         if i == 0:
-             # Convert dense layer to 4x4 convolution
-             weight = weight.view(weight.shape[0], weight.shape[1] // 16,
-                                  4, 4).permute(1, 0, 2, 3).flip(2, 3)
-             sizes.append(weight.shape[0])
-         elif i % 2 == 0:
-             # Convert inverse convolution to convolution
-             weight = weight.permute(2, 3, 0, 1).flip(2, 3)
-         else:
-             # Ordinary Conv2d conversion.
-             weight = weight.permute(3, 2, 0, 1)
-             sizes.append(weight.shape[1])
-         result['%s.conv.weight' % (pt_layername)] = weight
-         # Copy bias vector.
-         bias = torch_from_tf(params['%s/bias' % tf_layername])
-         result['%s.wscale.b' % (pt_layername)] = bias
-     # Copy just finest-grained ToRGB output layers. For example:
-     # ToRGB_lod0/weight -> output.conv.weight
-     i -= 1
-     resolution = 4 * (2 ** (i // 2))
-     tf_layername = 'ToRGB_lod0'
-     pt_layername = 'output_%dx%d' % (resolution, resolution)
-     result['%s.conv.weight' % pt_layername] = torch_from_tf(
-         params['%s/weight' % tf_layername]).permute(3, 2, 0, 1)
-     result['%s.wscale.b' % pt_layername] = torch_from_tf(
-         params['%s/bias' % tf_layername])
-     # Return parameters
-     return result
-
- def state_dict_from_old_pt_dict(params):
-     '''
-     Conversion from the old pytorch model layer names.
-     '''
-     result = {}
-     sizes = []
-     for i in itertools.count():
-         old_layername = 'features.%d' % i
-         pt_layername = 'layer%d' % (i + 1)
-         try:
-             weight = params['%s.conv.weight' % (old_layername)]
-         except KeyError:
-             break
-         if i == 0:
-             sizes.append(weight.shape[0])
-         if i % 2 == 0:
-             sizes.append(weight.shape[1])
-         result['%s.conv.weight' % (pt_layername)] = weight
-         result['%s.wscale.b' % (pt_layername)] = params[
-             '%s.wscale.b' % (old_layername)]
-     # Copy the output layers.
-     i -= 1
-     resolution = 4 * (2 ** (i // 2))
-     pt_layername = 'output_%dx%d' % (resolution, resolution)
-     result['%s.conv.weight' % pt_layername] = params['output.conv.weight']
-     result['%s.wscale.b' % pt_layername] = params['output.wscale.b']
-     # Return parameters and also network architecture sizes.
-     return result
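Typical use of the loader at the top of this file: restore a generator from a checkpoint and sample one image from a random latent. A sketch, with a hypothetical checkpoint path:

import torch
from netdissect.proggan import from_pth_file

model = from_pth_file('checkpoints/proggan_churches.pth')  # hypothetical path
model.eval()
with torch.no_grad():
    z = torch.randn(1, 512)  # latent; the dimension must match sizes[0]
    img = model(z)           # 1 x 3 x H x W, roughly in [-1, 1]
print(img.shape)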
 
spaces/ECCV2022/bytetrack/tutorials/trades/byte_tracker.py DELETED
@@ -1,352 +0,0 @@
- from __future__ import absolute_import
- from __future__ import division
- from __future__ import print_function
-
- import numpy as np
- # from numba import jit
-
- from .mot_online.kalman_filter import KalmanFilter
- from .mot_online.basetrack import BaseTrack, TrackState
- from .mot_online import matching
-
-
- class STrack(BaseTrack):
-     shared_kalman = KalmanFilter()
-
-     def __init__(self, tlwh, score):
-         # waiting to be activated
-         self._tlwh = np.asarray(tlwh, dtype=np.float64)
-         self.kalman_filter = None
-         self.mean, self.covariance = None, None
-         self.is_activated = False
-
-         self.score = score
-         self.tracklet_len = 0
-
-     def predict(self):
-         mean_state = self.mean.copy()
-         if self.state != TrackState.Tracked:
-             mean_state[7] = 0
-         self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance)
-
-     @staticmethod
-     def multi_predict(stracks):
-         if len(stracks) > 0:
-             multi_mean = np.asarray([st.mean.copy() for st in stracks])
-             multi_covariance = np.asarray([st.covariance for st in stracks])
-             for i, st in enumerate(stracks):
-                 if st.state != TrackState.Tracked:
-                     multi_mean[i][7] = 0
-             multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
-             for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
-                 stracks[i].mean = mean
-                 stracks[i].covariance = cov
-
-     def activate(self, kalman_filter, frame_id):
-         """Start a new tracklet"""
-         self.kalman_filter = kalman_filter
-         self.track_id = self.next_id()
-         self.mean, self.covariance = self.kalman_filter.initiate(self.tlwh_to_xyah(self._tlwh))
-
-         self.tracklet_len = 0
-         self.state = TrackState.Tracked
-         if frame_id == 1:
-             self.is_activated = True
-         # self.is_activated = True
-         self.frame_id = frame_id
-         self.start_frame = frame_id
-
-     def re_activate(self, new_track, frame_id, new_id=False):
-         self.mean, self.covariance = self.kalman_filter.update(
-             self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh)
-         )
-         self.tracklet_len = 0
-         self.state = TrackState.Tracked
-         self.is_activated = True
-         self.frame_id = frame_id
-         if new_id:
-             self.track_id = self.next_id()
-         self.score = new_track.score
-
-     def update(self, new_track, frame_id):
-         """
-         Update a matched track
-         :type new_track: STrack
-         :type frame_id: int
-         :return:
-         """
-         self.frame_id = frame_id
-         self.tracklet_len += 1
-
-         new_tlwh = new_track.tlwh
-         self.mean, self.covariance = self.kalman_filter.update(
-             self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh))
-         self.state = TrackState.Tracked
-         self.is_activated = True
-
-         self.score = new_track.score
-
-     @property
-     # @jit(nopython=True)
-     def tlwh(self):
-         """Get current position in bounding box format `(top left x, top left y,
-         width, height)`.
-         """
-         if self.mean is None:
-             return self._tlwh.copy()
-         ret = self.mean[:4].copy()
-         ret[2] *= ret[3]
-         ret[:2] -= ret[2:] / 2
-         return ret
-
-     @property
-     # @jit(nopython=True)
-     def tlbr(self):
-         """Convert bounding box to format `(min x, min y, max x, max y)`, i.e.,
-         `(top left, bottom right)`.
-         """
-         ret = self.tlwh.copy()
-         ret[2:] += ret[:2]
-         return ret
-
-     @staticmethod
-     # @jit(nopython=True)
-     def tlwh_to_xyah(tlwh):
-         """Convert bounding box to format `(center x, center y, aspect ratio,
-         height)`, where the aspect ratio is `width / height`.
-         """
-         ret = np.asarray(tlwh).copy()
-         ret[:2] += ret[2:] / 2
-         ret[2] /= ret[3]
-         return ret
-
-     def to_xyah(self):
-         return self.tlwh_to_xyah(self.tlwh)
-
-     @staticmethod
-     # @jit(nopython=True)
-     def tlbr_to_tlwh(tlbr):
-         ret = np.asarray(tlbr).copy()
-         ret[2:] -= ret[:2]
-         return ret
-
-     @staticmethod
-     # @jit(nopython=True)
-     def tlwh_to_tlbr(tlwh):
-         ret = np.asarray(tlwh).copy()
-         ret[2:] += ret[:2]
-         return ret
-
-     def __repr__(self):
-         return 'OT_{}_({}-{})'.format(self.track_id, self.start_frame, self.end_frame)
-
-
- class BYTETracker(object):
-     def __init__(self, args, frame_rate=30):
-         self.args = args
-         self.det_thresh = args.new_thresh
-         self.buffer_size = int(frame_rate / 30.0 * args.track_buffer)
-         self.max_time_lost = self.buffer_size
-         self.reset()
-
-     # the following has no effect on the final output; it is kept only for
-     # compatibility with the rest of the codebase
-     def init_track(self, results):
-         for item in results:
-             if item['score'] > self.args.new_thresh and item['class'] == 1:
-                 self.id_count += 1
-                 item['active'] = 1
-                 item['age'] = 1
-                 item['tracking_id'] = self.id_count
-                 if 'ct' not in item:
-                     bbox = item['bbox']
-                     item['ct'] = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]
-                 self.tracks.append(item)
-
-     def reset(self):
-         self.frame_id = 0
-         self.kalman_filter = KalmanFilter()
-         self.tracked_stracks = []  # type: list[STrack]
-         self.lost_stracks = []  # type: list[STrack]
-         self.removed_stracks = []  # type: list[STrack]
-         self.tracks = []
-
-         # has no effect on the final output; kept only for compatibility
-         self.id_count = 0
-
-     def step(self, results, public_det=None):
-         self.frame_id += 1
-         activated_stracks = []
-         refind_stracks = []
-         lost_stracks = []
-         removed_stracks = []
-         detections = []
-         detections_second = []
-
-         scores = np.array([item['score'] for item in results if item['class'] == 1], np.float32)
-         bboxes = np.vstack([item['bbox'] for item in results if item['class'] == 1])  # N x 4, x1y1x2y2
-
-         remain_inds = scores >= self.args.track_thresh
-         dets = bboxes[remain_inds]
-         scores_keep = scores[remain_inds]
-
-         inds_low = scores > self.args.out_thresh
-         inds_high = scores < self.args.track_thresh
-         inds_second = np.logical_and(inds_low, inds_high)
-         dets_second = bboxes[inds_second]
-         scores_second = scores[inds_second]
-
-         if len(dets) > 0:
-             '''Detections'''
-             detections = [STrack(STrack.tlbr_to_tlwh(tlbr), s) for
-                           (tlbr, s) in zip(dets, scores_keep)]
-         else:
-             detections = []
-
-         ''' Add newly detected tracklets to tracked_stracks'''
-         unconfirmed = []
-         tracked_stracks = []  # type: list[STrack]
-         for track in self.tracked_stracks:
-             if not track.is_activated:
-                 unconfirmed.append(track)
-             else:
-                 tracked_stracks.append(track)
-
-         ''' Step 2: First association, with Kalman and IOU'''
-         strack_pool = joint_stracks(tracked_stracks, self.lost_stracks)
-         # Predict the current location with KF
-         STrack.multi_predict(strack_pool)
-         dists = matching.iou_distance(strack_pool, detections)
-         # dists = matching.fuse_motion(self.kalman_filter, dists, strack_pool, detections)
-         matches, u_track, u_detection = matching.linear_assignment(dists, thresh=0.9)
-
-         for itracked, idet in matches:
-             track = strack_pool[itracked]
-             det = detections[idet]
-             if track.state == TrackState.Tracked:
-                 track.update(detections[idet], self.frame_id)
-                 activated_stracks.append(track)
-             else:
-                 track.re_activate(det, self.frame_id, new_id=False)
-                 refind_stracks.append(track)
-
-         ''' Step 3: Second association, matching the remaining tracks to the low-score detections, with IOU'''
-         if len(dets_second) > 0:
-             '''Detections'''
-             detections_second = [STrack(STrack.tlbr_to_tlwh(tlbr), s) for
-                                  (tlbr, s) in zip(dets_second, scores_second)]
-         else:
-             detections_second = []
-         r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
-         dists = matching.iou_distance(r_tracked_stracks, detections_second)
-         matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.4)
-         for itracked, idet in matches:
-             track = r_tracked_stracks[itracked]
-             det = detections_second[idet]
-             if track.state == TrackState.Tracked:
-                 track.update(det, self.frame_id)
-                 activated_stracks.append(track)
-             else:
-                 track.re_activate(det, self.frame_id, new_id=False)
-                 refind_stracks.append(track)
-
-         for it in u_track:
-             track = r_tracked_stracks[it]
-             if not track.state == TrackState.Lost:
-                 track.mark_lost()
-                 lost_stracks.append(track)
-
-         '''Deal with unconfirmed tracks, usually tracks with only one beginning frame'''
-         detections = [detections[i] for i in u_detection]
-         dists = matching.iou_distance(unconfirmed, detections)
-         matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7)
-         for itracked, idet in matches:
-             unconfirmed[itracked].update(detections[idet], self.frame_id)
-             activated_stracks.append(unconfirmed[itracked])
-         for it in u_unconfirmed:
-             track = unconfirmed[it]
-             track.mark_removed()
-             removed_stracks.append(track)
-
-         """ Step 4: Init new stracks"""
-         for inew in u_detection:
-             track = detections[inew]
-             if track.score < self.det_thresh:
-                 continue
-             track.activate(self.kalman_filter, self.frame_id)
-             activated_stracks.append(track)
-         """ Step 5: Update state"""
-         for track in self.lost_stracks:
-             if self.frame_id - track.end_frame > self.max_time_lost:
-                 track.mark_removed()
-                 removed_stracks.append(track)
-
-         self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked]
-         self.tracked_stracks = joint_stracks(self.tracked_stracks, activated_stracks)
-         self.tracked_stracks = joint_stracks(self.tracked_stracks, refind_stracks)
-         self.lost_stracks = sub_stracks(self.lost_stracks, self.tracked_stracks)
-         self.lost_stracks.extend(lost_stracks)
-         self.lost_stracks = sub_stracks(self.lost_stracks, self.removed_stracks)
-         self.removed_stracks.extend(removed_stracks)
-         self.tracked_stracks, self.lost_stracks = remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks)
-         output_stracks = [track for track in self.tracked_stracks if track.is_activated]
-
-         ret = []
-         for track in output_stracks:
-             track_dict = {}
-             track_dict['score'] = track.score
-             track_dict['bbox'] = track.tlbr
-             bbox = track_dict['bbox']
-             track_dict['ct'] = [(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2]
-             track_dict['active'] = 1 if track.is_activated else 0
-             track_dict['tracking_id'] = track.track_id
-             track_dict['class'] = 1
-             ret.append(track_dict)
-
-         self.tracks = ret
-         return ret
-
-
- def joint_stracks(tlista, tlistb):
-     exists = {}
-     res = []
-     for t in tlista:
-         exists[t.track_id] = 1
-         res.append(t)
-     for t in tlistb:
-         tid = t.track_id
-         if not exists.get(tid, 0):
-             exists[tid] = 1
-             res.append(t)
-     return res
-
-
- def sub_stracks(tlista, tlistb):
-     stracks = {}
-     for t in tlista:
-         stracks[t.track_id] = t
-     for t in tlistb:
-         tid = t.track_id
-         if stracks.get(tid, 0):
-             del stracks[tid]
-     return list(stracks.values())
-
-
- def remove_duplicate_stracks(stracksa, stracksb):
-     pdist = matching.iou_distance(stracksa, stracksb)
-     pairs = np.where(pdist < 0.15)
-     dupa, dupb = list(), list()
-     for p, q in zip(*pairs):
-         timep = stracksa[p].frame_id - stracksa[p].start_frame
-         timeq = stracksb[q].frame_id - stracksb[q].start_frame
-         if timep > timeq:
-             dupb.append(q)
-         else:
-             dupa.append(p)
-     resa = [t for i, t in enumerate(stracksa) if i not in dupa]
-     resb = [t for i, t in enumerate(stracksb) if i not in dupb]
-     return resa, resb
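The coordinate conversions in STrack are easy to verify by hand; a quick round-trip check of tlwh_to_xyah:

import numpy as np

tlwh = np.array([10., 20., 40., 80.])  # top-left x/y, width, height
ret = tlwh.copy()
ret[:2] += ret[2:] / 2  # move the top-left corner to the box center
ret[2] /= ret[3]        # replace width with the width/height aspect ratio
assert np.allclose(ret, [30., 60., 0.5, 80.])  # center (30, 60), aspect 0.5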
 
spaces/Edward-Ji/essentials-of-microeconomics/essentials_of_microeconomics/util.py DELETED
@@ -1,31 +0,0 @@
- from enum import Enum
-
- from sympy import N, latex, parse_expr
-
-
- class Approx(Enum):
-     HIDE = "Hide"
-     REPLACE = "Replace"
-     APPEND = "Append"
-
-
- def latex_approx(expr, perc: int = 15, approx: Approx = Approx.HIDE):
-     if approx == Approx.HIDE:
-         return latex(expr)
-     evalf = N(expr, perc)
-     if evalf == expr:
-         return latex(expr)
-     if approx == Approx.REPLACE:
-         return latex(evalf)
-     if approx == Approx.APPEND:
-         return latex(expr) + r"\approx " + latex(evalf)
-     assert False
-
-
- sympy_dict = {}
- exec("from sympy import *", sympy_dict)  # pylint: disable=exec-used
-
-
- def parse_expr_safer(*args, **kwargs):
-     kwargs.setdefault("global_dict", {}).update(sympy_dict)
-     return parse_expr(*args, **kwargs)
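A quick illustration of the three modes, assuming latex_approx and Approx are imported from this module (exact output strings depend on sympy's printer):

from sympy import Rational, sqrt

expr = sqrt(2) * Rational(1, 3)
print(latex_approx(expr, 5, Approx.HIDE))     # \frac{\sqrt{2}}{3}
print(latex_approx(expr, 5, Approx.REPLACE))  # 0.4714
print(latex_approx(expr, 5, Approx.APPEND))   # \frac{\sqrt{2}}{3}\approx 0.4714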