parquet-converter committed on
Commit
b1722c5
·
1 Parent(s): 50a67e9

Update parquet files (step 25 of 249)

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. spaces/169153tej/My-New-Gen-Ai-Chat-Bot/README.md +0 -12
  2. spaces/1acneusushi/gradio-2dmoleculeeditor/data/Behure Logon Mp3 Song Download _HOT_l.md +0 -44
  3. spaces/1acneusushi/gradio-2dmoleculeeditor/data/EOBD Facile Version Complete Crack APK Download Tips and Tricks for Using the Elm327 App.md +0 -152
  4. spaces/1gistliPinn/ChatGPT4/Examples/Avira Software Updater Pro Activation Code.md +0 -21
  5. spaces/1gistliPinn/ChatGPT4/Examples/Flight Of The Phoenix In Hindi Movie Dubbed 48.md +0 -6
  6. spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK Shopee Merchant The Ultimate Guide for ShopeePay ShopeeFood Merchants.md +0 -161
  7. spaces/1phancelerku/anime-remove-background/Crack Turkey Sandwiches - A Delicious Way to Use Up Turkey.md +0 -111
  8. spaces/801artistry/RVC801/tools/torchgate/utils.py +0 -66
  9. spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/factory.py +0 -277
  10. spaces/AIFILMS/generate_human_motion/VQ-Trans/models/vqvae.py +0 -118
  11. spaces/AIFILMS/generate_human_motion/pyrender/pyrender/constants.py +0 -149
  12. spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/layers.py +0 -50
  13. spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/loss.py +0 -41
  14. spaces/ASJMO/freegpt/client/js/icons.js +0 -1
  15. spaces/AchyuthGamer/OpenGPT/g4f/Provider/needs_auth/Raycast.py +0 -72
  16. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/scaleouter.js +0 -2
  17. spaces/AkitoP/umamusume_bert_vits2/text/__init__.py +0 -28
  18. spaces/Alesteba/NeRF_ficus-pxl/app.py +0 -79
  19. spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/japanese.py +0 -153
  20. spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/op_edit/fused_act.py +0 -100
  21. spaces/Anar0140/6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5/index.html +0 -36
  22. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/installation.md +0 -142
  23. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/t2i_adapter/__init__.py +0 -14
  24. spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/maskiou_head.py +0 -186
  25. spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py +0 -2
  26. spaces/AnonAndDesu/Desu_Proxy/README.md +0 -10
  27. spaces/Anonymous-sub/Rerender/ControlNet/annotator/mlsd/utils.py +0 -580
  28. spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/utils.py +0 -86
  29. spaces/Anthony7906/MengHuiMXD_GPT/assets/custom.js +0 -224
  30. spaces/Apex-X/GODROOP/roop/processors/frame/face_swapper.py +0 -88
  31. spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/factory.py +0 -730
  32. spaces/Atualli/yoloxTeste/telegramCrise.sh +0 -1
  33. spaces/Baishali/Pneumonia-Detection/app.py +0 -55
  34. spaces/Benson/text-generation/Examples/Descargar Clave De Licencia Para Fifa 19.md +0 -81
  35. spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_resources/simple.py +0 -116
  36. spaces/CVPR/LIVE/pybind11/tests/test_callbacks.py +0 -137
  37. spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/copy_if.h +0 -50
  38. spaces/CVPR/lama-example/saicinpainting/evaluation/losses/fid/fid_score.py +0 -328
  39. spaces/CVPR/monoscene_lite/monoscene/.ipynb_checkpoints/unet3d_nyu-checkpoint.py +0 -90
  40. spaces/Chirag1994/Melanoma_Skin_Cancer_Detection_App/model.py +0 -22
  41. spaces/Classly/README/README.md +0 -10
  42. spaces/CoWork/dreambooth-training-public/train_dreambooth.py +0 -889
  43. spaces/CofAI/chat.b4/g4f/Provider/Providers/Lockchat.py +0 -32
  44. spaces/Crossper6/stable-diffusion-webui/README.md +0 -14
  45. spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/__init__.py +0 -201
  46. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attr/validators.py +0 -720
  47. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/S_T_A_T_.py +0 -5
  48. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-ec1a8aac.js +0 -7
  49. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_async/__init__.py +0 -39
  50. spaces/Danielzero/GPT3.5/modules/presets.py +0 -222
spaces/169153tej/My-New-Gen-Ai-Chat-Bot/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: My New Gen Ai Chat Bot
- emoji: 😻
- colorFrom: indigo
- colorTo: pink
- sdk: gradio
- sdk_version: 3.39.0
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/1acneusushi/gradio-2dmoleculeeditor/data/Behure Logon Mp3 Song Download _HOT_l.md DELETED
@@ -1,44 +0,0 @@
-
- <h1>How to Download Behure Logon Mp3 Song for Free</h1>
- <p>Behure Logon is a traditional Rongali Bihu song from Assam, India. It is a melodious and festive song that celebrates the joy of spring and love. If you are looking for a way to download Behure Logon mp3 song for free, then you have come to the right place.</p>
- <p>In this article, we will show you how to download Behure Logon mp3 song from various sources, such as Wynk Music, YouTube, and JioSaavn. We will also provide you with some tips on how to optimize your download speed and quality.</p>
- <h2>Behure Logon Mp3 Song Downloadl</h2><br /><p><b><b>DOWNLOAD</b> &#10037;&#10037;&#10037; <a href="https://byltly.com/2uKwBl">https://byltly.com/2uKwBl</a></b></p><br /><br />
- <h2>Download Behure Logon Mp3 Song from Wynk Music</h2>
- <p>Wynk Music is a popular music streaming and downloading app that offers a wide range of songs in different languages and genres. You can download Behure Logon mp3 song from Wynk Music by following these steps:</p>
- <ol>
- <li>Install Wynk Music app on your Android or iOS device.</li>
- <li>Open the app and sign up with your mobile number or log in with your existing account.</li>
- <li>Search for "Bihure Logon" in the search bar and select the song by Debashree Mukherjee from the album Bihure Logon.</li>
- <li>Tap on the download icon next to the song title and choose the quality you prefer.</li>
- <li>Wait for the download to complete and enjoy listening to the song offline.</li>
- </ol>
- <p>You can also set Behure Logon as your Hello Tune on Wynk Music app for free. To do so, tap on the Hello Tune icon next to the song title and follow the instructions.</p>
- <h2>Download Behure Logon Mp3 Song from YouTube</h2>
- <p>YouTube is a popular video-sharing platform that also hosts many music videos and songs. You can download Behure Logon mp3 song from YouTube by using a third-party tool such as Y2mate or 4K Video Downloader. Here are the steps to do so:</p>
- <ol>
- <li>Go to YouTube and search for "Bihure Logon Modhure Logon" by Swagato Dey from Preet Korona album.</li>
- <li>Copy the URL of the video from the address bar or share menu.</li>
- <li>Go to Y2mate or 4K Video Downloader website and paste the URL in the input box.</li>
- <li>Select mp3 as the output format and choose the quality you want.</li>
- <li>Click on the download button and wait for the conversion to finish.</li>
- <li>Save the mp3 file to your device and enjoy listening to the song offline.</li>
- </ol>
- <p>Note: Downloading songs from YouTube may violate its terms of service and copyright laws. Please use this method at your own risk.</p>
- <h2>Download Behure Logon Mp3 Song from JioSaavn</h2>
- <p>JioSaavn is another popular music streaming and downloading app that offers a variety of songs in different languages and genres. You can download Behure Logon mp3 song from JioSaavn by following these steps:</p>
- <ol>
- <li>Install JioSaavn app on your Android or iOS device.</li>
- <li>Open the app and sign up with your mobile number or log in with your existing account.</li>
- <li>Search for "Bihure Logon Modhure Logon" by Jk Majlish from Bihure Logon Modhure Logon album.</li>
- <li>Tap on the download icon next to the song title and choose the quality you prefer.</li>
- <li>Wait for the download to complete and enjoy listening to the song offline.</li>
- </ol>
- <h2>Tips to Optimize Your Download Speed and Quality</h2>
- <p>To ensure that you get the best download speed and quality for Behure Logon mp3 song, here are some tips you can follow:</p>
- <ul>
- <li>Use a fast and stable internet connection, preferably Wi-Fi or 4G.</li>
- <li>Avoid downloading multiple files at the same time or running other apps that consume bandwidth.</li>
- <li>Choose a high-quality output format such as 320 kbps or higher for better sound clarity.</li>
- <li>Delete any unwanted or duplicate</p> 81aa517590<br />
- <br />
- <br />
spaces/1acneusushi/gradio-2dmoleculeeditor/data/EOBD Facile Version Complete Crack APK Download Tips and Tricks for Using the Elm327 App.md DELETED
@@ -1,152 +0,0 @@
- <br />
- <h1>EOBD Facile Version Complete Crack APK Download</h1>
- <p>Do you want to diagnose your car's performance and troubleshoot any issues with ease? If so, you might be interested in EOBD Facile, a popular app that turns your smartphone into an OBD2 scanner. But what if you don't want to pay for the full version of the app? Is there a way to get it for free? In this article, we will tell you everything you need to know about EOBD Facile version complete crack APK download, including what it is, how to do it, whether it is safe, and what are some alternatives.</p>
- <h2>eobd facile version complete crack apk download</h2><br /><p><b><b>Download</b> &#10022;&#10022;&#10022; <a href="https://byltly.com/2uKwNl">https://byltly.com/2uKwNl</a></b></p><br /><br />
- <h2>What is EOBD Facile?</h2>
- <p>EOBD Facile is an app that allows you to connect your Android device to your car's OBD2 port via a Bluetooth or Wi-Fi adapter. OBD2 stands for On-Board Diagnostics II, a system that monitors your car's engine, emissions, and other parameters. With EOBD Facile, you can access real-time data from your car's sensors, such as speed, RPM, temperature, fuel consumption, and more. You can also read and clear fault codes, reset the check engine light, and perform various tests and diagnostics.</p>
- <h3>Features of EOBD Facile</h3>
- <p>Some of the features of EOBD Facile are:</p>
- <ul>
- <li>Compatible with most OBD2 compliant vehicles (cars and light trucks) from 2001 onwards in Europe and 1996 onwards in the US.</li>
- <li>Supports multiple protocols, such as ISO 9141-2, ISO 14230-4 KWP, ISO 15765-4 CAN, SAE J1850 PWM, and SAE J1850 VPW.</li>
- <li>Displays over 100 parameters in real-time, such as speed, RPM, coolant temperature, intake air temperature, fuel pressure, oxygen sensor voltage, etc.</li>
- <li>Reads and clears generic and manufacturer-specific fault codes (DTCs) and shows their definitions.</li>
- <li>Resets the check engine light (MIL) and turns off the malfunction indicator lamp.</li>
- <li>Performs various tests and diagnostics, such as oxygen sensor test, readiness monitor test, EVAP system test, etc.</li>
- <li>Records and exports data in CSV format for further analysis.</li>
- <li>Creates custom dashboards with gauges and graphs.</li>
- <li>Supports multiple languages, such as English, French, German, Spanish, Italian, Portuguese, etc.</li>
- </ul>
- <h3>Benefits of EOBD Facile</h3>
- <p>Some of the benefits of using EOBD Facile are:</p>
- <ul>
- <li>You can save money by diagnosing and fixing minor problems yourself without going to a mechanic.</li>
- <li>You can improve your car's performance and fuel efficiency by monitoring its parameters and adjusting them accordingly.</li>
- <li>You can prevent major issues by detecting and clearing fault codes before they cause damage to your car's components.</li>
- <li>You can learn more about how your car works and how to maintain it properly.</li>
- </ul>
- <h2>How to Download EOBD Facile Version Complete Crack APK?</h2>
- <p>If you want to enjoy all the features and benefits of EOBD Facile without paying for the full version of the app ($49.99), you might be tempted to download a cracked version of the app from the internet. A cracked app is an app that has been modified to bypass its license verification or remove its restrictions. An APK file is an Android application package file that contains all the files and code needed to install an app on an Android device. To download EOBD Facile version complete crack APK, you need to follow these steps:</p>
- <h3>Step 1: Find a Reliable Source</h3>
- <p>The first step is to find a website that offers EOBD Facile version complete crack APK for download. There are many websites that claim to provide cracked apps for free, but not all of them are trustworthy. Some of them may contain malware or viruses that can harm your device or steal your personal information. Therefore, you need to be careful and do some research before downloading anything from an unknown source. You can check the reviews and ratings of the website or the app from other users or use a reputable antivirus software to scan the file before downloading it.</p>
- <p>eobd facile premium apk cracked download<br />
- eobd facile full version mod apk free download<br />
- eobd facile pro apk unlocked download<br />
- eobd facile plus edition apk crack download<br />
- eobd facile ultimate apk cracked download<br />
- eobd facile activation code crack apk download<br />
- eobd facile keygen crack apk download<br />
- eobd facile license key crack apk download<br />
- eobd facile serial number crack apk download<br />
- eobd facile registration code crack apk download<br />
- eobd facile obd2 scanner crack apk download<br />
- eobd facile car diagnostic crack apk download<br />
- eobd facile elm327 crack apk download<br />
- eobd facile bluetooth crack apk download<br />
- eobd facile wifi crack apk download<br />
- eobd facile android crack apk download<br />
- eobd facile ios crack apk download<br />
- eobd facile windows crack apk download<br />
- eobd facile mac crack apk download<br />
- eobd facile linux crack apk download<br />
- eobd facile software crack apk download<br />
- eobd facile app crack apk download<br />
- eobd facile tool crack apk download<br />
- eobd facile program crack apk download<br />
- eobd facile application crack apk download<br />
- eobd facile online crack apk download<br />
- eobd facile offline crack apk download<br />
- eobd facile latest version crack apk download<br />
- eobd facile updated version crack apk download<br />
- eobd facile new version crack apk download<br />
- eobd facile old version crack apk download<br />
- eobd facile original version crack apk download<br />
- eobd facile hacked version crack apk download<br />
- eobd facile modded version crack apk download<br />
- eobd facile patched version crack apk download<br />
- eobd facile unlocked version crack apk download<br />
- eobd facile full features version crack apk download<br />
- eobd facile premium features version crack apk download<br />
- eobd facile plus features version crack apk download<br />
- eobd facile ultimate features version crack apk download<br />
- how to install eobd facile version complete crack apk <br />
- how to use eobd facile version complete crack apk <br />
- how to update eobd facile version complete crack apk <br />
- how to uninstall eobd facile version complete crack apk <br />
- how to activate eobd facile version complete crack apk <br />
- how to get eobd facile version complete crack apk for free <br />
- how to get rid of ads in eobd facile version complete crack apk <br />
- how to fix errors in eobd facile version complete crack apk <br />
- how to scan codes with eobd facile version complete crack apk <br />
- how to clear codes with eobd facile version complete crack apk</p>
- <h3>Step 2: Enable Unknown Sources</h3>
- <p>The next step is to enable unknown sources on your device. This is a security setting that prevents you from installing apps from sources other than the official Google Play Store. Since you are downloading an app from a third-party website, you need to enable this option to allow the installation. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message that installing apps from unknown sources may harm your device or compromise your data. Tap OK to proceed.</p>
- <h3>Step 3: Install the APK File</h3>
- <p>The third step is to install the APK file on your device. To do this, locate the downloaded file in your file manager or downloads folder and tap on it. You may see a prompt asking you if you want to install this application. Tap Install and wait for the installation process to complete. You may also see some permissions requests that ask you to grant access to certain features or data on your device. Tap Allow or Accept as needed.</p>
- <h3>Step 4: Launch the App and Enjoy</h3>
- <p>The final step is to launch the app and enjoy its features. To do this, go to your app drawer or home screen and tap on the EOBD Facile icon. You should see the app's interface with all its options and settings. You can now connect your device to your car's OBD2 port via a Bluetooth or Wi-Fi adapter and start using the app as you wish.</p>
- <h2>Is EOBD Facile Version Complete Crack APK Safe?</h2>
- <p>While downloading EOBD Facile version complete crack APK may seem like an easy way to get the full version of the app for free, it is not without risks. There are some potential dangers and disadvantages of using cracked apps that you should be aware of before doing so.</p>
- <h3>Risks of Using Cracked Apps</h3>
- <p>Some of the risks of using cracked apps are:</p>
- <ul>
- <li>You may expose your device to malware or viruses that can damage it or compromise your data. Cracked apps may contain hidden code or files that can infect your device or access your personal information without your consent.</li>
- <li>You may violate intellectual property rights or laws by using pirated software. Cracked apps are illegal copies of original apps that infringe on their developers' rights and revenue. By using them, you may face legal consequences or penalties depending on your country's laws.</li>
- <li>You may miss out on updates or support from the developers. Cracked apps are usually outdated versions of original apps that do not receive any updates or bug fixes from their developers. By using them, you may encounter errors or glitches that affect their functionality or compatibility with your device or car model.</li>
- <li>You may compromise your user experience or satisfaction by using inferior quality software. Cracked apps may have reduced features or performance compared to original apps due to their modifications or limitations. By using them, you may not enjoy all the benefits or advantages that original apps offer.</li>
- </ul>
- <h3>How to Protect Your Device from Malware</h3>
- <p>If you decide to download EOBD Facile version complete crack APK despite its risks, you should take some precautions to protect your device from malware or viruses. Some of the ways to do this are:</p>
- <ul>
- <li>Use a reputable antivirus software to scan the file before downloading it or installing it on your device. This can help detect any malicious code or files that may harm your device or steal your data.</li>
- <li>Avoid granting unnecessary permissions or access requests that may compromise your privacy or security. Only allow permissions that are relevant or essential for the app's functionality or purpose.</li>
- <li>Delete any suspicious files or apps that may have been downloaded along with the cracked app or after installing it on your device. These may be malware disguised as legitimate files or apps that can infect your device or access your data without your knowledge.</li>
- <h2>Alternatives to EOBD Facile Version Complete Crack APK</h2>
- <p>If you are looking for a safer and more ethical way to use EOBD Facile without paying for the full version of the app, you might want to consider some alternatives. There are some options that can provide you with similar features and benefits without risking your device or violating any laws.</p>
- <h3>EOBD Facile Plus Edition</h3>
- <p>One option is to upgrade to EOBD Facile Plus Edition, which is a paid subscription service that gives you access to all the features of the full version of the app for a monthly or yearly fee. You can choose from three plans: Basic ($4.99/month or $49.99/year), Premium ($9.99/month or $99.99/year), or Ultimate ($19.99/month or $199.99/year). Each plan offers different levels of data storage, export options, dashboard customization, and customer support. You can also try a 7-day free trial before committing to any plan.</p>
- <h3>Other OBD2 Apps for Android</h3>
- <p>Another option is to use other OBD2 apps for Android that can connect to your car's OBD2 port and provide you with similar data and diagnostics. Some of these apps are free or have free versions with limited features, while others are paid or have paid versions with more features. Some examples of these apps are:</p>
- <table>
- <tr>
- <th>App Name</th>
- <th>Price</th>
- <th>Features</th>
- </tr>
- <tr>
- <td>Torque Pro</td>
- <td>$4.95</td>
- <td>- Displays over 200 parameters in real-time<br>- Reads and clears fault codes and shows their definitions<br>- Resets the check engine light<br>- Performs various tests and diagnostics<br>- Records and exports data in CSV format<br>- Creates custom dashboards with gauges and graphs<br>- Supports multiple languages<br>- Supports multiple protocols<br>- Compatible with most OBD2 compliant vehicles</td>
- </tr>
- <tr>
- <td>Car Scanner ELM OBD2</td>
- <td>Free (with in-app purchases)</td>
- <td>- Displays over 100 parameters in real-time<br>- Reads and clears fault codes and shows their definitions<br>- Resets the check engine light<br>- Performs various tests and diagnostics<br>- Records and exports data in CSV format<br>- Creates custom dashboards with gauges and graphs<br>- Supports multiple languages<br>- Supports multiple protocols<br>- Compatible with most OBD2 compliant vehicles</td>
- </tr>
- <tr>
- <td>OBD Fusion</td>
- <td>$4.99</td>
- <td>- Displays over 100 parameters in real-time<br>- Reads and clears fault codes and shows their definitions<br>- Resets the check engine light<br>- Performs various tests and diagnostics<br>- Records and exports data in CSV format<br>- Creates custom dashboards with gauges and graphs<br>- Supports multiple languages<br>- Supports multiple protocols<br>- Compatible with most OBD2 compliant vehicles</td>
- </tr>
- <tr>
- <td>OBD Auto Doctor</td>
- <td>Free (with in-app purchases)</td>
- <td>- Displays over 100 parameters in real-time<br>- Reads and clears fault codes and shows their definitions<br>- Resets the check engine light<br>- Performs various tests and diagnostics<br>- Records and exports data in CSV format<br>- Creates custom dashboards with gauges and graphs<br>- Supports multiple languages<br>- Supports multiple protocols<br>- Compatible with most OBD2 compliant vehicles</td>
- </tr>
- <tr>
- <td>OBDLink</td>
- <td>Free (with in-app purchases)</td>
- <td>- Displays over 100 parameters in real-time<br>- Reads and clears fault codes and shows their definitions<br>- Resets the check engine light<br>- Performs various tests and diagnostics<br>- Records and exports data in CSV format<br>- Creates custom dashboards with gauges and graphs<br>- Supports multiple languages<br>- Supports multiple protocols<br>- Compatible with most OBD2 compliant vehicles</td>
- </tr>
- </table>
- <h2>Conclusion</h2>
- <p>In conclusion, EOBD Facile is a useful app that can help you diagnose your car's performance and troubleshoot any issues with ease. However, if you want to use the full version of the app without paying for it, you might be tempted to download EOBD Facile version complete crack APK from the internet. This is not a safe or ethical option, as it may expose your device to malware or viruses, violate intellectual property rights or laws, miss out on updates or support from the developers, or compromise your user experience or satisfaction. Therefore, we recommend that you either upgrade to EOBD Facile Plus Edition, which is a paid subscription service that gives you access to all the features of the full version of the app for a monthly or yearly fee, or use other OBD2 apps for Android that can provide you with similar features and benefits without risking your device or violating any laws.</p>
- <h3>Frequently Asked Questions (FAQs)</h3>
- <ol><li><b>What is EOBD Facile?</b><br>EOBD Facile is an app that allows you to connect your Android device to your car's OBD2 port via a Bluetooth or Wi-Fi adapter and access real-time data from your car's sensors, read and clear fault codes, reset the check engine light, and perform various tests and diagnostics.</li>
- <li><b>How to download EOBD Facile version complete crack APK?</b><br>To download EOBD Facile version complete crack APK, you need to find a reliable source that offers it for download, enable unknown sources on your device, install the APK file on your device, and launch the app.</li>
- <li><b>Is EOBD Facile version complete crack APK safe?</b><br>No, EOBD Facile version complete crack APK is not safe, as it may expose your device to malware or viruses, violate intellectual property rights or laws, miss out on updates or support from the developers, or compromise your user experience or satisfaction.</li>
- <li><b>What are some alternatives to EOBD Facile version complete crack APK?</b><br>Some alternatives to EOBD Facile version complete crack APK are EOBD Facile Plus Edition, which is a paid subscription service that gives you access to all the features of the full version of the app for a monthly or yearly fee, or other OBD2 apps for Android that can provide you with similar features and benefits without risking your device or violating any laws.</li>
- <li><b>What are some features of EOBD Facile?</b><br>Some features of EOBD Facile are compatible with most OBD2 compliant vehicles, supports multiple protocols, displays over 100 parameters in real-time, reads and clears fault codes and shows their definitions, resets the check engine light, performs various tests and diagnostics, records and exports data in CSV format, creates custom dashboards with gauges and graphs, supports multiple languages.</li></ol>
- </p> 0a6ba089eb<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Avira Software Updater Pro Activation Code.md DELETED
@@ -1,21 +0,0 @@
-
- <h1>How to Activate Avira Software Updater Pro with a License Key</h1>
- <p>Avira Software Updater Pro is a powerful tool that helps you keep your software drivers up to date on your PC. It scans your system for outdated software and lets you download and install the latest versions with a single click. It also protects you from security vulnerabilities and exploits by patching your software as soon as updates are available.</p>
- <p>But how do you activate Avira Software Updater Pro with a license key? In this article, we will show you the steps to do so.</p>
- <h2>Avira Software Updater Pro Activation Code</h2><br /><p><b><b>DOWNLOAD</b> &#10002; <a href="https://imgfil.com/2uy0rr">https://imgfil.com/2uy0rr</a></b></p><br /><br />
- <h2>Step 1: Download and install Avira Software Updater Pro</h2>
- <p>You can download Avira Software Updater Pro from the official website[^1^] or from FileHippo[^3^]. The file size is about 5.41 MB and the installation process is simple and fast. Just follow the instructions on the screen and agree to the terms and conditions.</p>
- <h2>Step 2: Run Avira Software Updater Pro and enter your license key</h2>
- <p>After installing Avira Software Updater Pro, run it from your desktop or start menu. You will see a window like this:</p>
- <img src="https://i.imgur.com/7l0Z9Xs.png" alt="Avira Software Updater Pro window">
- <p>Click on the "Upgrade now" button at the bottom right corner. You will be prompted to enter your license key. You can find your license key in the confirmation email that you received after purchasing Avira Software Updater Pro. Alternatively, you can log in to your Avira account and access your license key from there.</p>
- <p>Copy and paste your license key into the text box and click on "Activate". You will see a message that says "Your license has been activated successfully". Congratulations! You have now activated Avira Software Updater Pro with a license key.</p>
- <h2>Step 3: Enjoy the benefits of Avira Software Updater Pro</h2>
- <p>Now that you have activated Avira Software Updater Pro, you can enjoy its features and benefits. You can scan your system for outdated software, download and install updates automatically or manually, select which software and drivers you want to keep up to date, and more. You can also customize your settings and preferences according to your needs.</p>
- <p>Avira Software Updater Pro supports hundreds of third-party software, including popular ones like Zoom, Adobe, Google, Skype, etc.[^1^] It also updates both Windows and third-party software[^2^], ensuring that you have the latest features, optimizations, bug fixes, and security patches.</p>
- <p>With Avira Software Updater Pro, you can save time and effort, improve your PC performance, and protect yourself from cyberattacks. It is a simple, elegant, and easy to use solution for keeping your software drivers up to date on your PC.</p>
- <p></p>
- <h2>Conclusion</h2>
- <p>In this article, we have shown you how to activate Avira Software Updater Pro with a license key. We hope that this guide has been helpful for you. If you have any questions or problems, please contact Avira support[^4^] or visit their official website[^1^] for more information.</p> d5da3c52bf<br />
- <br />
- <br />
spaces/1gistliPinn/ChatGPT4/Examples/Flight Of The Phoenix In Hindi Movie Dubbed 48.md DELETED
@@ -1,6 +0,0 @@
- <h2>Flight Of The Phoenix In Hindi Movie Dubbed 48</h2><br /><p><b><b>Download File</b> &#10001; &#10001; &#10001; <a href="https://imgfil.com/2uy1Zq">https://imgfil.com/2uy1Zq</a></b></p><br /><br />
-
- 3cee63e6c2<br />
- <br />
- <br />
- <p></p>
spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/APK Shopee Merchant The Ultimate Guide for ShopeePay ShopeeFood Merchants.md DELETED
@@ -1,161 +0,0 @@
-
- <h1>Download APK Shopee Merchant: A Guide for Android Users</h1>
- <p>If you are an online seller who wants to grow your business with Shopee, you might be interested in downloading APK Shopee Merchant. This is a practical and reliable application that helps you manage your business more easily with Shopee, no. 1 online shopping platform in Indonesia, anytime and anywhere.</p>
- <p>But what is Shopee Merchant, and what is an APK file? And why would you want to download it instead of getting it from Google Play? In this article, we will answer these questions and show you how to download and use APK Shopee Merchant on your Android device.</p>
- <h2>download apk shopee merchant</h2><br /><p><b><b>DOWNLOAD</b> &#9989; <a href="https://urlin.us/2uSUoa">https://urlin.us/2uSUoa</a></b></p><br /><br />
- <h2>What is Shopee Merchant?</h2>
- <p>Shopee Merchant is an app that allows you to join ShopeePay and ShopeeFood easily in one app. ShopeePay is a digital payment service that lets you accept payments from customers using QR codes or phone numbers. ShopeeFood is a food delivery service that lets you sell your food products to hungry customers in your area.</p>
- <p>As a merchant, you will get the following benefits from using Shopee Merchant:</p>
- <ul>
- <li>Self-registration: You can sign up as a seller on Shopee without any hassle or fees.</li>
- <li>Supporting features: You can access various features that help you manage your inventory, orders , payments, promotions, and customer service.</li>
- <li>Integrated wallet: You can receive and withdraw your earnings directly from your ShopeePay wallet.</li>
- <li>Self-promo creation: You can create and customize your own promotional materials, such as banners, flyers, and stickers, to attract more customers.</li>
- <li>Analytics and insights: You can monitor your business performance and get useful tips and suggestions to improve your sales.</li>
- </ul>
- <p>With Shopee Merchant, you can enjoy the convenience and security of selling online with Shopee, the leading e-commerce platform in Southeast Asia and Taiwan.</p>
- <h2>What is an APK file?</h2>
- <p>An APK file is a file format that stands for Android Package Kit. It is used to distribute and install applications on Android devices. An APK file contains all the components of an app, such as the code, resources, assets, certificates, and manifest.</p>
- <p>How to download apk shopee partner app for android<br />
- Shopee partner apk latest version free download<br />
- Benefits of using shopee partner app for shopeepay and shopeefood merchant<br />
- Shopee partner app review and rating by users<br />
- Tips and tricks to manage your business with shopee partner app<br />
- Shopee partner app download size and compatibility<br />
- How to join shopeepay and shopeefood easily with shopee partner app<br />
- How to track your wallet balance and transaction history with shopee partner app<br />
- How to organize your menu and create promotion with shopee partner app<br />
- How to update your information and menu with shopee partner app<br />
- Shopee partner app vs other apps for online shopping platform merchants<br />
- How to contact shopee customer service through shopee partner app<br />
- How to register and verify your account with shopee partner app<br />
- How to use shopee partner app offline mode<br />
- How to sync your data across devices with shopee partner app<br />
- How to backup and restore your data with shopee partner app<br />
- How to enable notifications and alerts with shopee partner app<br />
- How to customize your settings and preferences with shopee partner app<br />
- How to troubleshoot common issues with shopee partner app<br />
- How to uninstall and reinstall shopee partner app<br />
- How to get the best deals and discounts with shopee partner app<br />
- How to increase your sales and revenue with shopee partner app<br />
- How to attract more customers and reviews with shopee partner app<br />
- How to improve your ranking and visibility with shopee partner app<br />
- How to integrate your social media accounts with shopee partner app<br />
- How to access analytics and reports with shopee partner app<br />
- How to use QR code scanner and generator with shopee partner app<br />
- How to accept multiple payment methods with shopee partner app<br />
- How to manage your inventory and orders with shopee partner app<br />
- How to handle refunds and cancellations with shopee partner app<br />
- How to join the shopee community and network with other merchants with shopee partner app<br />
- How to participate in contests and events with shopee partner app<br />
- How to earn rewards and points with shopee partner app<br />
- How to redeem vouchers and coupons with shopee partner app<br />
- How to share feedback and suggestions with shopee partner app<br />
- Shopee partner apk modded version download link<br />
- Shopee partner apk cracked version download link<br />
- Shopee partner apk premium version download link<br />
- Shopee partner apk pro version download link<br />
- Shopee partner apk hacked version download link<br />
- Shopee partner apk old version download link<br />
- Shopee partner apk beta version download link<br />
- Shopee partner apk original version download link<br />
- Shopee partner apk mirror version download link<br />
- Shopee partner apk alternative version download link</p>
- <p>An APK file can be opened on Android devices by using a file manager app or a web browser. However, before installing an APK file, you need to enable the option to allow installation of apps from unknown sources in your device settings. This is because APK files are not verified by Google Play, which is the official app store for Android devices.</p>
- <h2>Why download APK Shopee Merchant?</h2>
- <h3>Access the latest version of the app</h3>
- <p>One of the reasons why you might want to download APK Shopee Merchant is to access the latest version of the app. Sometimes, the app updates are not available on Google Play due to various reasons, such as compatibility issues, regional restrictions, or technical errors. By downloading the APK file from a reliable source, you can get the most updated version of Shopee Merchant, which may have new features, bug fixes, or performance improvements.</p>
- <h3>Install the app on unsupported devices</h3>
- <p>Another reason why you might want to download APK Shopee Merchant is to install the app on devices that are not supported by Google Play. Some devices may not be compatible with Google Play due to their hardware specifications, software versions, or manufacturer policies. Some devices may also have limited storage space that prevents them from downloading large apps from Google Play. By downloading the APK file from a website, you can install Shopee Merchant on any device that runs on Android OS, as long as it meets the minimum requirements of the app.</p>
- <h3>Avoid regional restrictions</h3>
- <p>A third reason why you might want to download APK Shopee Merchant is to avoid regional restrictions. Some apps may not be available or accessible in certain regions due to legal regulations, licensing agreements, or censorship policies. For example, Shopee Merchant may not be available in some countries where Shopee does not operate or where online selling is prohibited or regulated. By downloading the APK file from a website, you can bypass these restrictions and use Shopee Merchant wherever you are.</p>
- <h2>How to download APK Shopee Merchant?</h2>
- <h3>Find a reliable source</h3>
- <p>The first step to download APK Shopee Merchant is to find a reliable source that offers the APK file for download. There are many websites that provide APK files for various apps, but not all of them are trustworthy or safe. Some websites may contain malware, viruses, or fake files that can harm your device or steal your data.</p>
- <p>To find a reliable source, you should look for the following criteria:</p>
- <ul>
- <li>The website has a good reputation and positive reviews from other users.</li>
- <li>The website has a secure connection (HTTPS) and a valid certificate.</li>
- <li>The website provides clear and accurate information about the APK file, such as the name, size, version, developer, and permissions.</li>
- <li>The website does not require you to register, pay, or complete surveys to download the APK file.</li>
- <li>The website does not have excessive ads or pop-ups that interfere with your browsing experience.</li>
- </ul>
- <p>One example of a reliable source that offers APK Shopee Merchant for download is [APKPure], which is one of the most popular and trusted websites for downloading APK files.</p>
- <h3>Enable unknown sources</h3>
- <p>The second step to download APK Shopee Merchant is to enable unknown sources on your device settings. This will allow you to install apps from sources other than Google Play. To do this, follow these steps:</p>
- <ol>
- <li>Go to your device settings and tap on Security or Privacy.</li>
- <li>Find the option that says Unknown sources or Install unknown apps and toggle it on.</li>
- <li>A warning message will appear asking you to confirm your action. Tap on OK or Allow.</li>
- </ol>
- <p>Note that this option may vary depending on your device model and Android version. <h3>Download and install the file</h3>
- <p>The third step to download APK Shopee Merchant is to download and install the file on your device. To do this, follow these steps:</p>
- <ol>
- <li>Go to the website that offers the APK file for download and tap on the download button or link.</li>
- <li>A pop-up window will appear asking you to confirm your download. Tap on OK or Download.</li>
- <li>Wait for the download to complete. You can check the progress on your notification bar or your download folder.</li>
- <li>Once the download is finished, tap on the APK file to open it. You may need to use a file manager app to locate it on your device.</li>
- <li>A prompt will appear asking you to install the app. Tap on Install or Next.</li>
- <li>Wait for the installation to complete. You can check the progress on your screen or your notification bar.</li>
- <li>Once the installation is finished, tap on Open or Done.</li>
- </ol>
- <p>Congratulations! You have successfully downloaded and installed APK Shopee Merchant on your device. You can now start using the app to manage your business with Shopee.</p>
- <h2>How to use APK Shopee Merchant?</h2>
- <h3>Register as a merchant</h3>
- <p>The first step to use APK Shopee Merchant is to register as a merchant on Shopee. To do this, follow these steps:</p>
- <ol>
- <li>Open the app and tap on Sign Up or Register.</li>
- <li>Select your country and enter your phone number. Tap on Next or Send OTP.</li>
- <li>Enter the one-time password (OTP) that you received via SMS. Tap on Next or Verify.</li>
- <li>Create a password and a username for your account. Tap on Next or Register.</li>
- <li>Fill in your personal information, such as your name, email address, and date of birth. Tap on Next or Continue.</li>
- <li>Select the type of business you want to run, such as food, beverage, or others. Tap on Next or Continue.</li>
- <li>Fill in your business information, such as your business name, address, category, and description. Tap on Next or Continue.</li>
- <li>Upload your identity document, such as your ID card, passport, or driver's license. Tap on Next or Continue.</li>
- <li>Upload your business document, such as your business license, tax number, or bank statement. Tap on Next or Continue.</li>
- <li>Review and confirm your information and documents. Tap on Submit or Finish.</li>
- </ol>
- <p>Your registration is now complete. You will receive a confirmation message from Shopee within 24 hours. Once your account is verified, you can start selling on ShopeePay and ShopeeFood.</p>
- <h3>Manage your business</h3>
- <p>The second step to use APK Shopee Merchant is to manage your business using the app. To do this, you can access various features and functions that help you with the following tasks:</p>
- <table border="1">
- <tr><th>Task</th><th>Feature</th><th>Description</th></tr>
- <tr><td>Create and edit your menu</td><td>Menu</td><td>You can add, edit, delete, or arrange your products in different categories and subcategories. You can also set the prices, discounts, stock availability, and delivery options for each product.</td></tr>
- <tr><td>Track your orders and payments</td><td>Orders</td><td>You can view, accept, reject, or cancel your orders from customers. You can also update the status of your orders, such as preparing, ready, or delivered. You can also view the payment details and history of each order.</td></tr>
- <tr><td>Promote your products</td><td>Promotions</td><td>You can create and manage various types of promotions for your products, such as vouchers, flash sales, free shipping, or bundle deals. You can also set the duration, budget, and target audience for each promotion.</td></tr>
- <tr><td>Communicate with customers</td><td>Chat</td><td>You can chat with your customers directly from the app. You can send and receive text messages, images, videos, voice notes, or stickers. You can also use quick replies or templates to answer common questions or requests.</td></tr>
- </table>
- <p>With these features, you can manage your business more efficiently and effectively with Shopee Merchant.</p>
- <h3>Grow your sales</h3>
- <p>The third step to use APK Shopee Merchant is to grow your sales using the app. To do this, you can access various features and benefits that help you with the following goals:</p>
- <table border="1">
- <tr><th >Goal</th><th>Feature</th><th>Benefit</th></tr>
- <tr><td>Increase your visibility</td><td>Self-promo creation</td><td>You can create and customize your own promotional materials, such as banners, flyers, and stickers, to attract more customers. You can also print or share them on social media platforms.</td></tr>
- <tr><td>Improve your reputation</td><td>Ratings and reviews</td><td>You can collect and display ratings and reviews from your customers on your menu page. You can also respond to them and thank them for their feedback. This can help you build trust and loyalty among your customers.</td></tr>
- <tr><td>Expand your market</td><td>Regional expansion</td><td>You can expand your market to other regions where Shopee operates, such as Malaysia, Singapore, Thailand, Vietnam, Philippines, or Taiwan. You can also adjust your menu and prices according to the local preferences and demand.</td></tr>
- <tr><td>Optimize your performance</td><td>Analytics and insights</td><td>You can monitor your business performance and get useful tips and suggestions to improve your sales. You can also access various reports and statistics, such as sales volume, revenue, customer behavior, and market trends.</td></tr>
- </table>
- <p>With these features and benefits, you can grow your sales and customer satisfaction with Shopee Merchant.</p>
- <h2>Conclusion</h2>
- <p>In conclusion, downloading APK Shopee Merchant is a smart and convenient way to manage your business with Shopee on your Android device. You can access the latest version of the app, install it on unsupported devices, and avoid regional restrictions. You can also register as a merchant, manage your business, and grow your sales using various features and benefits that Shopee Merchant offers. If you are an online seller who wants to join ShopeePay and ShopeeFood easily in one app, you should download APK Shopee Merchant today and start selling more with Shopee.</p>
- <h2>FAQs</h2>
- <p>Here are some frequently asked questions that you might have about downloading APK Shopee Merchant:</p>
- <ol>
- <li>Is it safe to download APK files from unknown sources?</li>
- <p>It depends on the source that you download the APK file from. Some sources may be reliable and safe, while others may be malicious or fraudulent. To ensure your safety, you should only download APK files from reputable and trusted websites, such as [APKPure]. You should also scan the APK file with an antivirus app before installing it on your device.</p>
- <li>How can I update my APK Shopee Merchant app?</li>
- <p>You can update your APK Shopee Merchant app by downloading the latest version of the APK file from the same source that you downloaded it from. You can also check for updates within the app by tapping on the menu icon and selecting Settings > About > Check for updates.</p>
- <li>What if I encounter problems or errors while using the app?</li>
- <p>If you encounter any problems or errors while using the app, you can try the following solutions:</p>
- <ul>
- <li>Clear the cache and data of the app by going to your device settings > Apps > Shopee Merchant > Storage > Clear cache / Clear data.</li>
- <li>Uninstall and reinstall the app by deleting the APK file from your device and downloading it again from the website.</li>
- <li>Contact Shopee for support or feedback by tapping on the menu icon and selecting Help Center > Contact Us.</li>
- </ul>
- <li>Can I use APK Shopee Merchant on other operating systems besides Android?</li>
- <p>No, you cannot use APK Shopee Merchant on other operating systems besides Android. APK files are only compatible with Android devices. If you want to use Shopee Merchant on other devices, such as iOS or Windows, you will need to download the app from their respective app stores or use the web version of Shopee Merchant.</p>
- <li>How can I contact Shopee for support or feedback?</li>
- <p>You can contact Shopee for support or feedback by tapping on the menu icon and selecting Help Center > Contact Us. You can also email them at [[email protected]] or call them at [1500 407]. They are available 24/7 to assist you with any issues or inquiries that you may have.</p>
- </ol></p> 197e85843d<br />
- <br />
- <br />
spaces/1phancelerku/anime-remove-background/Crack Turkey Sandwiches - A Delicious Way to Use Up Turkey.md DELETED
@@ -1,111 +0,0 @@
1
- <br />
2
- <h1>What is Crackturkey and Why You Should Avoid It</h1>
3
- <p>If you are looking for cracked software, games, or accounts online, you might have come across some websites that claim to offer them for free or for a low price. These websites are known as crackturkey sites, and they are not what they seem. In fact, they are very dangerous and can harm your device, your data, and your identity. In this article, we will explain what crackturkey is, what are the risks of using it, and how to recognize and avoid crackturkey sites.</p>
4
- <h2>Introduction</h2>
5
- <h3>What is crackturkey?</h3>
6
- <p>Crackturkey is a term that refers to websites that offer cracked or pirated software, games, or accounts for download or purchase. These websites are usually run by hackers or scammers who want to infect your device with malware, steal your personal information, or trick you into paying for something that does not work or does not exist. Crackturkey sites often use fake names, logos, and reviews to make themselves look legitimate and trustworthy. However, they are anything but that.</p>
7
- <h2>crackturkey</h2><br /><p><b><b>Download</b> &raquo; <a href="https://jinyurl.com/2uNPkN">https://jinyurl.com/2uNPkN</a></b></p><br /><br />
8
- <h3>What are the risks of using crackturkey?</h3>
9
- <p>Using crackturkey can expose you to many serious risks, such as:</p>
10
- <ul>
11
- <li><b>Malware infection:</b> Crackturkey sites often contain malicious files that can infect your device with viruses, worms, trojans, ransomware, spyware, or adware. These malware can damage your device, delete or encrypt your files, monitor your online activity, steal your passwords, credit card numbers, or bank details, or display unwanted ads or pop-ups.</li>
12
- <li><b>Data loss:</b> Crackturkey sites can also cause you to lose your data, either by deleting it intentionally or accidentally, or by making it inaccessible due to encryption or corruption. You might lose your important documents, photos, videos, music, or other files that you have stored on your device.</li>
13
- <li><b>Identity theft:</b> Crackturkey sites can also compromise your identity by stealing your personal information, such as your name, email address, phone number, social media accounts, or other online identities. They can use this information to impersonate you online, send spam emails or messages in your name, make fraudulent purchases or transactions with your credit card or bank account, or access your other online accounts.</li>
14
- <li><b>Legal issues:</b> Crackturkey sites can also get you into legal trouble by violating the intellectual property rights of the original software or game developers or owners. Downloading or using cracked or pirated software or games is illegal in most countries and can result in fines or even jail time. You might also face lawsuits from the developers or owners who can sue you for damages.</li>
15
- </ul>
16
- <h2>How to Recognize and Avoid Crackturkey Sites</h2>
17
- <h3>How to spot a crackturkey site</h3>
18
- <p>Crackturkey sites can be hard to distinguish from legitimate ones at first glance. However, there are some signs that can help you identify them and avoid falling for their traps. Here are some of them:</p>
19
- <h4>Check the domain name and URL</h4>
20
- <p>A common way that crackturkey sites try to deceive you is by using domain names and URLs that look similar to the official ones of the software or game that they claim to offer. For example, they might use a domain name like <code>www.adobe-photoshop-crack.com</code> instead of <code>www.adobe.com</code>, or a URL like <code>https://www.crackerte <h4>Look for signs of poor quality and security</h4>
21
- <p>Another way that crackturkey sites can reveal their true nature is by showing signs of poor quality and security. For example, they might have:</p>
22
- <ul>
23
- <li><b>Spelling and grammar errors:</b> Crackturkey sites often have spelling and grammar mistakes in their content, titles, or descriptions. This can indicate that they are not professional or reliable, and that they might have been translated from another language by a machine or a non-native speaker.</li>
24
- <li><b>Broken links or images:</b> Crackturkey sites often have broken links or images that do not load properly or lead to nowhere. This can indicate that they are not maintained or updated regularly, and that they might contain outdated or corrupted files.</li>
25
- <li><b>Lack of HTTPS or SSL encryption:</b> Crackturkey sites often do not have HTTPS or SSL encryption, which means that they are not secure and that your data can be intercepted or tampered with by third parties. You can check if a website has HTTPS or SSL encryption by looking for a padlock icon or the word "Secure" in the address bar of your browser.</li>
26
- </ul>
27
- <h4>Beware of fake reviews and testimonials</h4>
28
- <p>A third way that crackturkey sites can try to fool you is by using fake reviews and testimonials to make themselves look credible and trustworthy. For example, they might have:</p>
29
- <ul>
77
- <li><b>Too many positive reviews:</b> Crackturkey sites often have too many positive reviews that sound too good to be true, such as "This is the best software ever!", "It works perfectly!", or "I love it!". These reviews are usually written by bots or paid reviewers who have not actually used the software or game.</li>
78
- <li><b>No negative reviews:</b> Crackturkey sites often have no negative reviews or complaints from users who have encountered problems or issues with the software or game. This can indicate that they are censoring or deleting any negative feedback, or that they have not been used by many people at all.</li>
79
- <li><b>No dates or names:</b> Crackturkey sites often have no dates or names attached to their reviews or testimonials, which makes them hard to verify or trust. This can indicate that they are fabricated or copied from other sources.</li>
80
- </ul>
81
- <h3>How to avoid crackturkey sites</h3>
82
- <p>Now that you know how to spot crackturkey sites, you might be wondering how to avoid them and protect yourself from their dangers. Here are some tips to help you do that:</p>
83
- <h4>Use reputable and trusted sources</h4>
84
- <p>The best way to avoid crackturkey sites is to use reputable and trusted sources for downloading or purchasing software, games, or accounts online. These sources are usually the official websites of the developers or owners, or authorized distributors or resellers. They offer genuine, legal, and safe products that are updated and supported regularly. You can also check the ratings, reviews, and feedback from other users who have used these sources before.</p>
85
- <h4>Use antivirus and firewall software</h4>
86
- <p>The second way to avoid crackturkey sites is to use antivirus and firewall software on your device. These tools can help you detect and block malware, phishing, or hacking attempts from crackturkey sites, and can warn you about suspicious or malicious websites you might encounter online. Always keep your antivirus and firewall software updated and scan your device regularly for threats.</p>
87
- <h4>Report and block crackturkey sites</h4>
88
- <p>The third way to avoid crackturkey sites is to report and block them whenever you find them online. You can report them to the authorities, such as the cybercrime units of your local police or the Federal Trade Commission (FTC) in the US. You can also report them to the web hosting providers, domain registrars, search engines, social media platforms, or other online services that they use, and block them in your browser, email, or phone settings, or with tools like Adblock Plus or Malwarebytes to prevent them from appearing on your screen.</p>
89
- <h2>Conclusion</h2>
90
- <h3>Summary of the main points</h3>
91
- <p>In conclusion, crackturkey is a term that refers to websites that offer cracked or pirated software, games, or accounts for download or purchase. These websites are very dangerous and can harm your device, your data, and your identity. They can also get you into legal trouble by violating the intellectual property rights of the original developers or owners. You should avoid crackturkey sites by using reputable and trusted sources, using antivirus and firewall software, and reporting and blocking them whenever you encounter them online.</p>
92
- <h3>Call to action</h3> <p>If you want to learn more about how to protect yourself from crackturkey and other online threats, you can check out some of these resources:</p>
93
- <ul>
94
- <li><a href="">How to Avoid Malware and Scams When Downloading Software</a></li>
95
- <li><a href="">How to Spot and Avoid Fake or Pirated Software</a></li>
96
- <li><a href="">How to Report Online Scams and Fraud</a></li>
97
- </ul>
98
- <p>We hope you found this article helpful and informative. If you did, please share it with your friends and family who might benefit from it. And if you have any questions or comments, please leave them below. We would love to hear from you!</p>
99
- <h2>FAQs</h2>
100
- <h3>What is crackturkey?</h3>
101
- <p>Crackturkey is a term that refers to websites that offer cracked or pirated software, games, or accounts for download or purchase.</p>
102
- <h3>What are the risks of using crackturkey?</h3>
103
- <p>Using crackturkey can expose you to many serious risks, such as malware infection, data loss, identity theft, and legal issues.</p>
104
- <h3>How to spot a crackturkey site?</h3>
105
- <p>You can spot a crackturkey site by checking the domain name and URL, looking for signs of poor quality and security, and watching out for fake reviews and testimonials.</p>
106
- <h3>How to avoid crackturkey sites?</h3>
107
- <p>You can avoid crackturkey sites by using reputable and trusted sources, using antivirus and firewall software, and reporting and blocking them whenever you find them online.</p>
108
- <h3>Where can I find more information about crackturkey and online security?</h3>
109
- <p>You can find more information about crackturkey and online security by visiting some of the resources we have listed above, or by doing your own research online.</p>
spaces/801artistry/RVC801/tools/torchgate/utils.py DELETED
@@ -1,66 +0,0 @@
1
- import torch
2
- from torch.types import Number
3
-
4
-
5
- @torch.no_grad()
6
- def amp_to_db(x: torch.Tensor, eps=torch.finfo(torch.float64).eps, top_db=40) -> torch.Tensor:
7
- """
8
- Convert the input tensor from amplitude to decibel scale.
9
-
10
- Arguments:
11
- x {[torch.Tensor]} -- [Input tensor.]
12
-
13
- Keyword Arguments:
14
- eps {[float]} -- [Small value to avoid numerical instability.]
15
- (default: {torch.finfo(torch.float64).eps})
16
- top_db {[float]} -- [threshold the output at ``top_db`` below the peak]
17
-              (default: {40})
18
-
19
- Returns:
20
- [torch.Tensor] -- [Output tensor in decibel scale.]
21
- """
22
- x_db = 20 * torch.log10(x.abs() + eps)
23
- return torch.max(x_db, (x_db.max(-1).values - top_db).unsqueeze(-1))
24
-
25
-
26
- @torch.no_grad()
27
- def temperature_sigmoid(x: torch.Tensor, x0: float, temp_coeff: float) -> torch.Tensor:
28
- """
29
- Apply a sigmoid function with temperature scaling.
30
-
31
- Arguments:
32
- x {[torch.Tensor]} -- [Input tensor.]
33
- x0 {[float]} -- [Parameter that controls the threshold of the sigmoid.]
34
- temp_coeff {[float]} -- [Parameter that controls the slope of the sigmoid.]
35
-
36
- Returns:
37
- [torch.Tensor] -- [Output tensor after applying the sigmoid with temperature scaling.]
38
- """
39
- return torch.sigmoid((x - x0) / temp_coeff)
40
-
41
-
42
- @torch.no_grad()
43
- def linspace(start: Number, stop: Number, num: int = 50, endpoint: bool = True, **kwargs) -> torch.Tensor:
44
- """
45
- Generate a linearly spaced 1-D tensor.
46
-
47
- Arguments:
48
- start {[Number]} -- [The starting value of the sequence.]
49
- stop {[Number]} -- [The end value of the sequence, unless `endpoint` is set to False.
50
- In that case, the sequence consists of all but the last of ``num + 1``
51
- evenly spaced samples, so that `stop` is excluded. Note that the step
52
- size changes when `endpoint` is False.]
53
-
54
- Keyword Arguments:
55
- num {[int]} -- [Number of samples to generate. Default is 50. Must be non-negative.]
56
- endpoint {[bool]} -- [If True, `stop` is the last sample. Otherwise, it is not included.
57
- Default is True.]
58
- **kwargs -- [Additional arguments to be passed to the underlying PyTorch `linspace` function.]
59
-
60
- Returns:
61
- [torch.Tensor] -- [1-D tensor of `num` equally spaced samples from `start` to `stop`.]
62
- """
63
- if endpoint:
64
- return torch.linspace(start, stop, num, **kwargs)
65
- else:
66
- return torch.linspace(start, stop, num + 1, **kwargs)[:-1]
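A minimal usage sketch of the three helpers above (the import path is assumed from the repo layout shown in the header; the input values are illustrative only):

```python
import torch
from tools.torchgate.utils import amp_to_db, temperature_sigmoid, linspace  # path assumed

x = torch.rand(2, 16000) + 1e-3        # batch of positive amplitudes
db = amp_to_db(x)                      # dB scale, floored 40 dB below each row's peak
assert db.shape == x.shape

gate = temperature_sigmoid(db, x0=-20.0, temp_coeff=5.0)  # soft mask around -20 dB

pts = linspace(0.0, 1.0, num=4, endpoint=False)
# tensor([0.0000, 0.2500, 0.5000, 0.7500]) -- numpy-style, `stop` excluded
```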
spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/open_clip/factory.py DELETED
@@ -1,277 +0,0 @@
1
- import json
2
- import logging
3
- import os
4
- import pathlib
5
- import re
6
- from copy import deepcopy
7
- from pathlib import Path
8
-
9
- import torch
10
-
11
- from .model import CLAP, convert_weights_to_fp16
12
- from .openai import load_openai_model
13
- from .pretrained import get_pretrained_url, download_pretrained
14
- from .transform import image_transform
15
-
16
- _MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
17
- _MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
18
-
19
-
20
- def _natural_key(string_):
21
- return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
22
-
23
-
24
- def _rescan_model_configs():
25
- global _MODEL_CONFIGS
26
-
27
- config_ext = (".json",)
28
- config_files = []
29
- for config_path in _MODEL_CONFIG_PATHS:
30
- if config_path.is_file() and config_path.suffix in config_ext:
31
- config_files.append(config_path)
32
- elif config_path.is_dir():
33
- for ext in config_ext:
34
- config_files.extend(config_path.glob(f"*{ext}"))
35
-
36
- for cf in config_files:
37
- if os.path.basename(cf)[0] == ".":
38
- continue # Ignore hidden files
39
-
40
- with open(cf, "r") as f:
41
- model_cfg = json.load(f)
42
- if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")):
43
- _MODEL_CONFIGS[cf.stem] = model_cfg
44
-
45
- _MODEL_CONFIGS = {
46
- k: v
47
- for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
48
- }
49
-
50
-
51
- _rescan_model_configs() # initial populate of model config registry
52
-
53
-
54
- def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
55
- checkpoint = torch.load(checkpoint_path, map_location=map_location)
56
- if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
57
- state_dict = checkpoint["state_dict"]
58
- else:
59
- state_dict = checkpoint
60
- if skip_params:
61
- if next(iter(state_dict.items()))[0].startswith("module"):
62
- state_dict = {k[7:]: v for k, v in state_dict.items()}
63
- # for k in state_dict:
64
- # if k.startswith('transformer'):
65
- # v = state_dict.pop(k)
66
- # state_dict['text_branch.' + k[12:]] = v
67
- return state_dict
68
-
69
-
70
- def create_model(
71
- amodel_name: str,
72
- tmodel_name: str,
73
- pretrained: str = "",
74
- precision: str = "fp32",
75
- device: torch.device = torch.device("cpu"),
76
- jit: bool = False,
77
- force_quick_gelu: bool = False,
78
- openai_model_cache_dir: str = os.path.expanduser("~/.cache/clip"),
79
- skip_params=True,
80
- pretrained_audio: str = "",
81
- pretrained_text: str = "",
82
- enable_fusion: bool = False,
83
- fusion_type: str = "None"
84
- # pretrained_image: bool = False,
85
- ):
86
- amodel_name = amodel_name.replace(
87
- "/", "-"
88
- ) # for callers using old naming with / in ViT names
89
- pretrained_orig = pretrained
90
- pretrained = pretrained.lower()
91
- if pretrained == "openai":
92
- if amodel_name in _MODEL_CONFIGS:
93
- logging.info(f"Loading {amodel_name} model config.")
94
- model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
95
- else:
96
- logging.error(
97
- f"Model config for {amodel_name} not found; available models {list_models()}."
98
- )
99
- raise RuntimeError(f"Model config for {amodel_name} not found.")
100
-
101
- logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.")
102
- # Hard Code in model name
103
- model_cfg["text_cfg"]["model_type"] = tmodel_name
104
- model = load_openai_model(
105
- "ViT-B-16",
106
- model_cfg,
107
- device=device,
108
- jit=jit,
109
- cache_dir=openai_model_cache_dir,
110
- enable_fusion=enable_fusion,
111
- fusion_type=fusion_type,
112
- )
113
- # See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
114
- if precision == "amp" or precision == "fp32":
115
- model = model.float()
116
- else:
117
- if amodel_name in _MODEL_CONFIGS:
118
- logging.info(f"Loading {amodel_name} model config.")
119
- model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
120
- else:
121
- logging.error(
122
- f"Model config for {amodel_name} not found; available models {list_models()}."
123
- )
124
- raise RuntimeError(f"Model config for {amodel_name} not found.")
125
-
126
- if force_quick_gelu:
127
- # override for use of QuickGELU on non-OpenAI transformer models
128
- model_cfg["quick_gelu"] = True
129
-
130
- # if pretrained_image:
131
- # if 'timm_amodel_name' in model_cfg.get('vision_cfg', {}):
132
- # # pretrained weight loading for timm models set via vision_cfg
133
- # model_cfg['vision_cfg']['timm_model_pretrained'] = True
134
- # else:
135
- # assert False, 'pretrained image towers currently only supported for timm models'
136
- model_cfg["text_cfg"]["model_type"] = tmodel_name
137
- model_cfg["enable_fusion"] = enable_fusion
138
- model_cfg["fusion_type"] = fusion_type
139
- model = CLAP(**model_cfg)
140
-
141
- if pretrained:
142
- checkpoint_path = ""
143
- url = get_pretrained_url(amodel_name, pretrained)
144
- if url:
145
- checkpoint_path = download_pretrained(url, root=openai_model_cache_dir)
146
- elif os.path.exists(pretrained_orig):
147
- checkpoint_path = pretrained_orig
148
- if checkpoint_path:
149
- logging.info(
150
- f"Loading pretrained {amodel_name}-{tmodel_name} weights ({pretrained})."
151
- )
152
- ckpt = load_state_dict(checkpoint_path, skip_params=True)
153
- model.load_state_dict(ckpt)
154
- param_names = [n for n, p in model.named_parameters()]
155
- # for n in param_names:
156
- # print(n, "\t", "Loaded" if n in ckpt else "Unloaded")
157
- else:
158
- logging.warning(
159
- f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
160
- )
161
- raise RuntimeError(
162
- f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
163
- )
164
-
165
- if pretrained_audio:
166
- if amodel_name.startswith("PANN"):
167
- if "Cnn14_mAP" in pretrained_audio: # official checkpoint
168
- audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
169
- audio_ckpt = audio_ckpt["model"]
170
- keys = list(audio_ckpt.keys())
171
- for key in keys:
172
- if (
173
- "spectrogram_extractor" not in key
174
- and "logmel_extractor" not in key
175
- ):
176
- v = audio_ckpt.pop(key)
177
- audio_ckpt["audio_branch." + key] = v
178
- elif os.path.basename(pretrained_audio).startswith(
179
- "PANN"
180
- ): # checkpoint trained via HTSAT codebase
181
- audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
182
- audio_ckpt = audio_ckpt["state_dict"]
183
- keys = list(audio_ckpt.keys())
184
- for key in keys:
185
- if key.startswith("sed_model"):
186
- v = audio_ckpt.pop(key)
187
- audio_ckpt["audio_branch." + key[10:]] = v
188
- elif os.path.basename(pretrained_audio).startswith(
189
- "finetuned"
190
- ): # checkpoint trained via linear probe codebase
191
- audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
192
- else:
193
- raise ValueError("Unknown audio checkpoint")
194
- elif amodel_name.startswith("HTSAT"):
195
- if "HTSAT_AudioSet_Saved" in pretrained_audio: # official checkpoint
196
- audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
197
- audio_ckpt = audio_ckpt["state_dict"]
198
- keys = list(audio_ckpt.keys())
199
- for key in keys:
200
- if key.startswith("sed_model") and (
201
- "spectrogram_extractor" not in key
202
- and "logmel_extractor" not in key
203
- ):
204
- v = audio_ckpt.pop(key)
205
- audio_ckpt["audio_branch." + key[10:]] = v
206
- elif os.path.basename(pretrained_audio).startswith(
207
- "HTSAT"
208
- ): # checkpoint trained via HTSAT codebase
209
- audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
210
- audio_ckpt = audio_ckpt["state_dict"]
211
- keys = list(audio_ckpt.keys())
212
- for key in keys:
213
- if key.startswith("sed_model"):
214
- v = audio_ckpt.pop(key)
215
- audio_ckpt["audio_branch." + key[10:]] = v
216
- elif os.path.basename(pretrained_audio).startswith(
217
- "finetuned"
218
- ): # checkpoint trained via linear probe codebase
219
- audio_ckpt = torch.load(pretrained_audio, map_location="cpu")
220
- else:
221
- raise ValueError("Unknown audio checkpoint")
222
- else:
223
- raise f"this audio encoder pretrained checkpoint is not support"
224
-
225
- model.load_state_dict(audio_ckpt, strict=False)
226
- logging.info(
227
- f"Loading pretrained {amodel_name} weights ({pretrained_audio})."
228
- )
229
- param_names = [n for n, p in model.named_parameters()]
230
- for n in param_names:
231
- print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded")
232
-
233
- model.to(device=device)
234
- if precision == "fp16":
235
- assert device.type != "cpu"
236
- convert_weights_to_fp16(model)
237
-
238
- if jit:
239
- model = torch.jit.script(model)
240
-
241
- return model, model_cfg
242
-
243
-
244
- def create_model_and_transforms(
245
- model_name: str,
246
- pretrained: str = "",
247
- precision: str = "fp32",
248
- device: torch.device = torch.device("cpu"),
249
- jit: bool = False,
250
- force_quick_gelu: bool = False,
251
- # pretrained_image: bool = False,
252
- ):
253
- model, model_cfg = create_model(
254
- model_name,
255
- pretrained,
256
- precision,
257
- device,
258
- jit,
259
- force_quick_gelu=force_quick_gelu,
260
- # pretrained_image=pretrained_image
261
- )
262
- preprocess_train = image_transform(model.visual.image_size, is_train=True)
263
- preprocess_val = image_transform(model.visual.image_size, is_train=False)
264
- return model, preprocess_train, preprocess_val
265
-
266
-
267
- def list_models():
268
- """enumerate available model architectures based on config files"""
269
- return list(_MODEL_CONFIGS.keys())
270
-
271
-
272
- def add_model_config(path):
273
- """add model config path or file and update registry"""
274
- if not isinstance(path, Path):
275
- path = Path(path)
276
- _MODEL_CONFIG_PATHS.append(path)
277
- _rescan_model_configs()
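A hedged usage sketch for `create_model` above. The config names below are assumptions and must match a JSON file under `model_configs/`. Note also that `create_model_and_transforms` passes `pretrained` into `create_model`'s second positional slot, which is `tmodel_name`, so calling `create_model` with keywords, as below, is the safer route:

```python
import torch

model, model_cfg = create_model(
    amodel_name="HTSAT-tiny",        # assumed audio config name under model_configs/
    tmodel_name="roberta",           # assumed text tower type
    pretrained="",                   # "", "openai", or a local checkpoint path
    precision="fp32",
    device=torch.device("cpu"),
    enable_fusion=False,
)
print(list_models())                 # config names discovered by _rescan_model_configs()
```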
spaces/AIFILMS/generate_human_motion/VQ-Trans/models/vqvae.py DELETED
@@ -1,118 +0,0 @@
1
- import torch.nn as nn
2
- from models.encdec import Encoder, Decoder
3
- from models.quantize_cnn import QuantizeEMAReset, Quantizer, QuantizeEMA, QuantizeReset
4
-
5
-
6
- class VQVAE_251(nn.Module):
7
- def __init__(self,
8
- args,
9
- nb_code=1024,
10
- code_dim=512,
11
- output_emb_width=512,
12
- down_t=3,
13
- stride_t=2,
14
- width=512,
15
- depth=3,
16
- dilation_growth_rate=3,
17
- activation='relu',
18
- norm=None):
19
-
20
- super().__init__()
21
- self.code_dim = code_dim
22
- self.num_code = nb_code
23
- self.quant = args.quantizer
24
- self.encoder = Encoder(251 if args.dataname == 'kit' else 263, output_emb_width, down_t, stride_t, width, depth, dilation_growth_rate, activation=activation, norm=norm)
25
- self.decoder = Decoder(251 if args.dataname == 'kit' else 263, output_emb_width, down_t, stride_t, width, depth, dilation_growth_rate, activation=activation, norm=norm)
26
- if args.quantizer == "ema_reset":
27
- self.quantizer = QuantizeEMAReset(nb_code, code_dim, args)
28
- elif args.quantizer == "orig":
29
- self.quantizer = Quantizer(nb_code, code_dim, 1.0)
30
- elif args.quantizer == "ema":
31
- self.quantizer = QuantizeEMA(nb_code, code_dim, args)
32
- elif args.quantizer == "reset":
33
- self.quantizer = QuantizeReset(nb_code, code_dim, args)
34
-
35
-
36
- def preprocess(self, x):
37
- # (bs, T, Jx3) -> (bs, Jx3, T)
38
- x = x.permute(0,2,1).float()
39
- return x
40
-
41
-
42
- def postprocess(self, x):
43
- # (bs, Jx3, T) -> (bs, T, Jx3)
44
- x = x.permute(0,2,1)
45
- return x
46
-
47
-
48
- def encode(self, x):
49
- N, T, _ = x.shape
50
- x_in = self.preprocess(x)
51
- x_encoder = self.encoder(x_in)
52
- x_encoder = self.postprocess(x_encoder)
53
- x_encoder = x_encoder.contiguous().view(-1, x_encoder.shape[-1]) # (NT, C)
54
- code_idx = self.quantizer.quantize(x_encoder)
55
- code_idx = code_idx.view(N, -1)
56
- return code_idx
57
-
58
-
59
- def forward(self, x):
60
-
61
- x_in = self.preprocess(x)
62
- # Encode
63
- x_encoder = self.encoder(x_in)
64
-
65
- ## quantization
66
- x_quantized, loss, perplexity = self.quantizer(x_encoder)
67
-
68
- ## decoder
69
- x_decoder = self.decoder(x_quantized)
70
- x_out = self.postprocess(x_decoder)
71
- return x_out, loss, perplexity
72
-
73
-
74
- def forward_decoder(self, x):
75
- x_d = self.quantizer.dequantize(x)
76
- x_d = x_d.view(1, -1, self.code_dim).permute(0, 2, 1).contiguous()
77
-
78
- # decoder
79
- x_decoder = self.decoder(x_d)
80
- x_out = self.postprocess(x_decoder)
81
- return x_out
82
-
83
-
84
-
85
- class HumanVQVAE(nn.Module):
86
- def __init__(self,
87
- args,
88
- nb_code=512,
89
- code_dim=512,
90
- output_emb_width=512,
91
- down_t=3,
92
- stride_t=2,
93
- width=512,
94
- depth=3,
95
- dilation_growth_rate=3,
96
- activation='relu',
97
- norm=None):
98
-
99
- super().__init__()
100
-
101
- self.nb_joints = 21 if args.dataname == 'kit' else 22
102
- self.vqvae = VQVAE_251(args, nb_code, code_dim, output_emb_width, down_t, stride_t, width, depth, dilation_growth_rate, activation=activation, norm=norm)
103
-
104
- def encode(self, x):
105
- b, t, c = x.size()
106
- quants = self.vqvae.encode(x) # (N, T)
107
- return quants
108
-
109
- def forward(self, x):
110
-
111
- x_out, loss, perplexity = self.vqvae(x)
112
-
113
- return x_out, loss, perplexity
114
-
115
- def forward_decoder(self, x):
116
- x_out = self.vqvae.forward_decoder(x)
117
- return x_out
118
-
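A shape sketch for the wrapper above, assuming an `args` namespace with just the fields the constructors read (the `"orig"` quantizer is chosen because it needs no extra hyperparameters on `args`), and assuming the encoder downsamples time by `stride_t ** down_t`:

```python
from types import SimpleNamespace
import torch

args = SimpleNamespace(dataname="t2m", quantizer="orig")   # assumed fields
net = HumanVQVAE(args, nb_code=512, code_dim=512)

motion = torch.randn(4, 64, 263)     # (batch, frames, 263-dim HumanML3D features)
recon, commit_loss, perplexity = net(motion)
codes = net.encode(motion)           # (4, 64 // 2**down_t) = (4, 8) token ids
```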
spaces/AIFILMS/generate_human_motion/pyrender/pyrender/constants.py DELETED
@@ -1,149 +0,0 @@
1
- DEFAULT_Z_NEAR = 0.05 # Near clipping plane, in meters
2
- DEFAULT_Z_FAR = 100.0 # Far clipping plane, in meters
3
- DEFAULT_SCENE_SCALE = 2.0 # Default scene scale
4
- MAX_N_LIGHTS = 4 # Maximum number of lights of each type allowed
5
- TARGET_OPEN_GL_MAJOR = 4 # Target OpenGL Major Version
6
- TARGET_OPEN_GL_MINOR = 1 # Target OpenGL Minor Version
7
- MIN_OPEN_GL_MAJOR = 3 # Minimum OpenGL Major Version
8
- MIN_OPEN_GL_MINOR = 3 # Minimum OpenGL Minor Version
9
- FLOAT_SZ = 4 # Byte size of GL float32
10
- UINT_SZ = 4 # Byte size of GL uint32
11
- SHADOW_TEX_SZ = 2048 # Width and Height of Shadow Textures
12
- TEXT_PADDING = 20 # Width of padding for rendering text (px)
13
-
14
-
15
- # Flags for render type
16
- class RenderFlags(object):
17
- """Flags for rendering in the scene.
18
-
19
- Combine them with the bitwise or. For example,
20
-
21
- >>> flags = OFFSCREEN | SHADOWS_DIRECTIONAL | VERTEX_NORMALS
22
-
23
- would result in an offscreen render with directional shadows and
24
- vertex normals enabled.
25
- """
26
- NONE = 0
27
- """Normal PBR Render."""
28
- DEPTH_ONLY = 1
29
- """Only render the depth buffer."""
30
- OFFSCREEN = 2
31
- """Render offscreen and return the depth and (optionally) color buffers."""
32
- FLIP_WIREFRAME = 4
33
- """Invert the status of wireframe rendering for each mesh."""
34
- ALL_WIREFRAME = 8
35
- """Render all meshes as wireframes."""
36
- ALL_SOLID = 16
37
- """Render all meshes as solids."""
38
- SHADOWS_DIRECTIONAL = 32
39
- """Render shadows for directional lights."""
40
- SHADOWS_POINT = 64
41
- """Render shadows for point lights."""
42
- SHADOWS_SPOT = 128
43
- """Render shadows for spot lights."""
44
- SHADOWS_ALL = 32 | 64 | 128
45
- """Render shadows for all lights."""
46
- VERTEX_NORMALS = 256
47
- """Render vertex normals."""
48
- FACE_NORMALS = 512
49
- """Render face normals."""
50
- SKIP_CULL_FACES = 1024
51
- """Do not cull back faces."""
52
- RGBA = 2048
53
- """Render the color buffer with the alpha channel enabled."""
54
- FLAT = 4096
55
- """Render the color buffer flat, with no lighting computations."""
56
- SEG = 8192
57
-
58
-
59
- class TextAlign:
60
- """Text alignment options for captions.
61
-
62
- Only use one at a time.
63
- """
64
- CENTER = 0
65
- """Center the text by width and height."""
66
- CENTER_LEFT = 1
67
- """Center the text by height and left-align it."""
68
- CENTER_RIGHT = 2
69
- """Center the text by height and right-align it."""
70
- BOTTOM_LEFT = 3
71
- """Put the text in the bottom-left corner."""
72
- BOTTOM_RIGHT = 4
73
- """Put the text in the bottom-right corner."""
74
- BOTTOM_CENTER = 5
75
- """Center the text by width and fix it to the bottom."""
76
- TOP_LEFT = 6
77
- """Put the text in the top-left corner."""
78
- TOP_RIGHT = 7
79
- """Put the text in the top-right corner."""
80
- TOP_CENTER = 8
81
- """Center the text by width and fix it to the top."""
82
-
83
-
84
- class GLTF(object):
85
- """Options for GL objects."""
86
- NEAREST = 9728
87
- """Nearest neighbor interpolation."""
88
- LINEAR = 9729
89
- """Linear interpolation."""
90
- NEAREST_MIPMAP_NEAREST = 9984
91
- """Nearest mipmapping."""
92
- LINEAR_MIPMAP_NEAREST = 9985
93
- """Linear mipmapping."""
94
- NEAREST_MIPMAP_LINEAR = 9986
95
- """Nearest mipmapping."""
96
- LINEAR_MIPMAP_LINEAR = 9987
97
- """Linear mipmapping."""
98
- CLAMP_TO_EDGE = 33071
99
- """Clamp to the edge of the texture."""
100
- MIRRORED_REPEAT = 33648
101
- """Mirror the texture."""
102
- REPEAT = 10497
103
- """Repeat the texture."""
104
- POINTS = 0
105
- """Render as points."""
106
- LINES = 1
107
- """Render as lines."""
108
- LINE_LOOP = 2
109
- """Render as a line loop."""
110
- LINE_STRIP = 3
111
- """Render as a line strip."""
112
- TRIANGLES = 4
113
- """Render as triangles."""
114
- TRIANGLE_STRIP = 5
115
- """Render as a triangle strip."""
116
- TRIANGLE_FAN = 6
117
- """Render as a triangle fan."""
118
-
119
-
120
- class BufFlags(object):
121
- POSITION = 0
122
- NORMAL = 1
123
- TANGENT = 2
124
- TEXCOORD_0 = 4
125
- TEXCOORD_1 = 8
126
- COLOR_0 = 16
127
- JOINTS_0 = 32
128
- WEIGHTS_0 = 64
129
-
130
-
131
- class TexFlags(object):
132
- NONE = 0
133
- NORMAL = 1
134
- OCCLUSION = 2
135
- EMISSIVE = 4
136
- BASE_COLOR = 8
137
- METALLIC_ROUGHNESS = 16
138
- DIFFUSE = 32
139
- SPECULAR_GLOSSINESS = 64
140
-
141
-
142
- class ProgramFlags:
143
- NONE = 0
144
- USE_MATERIAL = 1
145
- VERTEX_NORMALS = 2
146
- FACE_NORMALS = 4
147
-
148
-
149
- __all__ = ['RenderFlags', 'TextAlign', 'GLTF']
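As the `RenderFlags` docstring says, the flags are bit masks combined with bitwise OR; a quick sketch:

```python
flags = RenderFlags.OFFSCREEN | RenderFlags.SHADOWS_DIRECTIONAL | RenderFlags.RGBA

if flags & RenderFlags.RGBA:         # membership test via bitwise AND
    print("alpha channel requested")

assert RenderFlags.SHADOWS_ALL == (RenderFlags.SHADOWS_DIRECTIONAL
                                   | RenderFlags.SHADOWS_POINT
                                   | RenderFlags.SHADOWS_SPOT)
```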
spaces/AIGC-Audio/AudioGPT/text_to_speech/modules/commons/layers.py DELETED
@@ -1,50 +0,0 @@
1
- import torch
2
- from torch import nn
3
-
4
-
5
- class LayerNorm(torch.nn.LayerNorm):
6
- """Layer normalization module.
7
- :param int nout: output dim size
8
- :param int dim: dimension to be normalized
9
- """
10
-
11
- def __init__(self, nout, dim=-1, eps=1e-5):
12
- """Construct an LayerNorm object."""
13
- super(LayerNorm, self).__init__(nout, eps=eps)
14
- self.dim = dim
15
-
16
- def forward(self, x):
17
- """Apply layer normalization.
18
- :param torch.Tensor x: input tensor
19
- :return: layer normalized tensor
20
- :rtype torch.Tensor
21
- """
22
- if self.dim == -1:
23
- return super(LayerNorm, self).forward(x)
24
- return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
25
-
26
-
27
- class Reshape(nn.Module):
28
- def __init__(self, *args):
29
- super(Reshape, self).__init__()
30
- self.shape = args
31
-
32
- def forward(self, x):
33
- return x.view(self.shape)
34
-
35
-
36
- class Permute(nn.Module):
37
- def __init__(self, *args):
38
- super(Permute, self).__init__()
39
- self.args = args
40
-
41
- def forward(self, x):
42
- return x.permute(self.args)
43
-
44
-
45
- def Embedding(num_embeddings, embedding_dim, padding_idx=None):
46
- m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
47
- nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
48
- if padding_idx is not None:
49
- nn.init.constant_(m.weight[padding_idx], 0)
50
- return m
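A small sketch of the `dim` argument on this `LayerNorm`: with `dim=1` it normalizes channel-first `(B, C, T)` tensors by transposing around PyTorch's built-in LayerNorm, and `Embedding` zeroes the padding row:

```python
import torch

ln = LayerNorm(80, dim=1)            # normalize the channel axis of (B, C, T) inputs
x = torch.randn(4, 80, 100)
y = ln(x)                            # transpose -> LayerNorm(80) -> transpose back
assert y.shape == (4, 80, 100)

emb = Embedding(100, 80, padding_idx=0)
assert emb.weight[0].abs().sum() == 0   # padding row initialized to zeros
```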
spaces/AIGC-Audio/Make_An_Audio/ldm/modules/losses_audio/vggishish/loss.py DELETED
@@ -1,41 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
- import torch.optim as optim
5
-
6
- class WeightedCrossEntropy(nn.CrossEntropyLoss):
7
-
8
- def __init__(self, weights, **pytorch_ce_loss_args) -> None:
9
- super().__init__(reduction='none', **pytorch_ce_loss_args)
10
- self.weights = weights
11
-
12
- def __call__(self, outputs, targets, to_weight=True):
13
- loss = super().__call__(outputs, targets)
14
- if to_weight:
15
- return (loss * self.weights[targets]).sum() / self.weights[targets].sum()
16
- else:
17
- return loss.mean()
18
-
19
-
20
- if __name__ == '__main__':
21
- x = torch.randn(10, 5)
22
- target = torch.randint(0, 5, (10,))
23
- weights = torch.tensor([1., 2., 3., 4., 5.])
24
-
25
- # criterion_weighted = nn.CrossEntropyLoss(weight=weights)
26
- # loss_weighted = criterion_weighted(x, target)
27
-
28
- # criterion_weighted_manual = nn.CrossEntropyLoss(reduction='none')
29
- # loss_weighted_manual = criterion_weighted_manual(x, target)
30
- # print(loss_weighted, loss_weighted_manual.mean())
31
- # loss_weighted_manual = (loss_weighted_manual * weights[target]).sum() / weights[target].sum()
32
- # print(loss_weighted, loss_weighted_manual)
33
- # print(torch.allclose(loss_weighted, loss_weighted_manual))
34
-
35
- pytorch_weighted = nn.CrossEntropyLoss(weight=weights)
36
- pytorch_unweighted = nn.CrossEntropyLoss()
37
- custom = WeightedCrossEntropy(weights)
38
-
39
- assert torch.allclose(pytorch_weighted(x, target), custom(x, target, to_weight=True))
40
- assert torch.allclose(pytorch_unweighted(x, target), custom(x, target, to_weight=False))
41
- print(custom(x, target, to_weight=True), custom(x, target, to_weight=False))
spaces/ASJMO/freegpt/client/js/icons.js DELETED
@@ -1 +0,0 @@
1
- window.FontAwesomeKitConfig={asyncLoading:{enabled:!1},autoA11y:{enabled:!0},baseUrl:"https://ka-f.fontawesome.com",baseUrlKit:"https://kit-pro.fontawesome.com",detectConflictsUntil:null,iconUploads:{},id:96462084,license:"pro",method:"css",minify:{enabled:!0},token:"d0514f1901",v4FontFaceShim:{enabled:!0},v4shim:{enabled:!0},v5FontFaceShim:{enabled:!0},version:"6.1.1"},function(t){"function"==typeof define&&define.amd?define("kit-loader",t):t()}(function(){"use strict";function t(e){return(t="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t})(e)}function e(t,e,n){return e in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t}function n(t,e){var n=Object.keys(t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(t);e&&(o=o.filter(function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable})),n.push.apply(n,o)}return n}function o(t){for(var o=1;o<arguments.length;o++){var r=null!=arguments[o]?arguments[o]:{};o%2?n(Object(r),!0).forEach(function(n){e(t,n,r[n])}):Object.getOwnPropertyDescriptors?Object.defineProperties(t,Object.getOwnPropertyDescriptors(r)):n(Object(r)).forEach(function(e){Object.defineProperty(t,e,Object.getOwnPropertyDescriptor(r,e))})}return t}function r(t,e){return function(t){if(Array.isArray(t))return t}(t)||function(t,e){if("undefined"!=typeof Symbol&&Symbol.iterator in Object(t)){var n=[],o=!0,r=!1,i=void 0;try{for(var c,a=t[Symbol.iterator]();!(o=(c=a.next()).done)&&(n.push(c.value),!e||n.length!==e);o=!0);}catch(t){r=!0,i=t}finally{try{o||null==a.return||a.return()}finally{if(r)throw i}}return n}}(t,e)||function(t,e){if(t){if("string"==typeof t)return i(t,e);var n=Object.prototype.toString.call(t).slice(8,-1);return"Object"===n&&t.constructor&&(n=t.constructor.name),"Map"===n||"Set"===n?Array.from(t):"Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)?i(t,e):void 0}}(t,e)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()}function i(t,e){(null==e||e>t.length)&&(e=t.length);for(var n=0,o=new Array(e);n<e;n++)o[n]=t[n];return o}function c(t,e){var n=e&&e.addOn||"",o=e&&e.baseFilename||t.license+n,r=e&&e.minify?".min":"",i=e&&e.fileSuffix||t.method,c=e&&e.subdir||t.method;return t.baseUrl+"/releases/"+("latest"===t.version?"latest":"v".concat(t.version))+"/"+c+"/"+o+r+"."+i}function a(t,e){var n=e||["fa"],o="."+Array.prototype.join.call(n,",."),r=t.querySelectorAll(o);Array.prototype.forEach.call(r,function(e){var n=e.getAttribute("title");e.setAttribute("aria-hidden","true");var o=!e.nextElementSibling||!e.nextElementSibling.classList.contains("sr-only");if(n&&o){var r=t.createElement("span");r.innerHTML=n,r.classList.add("sr-only"),e.parentNode.insertBefore(r,e.nextSibling)}})}var u,f=function(){},s="undefined"!=typeof global&&void 0!==global.process&&"function"==typeof global.process.emit,d="undefined"==typeof setImmediate?setTimeout:setImmediate,l=[];function h(){for(var t=0;t<l.length;t++)l[t][0](l[t][1]);l=[],u=!1}function m(t,e){l.push([t,e]),u||(u=!0,d(h,0))}function p(t){var e=t.owner,n=e._state,o=e._data,r=t[n],i=t.then;if("function"==typeof r){n="fulfilled";try{o=r(o)}catch(t){g(i,t)}}v(i,o)||("fulfilled"===n&&b(i,o),"rejected"===n&&g(i,o))}function v(e,n){var o;try{if(e===n)throw new TypeError("A promises 
callback cannot return that same promise.");if(n&&("function"==typeof n||"object"===t(n))){var r=n.then;if("function"==typeof r)return r.call(n,function(t){o||(o=!0,n===t?y(e,t):b(e,t))},function(t){o||(o=!0,g(e,t))}),!0}}catch(t){return o||g(e,t),!0}return!1}function b(t,e){t!==e&&v(t,e)||y(t,e)}function y(t,e){"pending"===t._state&&(t._state="settled",t._data=e,m(A,t))}function g(t,e){"pending"===t._state&&(t._state="settled",t._data=e,m(S,t))}function w(t){t._then=t._then.forEach(p)}function A(t){t._state="fulfilled",w(t)}function S(t){t._state="rejected",w(t),!t._handled&&s&&global.process.emit("unhandledRejection",t._data,t)}function O(t){global.process.emit("rejectionHandled",t)}function j(t){if("function"!=typeof t)throw new TypeError("Promise resolver "+t+" is not a function");if(this instanceof j==0)throw new TypeError("Failed to construct 'Promise': Please use the 'new' operator, this object constructor cannot be called as a function.");this._then=[],function(t,e){function n(t){g(e,t)}try{t(function(t){b(e,t)},n)}catch(t){n(t)}}(t,this)}j.prototype={constructor:j,_state:"pending",_then:null,_data:void 0,_handled:!1,then:function(t,e){var n={owner:this,then:new this.constructor(f),fulfilled:t,rejected:e};return!e&&!t||this._handled||(this._handled=!0,"rejected"===this._state&&s&&m(O,this)),"fulfilled"===this._state||"rejected"===this._state?m(p,n):this._then.push(n),n.then},catch:function(t){return this.then(null,t)}},j.all=function(t){if(!Array.isArray(t))throw new TypeError("You must pass an array to Promise.all().");return new j(function(e,n){var o=[],r=0;function i(t){return r++,function(n){o[t]=n,--r||e(o)}}for(var c,a=0;a<t.length;a++)(c=t[a])&&"function"==typeof c.then?c.then(i(a),n):o[a]=c;r||e(o)})},j.race=function(t){if(!Array.isArray(t))throw new TypeError("You must pass an array to Promise.race().");return new j(function(e,n){for(var o,r=0;r<t.length;r++)(o=t[r])&&"function"==typeof o.then?o.then(e,n):e(o)})},j.resolve=function(e){return e&&"object"===t(e)&&e.constructor===j?e:new j(function(t){t(e)})},j.reject=function(t){return new j(function(e,n){n(t)})};var F="function"==typeof Promise?Promise:j;function E(t,e){var n=e.fetch,o=e.XMLHttpRequest,r=e.token,i=t;return"URLSearchParams"in window?(i=new URL(t)).searchParams.set("token",r):i=i+"?token="+encodeURIComponent(r),i=i.toString(),new F(function(t,e){if("function"==typeof n)n(i,{mode:"cors",cache:"default"}).then(function(t){if(t.ok)return t.text();throw new Error("")}).then(function(e){t(e)}).catch(e);else if("function"==typeof o){var r=new o;r.addEventListener("loadend",function(){this.responseText?t(this.responseText):e(new Error(""))}),["abort","error","timeout"].map(function(t){r.addEventListener(t,function(){e(new Error(""))})}),r.open("GET",i),r.send()}else e(new Error(""))})}function _(t,e,n){var o=t;return[[/(url\("?)\.\.\/\.\.\/\.\./g,function(t,n){return"".concat(n).concat(e)}],[/(url\("?)\.\.\/webfonts/g,function(t,o){return"".concat(o).concat(e,"/releases/v").concat(n,"/webfonts")}],[/(url\("?)https:\/\/kit-free([^.])*\.fontawesome\.com/g,function(t,n){return"".concat(n).concat(e)}]].forEach(function(t){var e=r(t,2),n=e[0],i=e[1];o=o.replace(n,i)}),o}function C(t,e){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:function(){},r=e.document||r,i=a.bind(a,r,["fa","fab","fas","far","fal","fad","fak"]),u=Object.keys(t.iconUploads||{}).length>0;t.autoA11y.enabled&&n(i);var f=[{id:"fa-main",addOn:void 
0}];t.v4shim&&t.v4shim.enabled&&f.push({id:"fa-v4-shims",addOn:"-v4-shims"}),t.v5FontFaceShim&&t.v5FontFaceShim.enabled&&f.push({id:"fa-v5-font-face",addOn:"-v5-font-face"}),t.v4FontFaceShim&&t.v4FontFaceShim.enabled&&f.push({id:"fa-v4-font-face",addOn:"-v4-font-face"}),u&&f.push({id:"fa-kit-upload",customCss:!0});var s=f.map(function(n){return new F(function(r,i){E(n.customCss?function(t){return t.baseUrlKit+"/"+t.token+"/"+t.id+"/kit-upload.css"}(t):c(t,{addOn:n.addOn,minify:t.minify.enabled}),e).then(function(i){r(function(t,e){var n=e.contentFilter||function(t,e){return t},o=document.createElement("style"),r=document.createTextNode(n(t,e));return o.appendChild(r),o.media="all",e.id&&o.setAttribute("id",e.id),e&&e.detectingConflicts&&e.detectionIgnoreAttr&&o.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),o}(i,o(o({},e),{},{baseUrl:t.baseUrl,version:t.version,id:n.id,contentFilter:function(t,e){return _(t,e.baseUrl,e.version)}})))}).catch(i)})});return F.all(s)}function P(t,e){var n=document.createElement("SCRIPT"),o=document.createTextNode(t);return n.appendChild(o),n.referrerPolicy="strict-origin",e.id&&n.setAttribute("id",e.id),e&&e.detectingConflicts&&e.detectionIgnoreAttr&&n.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),n}function U(t){var e,n=[],o=document,r=(o.documentElement.doScroll?/^loaded|^c/:/^loaded|^i|^c/).test(o.readyState);r||o.addEventListener("DOMContentLoaded",e=function(){for(o.removeEventListener("DOMContentLoaded",e),r=1;e=n.shift();)e()}),r?setTimeout(t,0):n.push(t)}try{if(window.FontAwesomeKitConfig){var k=window.FontAwesomeKitConfig,L={detectingConflicts:k.detectConflictsUntil&&new Date<=new Date(k.detectConflictsUntil),detectionIgnoreAttr:"data-fa-detection-ignore",fetch:window.fetch,token:k.token,XMLHttpRequest:window.XMLHttpRequest,document:document},I=document.currentScript,T=I?I.parentElement:document.head;(function(){var t=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{},e=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};return"js"===t.method?function(t,e){e.autoA11y=t.autoA11y.enabled,"pro"===t.license&&(e.autoFetchSvg=!0,e.fetchSvgFrom=t.baseUrl+"/releases/"+("latest"===t.version?"latest":"v".concat(t.version))+"/svgs",e.fetchUploadedSvgFrom=t.uploadsUrl);var n=[];return t.v4shim.enabled&&n.push(new F(function(n,r){E(c(t,{addOn:"-v4-shims",minify:t.minify.enabled}),e).then(function(t){n(P(t,o(o({},e),{},{id:"fa-v4-shims"})))}).catch(r)})),n.push(new F(function(n,r){E(c(t,{minify:t.minify.enabled}),e).then(function(t){var r=P(t,o(o({},e),{},{id:"fa-main"}));n(function(t,e){var n=e&&void 0!==e.autoFetchSvg?e.autoFetchSvg:void 0,o=e&&void 0!==e.autoA11y?e.autoA11y:void 0;return void 0!==o&&t.setAttribute("data-auto-a11y",o?"true":"false"),n&&(t.setAttributeNode(document.createAttribute("data-auto-fetch-svg")),t.setAttribute("data-fetch-svg-from",e.fetchSvgFrom),t.setAttribute("data-fetch-uploaded-svg-from",e.fetchUploadedSvgFrom)),t}(r,e))}).catch(r)})),F.all(n)}(t,e):"css"===t.method?C(t,e,function(t){U(t),function(t){"undefined"!=typeof MutationObserver&&new MutationObserver(t).observe(document,{childList:!0,subtree:!0})}(t)}):void 0})(k,L).then(function(t){t.map(function(t){try{T.insertBefore(t,I?I.nextSibling:null)}catch(e){T.appendChild(t)}}),L.detectingConflicts&&I&&U(function(){I.setAttributeNode(document.createAttribute(L.detectionIgnoreAttr));var t=function(t,e){var n=document.createElement("script");return 
e&&e.detectionIgnoreAttr&&n.setAttributeNode(document.createAttribute(e.detectionIgnoreAttr)),n.src=c(t,{baseFilename:"conflict-detection",fileSuffix:"js",subdir:"js",minify:t.minify.enabled}),n}(k,L);document.body.appendChild(t)})}).catch(function(t){console.error("".concat("Font Awesome Kit:"," ").concat(t))})}}catch(t){console.error("".concat("Font Awesome Kit:"," ").concat(t))}});
 
 
spaces/AchyuthGamer/OpenGPT/g4f/Provider/needs_auth/Raycast.py DELETED
@@ -1,72 +0,0 @@
1
- from __future__ import annotations
2
-
3
- import json
4
-
5
- import requests
6
-
7
- from ...typing import Any, CreateResult
8
- from ..base_provider import BaseProvider
9
-
10
-
11
- class Raycast(BaseProvider):
12
- url = "https://raycast.com"
13
- supports_gpt_35_turbo = True
14
- supports_gpt_4 = True
15
- supports_stream = True
16
- needs_auth = True
17
- working = True
18
-
19
- @staticmethod
20
- def create_completion(
21
- model: str,
22
- messages: list[dict[str, str]],
23
- stream: bool,
24
- **kwargs: Any,
25
- ) -> CreateResult:
26
- auth = kwargs.get('auth')
27
- headers = {
28
- 'Accept': 'application/json',
29
- 'Accept-Language': 'en-US,en;q=0.9',
30
- 'Authorization': f'Bearer {auth}',
31
- 'Content-Type': 'application/json',
32
- 'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0',
33
- }
34
- parsed_messages = []
35
- for message in messages:
36
- parsed_messages.append({
37
- 'author': message['role'],
38
- 'content': {'text': message['content']}
39
- })
40
- data = {
41
- "debug": False,
42
- "locale": "en-CN",
43
- "messages": parsed_messages,
44
- "model": model,
45
- "provider": "openai",
46
- "source": "ai_chat",
47
- "system_instruction": "markdown",
48
- "temperature": 0.5
49
- }
50
- response = requests.post("https://backend.raycast.com/api/v1/ai/chat_completions", headers=headers, json=data, stream=True)
51
- for token in response.iter_lines():
52
- if b'data: ' not in token:
53
- continue
54
- completion_chunk = json.loads(token.decode().replace('data: ', ''))
55
- token = completion_chunk['text']
56
- if token is not None:
57
- yield token
58
-
59
- @classmethod
60
- @property
61
- def params(cls):
62
- params = [
63
- ("model", "str"),
64
- ("messages", "list[dict[str, str]]"),
65
- ("stream", "bool"),
66
- ("temperature", "float"),
67
- ("top_p", "int"),
68
- ("model", "str"),
69
- ("auth", "str"),
70
- ]
71
- param = ", ".join([": ".join(p) for p in params])
72
- return f"g4f.provider.{cls.__name__} supports: ({param})"
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/plugins/scaleouter.js DELETED
@@ -1,2 +0,0 @@
1
- import ScaleOuter from './scale/scaleouter/ScaleOuter.js';
2
- export default ScaleOuter;
spaces/AkitoP/umamusume_bert_vits2/text/__init__.py DELETED
@@ -1,28 +0,0 @@
1
- from text.symbols import *
2
-
3
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
4
-
5
-
6
- def cleaned_text_to_sequence(cleaned_text, tones, language):
7
- """Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
8
- Args:
9
- text: string to convert to a sequence
10
- Returns:
11
- List of integers corresponding to the symbols in the text
12
- """
13
- phones = [_symbol_to_id[symbol] for symbol in cleaned_text]
14
- tone_start = language_tone_start_map[language]
15
- tones = [i + tone_start for i in tones]
16
- lang_id = language_id_map[language]
17
- lang_ids = [lang_id for _ in phones]
18
- return phones, tones, lang_ids
19
-
20
-
21
- def get_bert(norm_text, word2ph, language, device):
22
- from .chinese_bert import get_bert_feature as zh_bert
23
- from .english_bert_mock import get_bert_feature as en_bert
24
- from .japanese_bert import get_bert_feature as jp_bert
25
-
26
- lang_bert_func_map = {"ZH": zh_bert, "EN": en_bert, "JP": jp_bert}
27
- bert = lang_bert_func_map[language](norm_text, word2ph, device)
28
- return bert
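A sketch of the intended call, assuming the symbols below exist in `text.symbols` and that `"JP"` is a key of the language maps (both are assumptions about that module):

```python
phones, tones, lang_ids = cleaned_text_to_sequence(["k", "o"], tones=[0, 1], language="JP")
# phones  : ids from _symbol_to_id
# tones   : inputs shifted by language_tone_start_map["JP"]
# lang_ids: language_id_map["JP"], repeated once per phone
```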
spaces/Alesteba/NeRF_ficus-pxl/app.py DELETED
@@ -1,79 +0,0 @@
1
- import streamlit as st
2
- import tensorflow as tf
3
- import numpy as np
4
-
5
- from config import *
6
- from transformations import *
7
- from rendering import *
8
-
9
- # Setting random seed to obtain reproducible results.
10
- tf.random.set_seed(42)
11
-
12
- def show_rendered_image(r,theta,phi):
13
-
14
- # Get the camera to world matrix.
15
-
16
- c2w = pose_spherical(theta, phi, r)
17
-
18
- ray_oris, ray_dirs = get_rays(H, W, focal, c2w)
19
- rays_flat, t_vals = render_flat_rays(
20
- ray_oris, ray_dirs, near=2.0, far=6.0, num_samples=NUM_SAMPLES, rand=False
21
- )
22
-
23
- rgb, depth = render_rgb_depth(
24
- nerf_loaded, rays_flat[None, ...], t_vals[None, ...], rand=False, train=False
25
- )
26
-
27
- return(rgb[0], depth[0])
28
-
29
-
30
- # app.py text matter starts here
31
-
32
- st.title('3D volumetric rendering with NeRF - A concrete example, Ficus Dataset')
33
-
34
- import base64
35
-
36
- file = open(r'./training(3).gif', 'rb')
37
- contents = file.read()
38
- data_url = base64.b64encode(contents).decode('utf-8')
39
- file.close()
40
-
41
- # st.markdown(
42
- # f'<img src="data:image/gif;base64,{data_url}" alt="cat gif">',
43
- # unsafe_allow_html=True,
44
- # )
45
-
46
- st.markdown("[NeRF](https://arxiv.org/abs/2003.08934) proposes an ingenious way to synthesize novel views of a scene by modelling the volumetric scene function through a neural network. The network learns to model the volumetric scene, thus generating novel views (images) of the 3D scene that the model was not shown at training time.")
47
- # st.markdown("![](https://github.com/alesteba/training_NeRF/blob/e89da9448b3993117c78532c14c7142970f0d8df/training(3).gif)")
48
-
49
- st.markdown(
50
- f'<img src="data:image/gif;base64,{data_url}" alt="cat gif" width=100%>',
51
- unsafe_allow_html=True,
52
- )
53
- # st.image(image, caption='Training Steps')
54
- st.markdown("## Interactive Demo")
55
-
56
- # download the model:
57
- # from my own model repo
58
-
59
- from huggingface_hub import from_pretrained_keras
60
- nerf_loaded = from_pretrained_keras("Alesteba/NeRF_ficus")
61
-
62
-
63
- # set the values of r theta phi
64
- r = 4.0
65
- theta = st.slider("key_1",min_value=0.0, max_value=360.0, label_visibility="hidden")
66
- phi = st.slider("key_2", min_value=0.0, max_value=360.0, label_visibility="hidden")
67
- # phi = -30.0
68
- color, depth = show_rendered_image(r, theta, phi)
69
-
70
- col1, col2= st.columns(2)
71
-
72
- with col1:
73
- color = tf.keras.utils.array_to_img(color)
74
- st.image(color, caption="Color Image", clamp=True, width=300)
75
-
76
- with col2:
77
- depth = tf.keras.utils.array_to_img(depth[..., None])
78
- st.image(depth, caption="Depth Map", clamp=True, width=300)
79
-
spaces/Alichuan/VITS-Umamusume-voice-synthesizer/text/japanese.py DELETED
@@ -1,153 +0,0 @@
1
- import re
2
- from unidecode import unidecode
3
- import pyopenjtalk
4
-
5
-
6
- # Regular expression matching Japanese without punctuation marks:
7
- _japanese_characters = re.compile(
8
- r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
9
-
10
- # Regular expression matching non-Japanese characters or punctuation marks:
11
- _japanese_marks = re.compile(
12
- r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
13
-
14
- # List of (symbol, Japanese) pairs for marks:
15
- _symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [
16
- ('%', 'パーセント')
17
- ]]
18
-
19
- # List of (romaji, ipa) pairs for marks:
20
- _romaji_to_ipa = [(re.compile('%s' % x[0]), x[1]) for x in [
21
- ('ts', 'ʦ'),
22
- ('u', 'ɯ'),
23
- ('j', 'ʥ'),
24
- ('y', 'j'),
25
- ('ni', 'n^i'),
26
- ('nj', 'n^'),
27
- ('hi', 'çi'),
28
- ('hj', 'ç'),
29
- ('f', 'ɸ'),
30
- ('I', 'i*'),
31
- ('U', 'ɯ*'),
32
- ('r', 'ɾ')
33
- ]]
34
-
35
- # List of (romaji, ipa2) pairs for marks:
36
- _romaji_to_ipa2 = [(re.compile('%s' % x[0]), x[1]) for x in [
37
- ('u', 'ɯ'),
38
- ('ʧ', 'tʃ'),
39
- ('j', 'dʑ'),
40
- ('y', 'j'),
41
- ('ni', 'n^i'),
42
- ('nj', 'n^'),
43
- ('hi', 'çi'),
44
- ('hj', 'ç'),
45
- ('f', 'ɸ'),
46
- ('I', 'i*'),
47
- ('U', 'ɯ*'),
48
- ('r', 'ɾ')
49
- ]]
50
-
51
- # List of (consonant, sokuon) pairs:
52
- _real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [
53
- (r'Q([↑↓]*[kg])', r'k#\1'),
54
- (r'Q([↑↓]*[tdjʧ])', r't#\1'),
55
- (r'Q([↑↓]*[sʃ])', r's\1'),
56
- (r'Q([↑↓]*[pb])', r'p#\1')
57
- ]]
58
-
59
- # List of (consonant, hatsuon) pairs:
60
- _real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [
61
- (r'N([↑↓]*[pbm])', r'm\1'),
62
- (r'N([↑↓]*[ʧʥj])', r'n^\1'),
63
- (r'N([↑↓]*[tdn])', r'n\1'),
64
- (r'N([↑↓]*[kg])', r'ŋ\1')
65
- ]]
66
-
67
-
68
- def symbols_to_japanese(text):
69
- for regex, replacement in _symbols_to_japanese:
70
- text = re.sub(regex, replacement, text)
71
- return text
72
-
73
-
74
- def japanese_to_romaji_with_accent(text):
75
- '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
76
- text = symbols_to_japanese(text)
77
- sentences = re.split(_japanese_marks, text)
78
- marks = re.findall(_japanese_marks, text)
79
- text = ''
80
- for i, sentence in enumerate(sentences):
81
- if re.match(_japanese_characters, sentence):
82
- if text != '':
83
- text += ' '
84
- labels = pyopenjtalk.extract_fullcontext(sentence)
85
- for n, label in enumerate(labels):
86
- phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
87
- if phoneme not in ['sil', 'pau']:
88
- text += phoneme.replace('ch', 'ʧ').replace('sh',
89
- 'ʃ').replace('cl', 'Q')
90
- else:
91
- continue
92
- # n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
93
- a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
94
- a2 = int(re.search(r"\+(\d+)\+", label).group(1))
95
- a3 = int(re.search(r"\+(\d+)/", label).group(1))
96
- if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']:
97
- a2_next = -1
98
- else:
99
- a2_next = int(
100
- re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
101
- # Accent phrase boundary
102
- if a3 == 1 and a2_next == 1:
103
- text += ' '
104
- # Falling
105
- elif a1 == 0 and a2_next == a2 + 1:
106
- text += '↓'
107
- # Rising
108
- elif a2 == 1 and a2_next == 2:
109
- text += '↑'
110
- if i < len(marks):
111
- text += unidecode(marks[i]).replace(' ', '')
112
- return text
113
-
114
-
115
- def get_real_sokuon(text):
116
- for regex, replacement in _real_sokuon:
117
- text = re.sub(regex, replacement, text)
118
- return text
119
-
120
-
121
- def get_real_hatsuon(text):
122
- for regex, replacement in _real_hatsuon:
123
- text = re.sub(regex, replacement, text)
124
- return text
125
-
126
-
127
- def japanese_to_ipa(text):
128
- text = japanese_to_romaji_with_accent(text).replace('...', '…')
129
- text = re.sub(
130
- r'([aiueo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text)
131
- text = get_real_sokuon(text)
132
- text = get_real_hatsuon(text)
133
- for regex, replacement in _romaji_to_ipa:
134
- text = re.sub(regex, replacement, text)
135
- return text
136
-
137
-
138
- def japanese_to_ipa2(text):
139
- text = japanese_to_romaji_with_accent(text).replace('...', '…')
140
- text = get_real_sokuon(text)
141
- text = get_real_hatsuon(text)
142
- for regex, replacement in _romaji_to_ipa2:
143
- text = re.sub(regex, replacement, text)
144
- return text
145
-
146
-
147
- def japanese_to_ipa3(text):
148
- text = japanese_to_ipa2(text).replace('n^', 'ȵ').replace(
149
- 'ʃ', 'ɕ').replace('*', '\u0325').replace('#', '\u031a')
150
- text = re.sub(
151
- r'([aiɯeo])\1+', lambda x: x.group(0)[0]+'ː'*(len(x.group(0))-1), text)
152
- text = re.sub(r'((?:^|\s)(?:ts|tɕ|[kpt]))', r'\1ʰ', text)
153
- return text
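Illustrative calls for the converters above; they require `pyopenjtalk` (which downloads its dictionary on first use), and exact outputs depend on the dictionary version, so none are shown as ground truth:

```python
text = "こんにちは、世界。"
print(japanese_to_romaji_with_accent(text))   # romaji with ↑/↓ pitch-accent marks
print(japanese_to_ipa(text))                  # IPA, e.g. 'ɯ' for u, 'ɾ' for r
print(japanese_to_ipa2(text))
```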
spaces/Amrrs/DragGan-Inversion/stylegan_human/torch_utils/op_edit/fused_act.py DELETED
@@ -1,100 +0,0 @@
- # Copyright (c) SenseTime Research. All rights reserved.
-
- import os
-
- import torch
- from torch import nn
- from torch.nn import functional as F
- from torch.autograd import Function
- from torch.utils.cpp_extension import load
-
-
- module_path = os.path.dirname(__file__)
- fused = load(
-     "fused",
-     sources=[
-         os.path.join(module_path, "fused_bias_act.cpp"),
-         os.path.join(module_path, "fused_bias_act_kernel.cu"),
-     ],
- )
-
-
- class FusedLeakyReLUFunctionBackward(Function):
-     @staticmethod
-     def forward(ctx, grad_output, out, negative_slope, scale):
-         ctx.save_for_backward(out)
-         ctx.negative_slope = negative_slope
-         ctx.scale = scale
-
-         empty = grad_output.new_empty(0)
-
-         grad_input = fused.fused_bias_act(
-             grad_output, empty, out, 3, 1, negative_slope, scale
-         )
-
-         dim = [0]
-
-         if grad_input.ndim > 2:
-             dim += list(range(2, grad_input.ndim))
-
-         grad_bias = grad_input.sum(dim).detach()
-
-         return grad_input, grad_bias
-
-     @staticmethod
-     def backward(ctx, gradgrad_input, gradgrad_bias):
-         (out,) = ctx.saved_tensors
-         gradgrad_out = fused.fused_bias_act(
-             gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
-         )
-
-         return gradgrad_out, None, None, None
-
-
- class FusedLeakyReLUFunction(Function):
-     @staticmethod
-     def forward(ctx, input, bias, negative_slope, scale):
-         empty = input.new_empty(0)
-         out = fused.fused_bias_act(
-             input, bias, empty, 3, 0, negative_slope, scale)
-         ctx.save_for_backward(out)
-         ctx.negative_slope = negative_slope
-         ctx.scale = scale
-
-         return out
-
-     @staticmethod
-     def backward(ctx, grad_output):
-         (out,) = ctx.saved_tensors
-
-         grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
-             grad_output, out, ctx.negative_slope, ctx.scale
-         )
-
-         return grad_input, grad_bias, None, None
-
-
- class FusedLeakyReLU(nn.Module):
-     def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
-         super().__init__()
-
-         self.bias = nn.Parameter(torch.zeros(channel))
-         self.negative_slope = negative_slope
-         self.scale = scale
-
-     def forward(self, input):
-         return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
-
-
- def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
-     if input.device.type == "cpu":
-         rest_dim = [1] * (input.ndim - bias.ndim - 1)
-         return (
-             F.leaky_relu(
-                 input + bias.view(1, bias.shape[0], *rest_dim),
-                 negative_slope=negative_slope,
-             )
-             * scale
-         )
-
-     else:
-         return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
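For readers without the CUDA extension, a minimal pure-PyTorch sketch of what the fused op computes (per-channel bias add, leaky ReLU, then a rescale to preserve activation magnitude); `fused_leaky_relu_ref` is an illustrative name, not part of the deleted file:

```python
import torch
import torch.nn.functional as F

def fused_leaky_relu_ref(x, bias, negative_slope=0.2, scale=2 ** 0.5):
    # Broadcast the per-channel bias over all trailing spatial dims.
    rest_dim = [1] * (x.ndim - bias.ndim - 1)
    out = F.leaky_relu(x + bias.view(1, -1, *rest_dim),
                       negative_slope=negative_slope)
    return out * scale

x = torch.randn(2, 8, 4, 4)
print(fused_leaky_relu_ref(x, torch.zeros(8)).shape)  # torch.Size([2, 8, 4, 4])
```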
 
spaces/Anar0140/6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5/index.html DELETED
@@ -1,36 +0,0 @@
- <html>
- <head>
-
- <script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/3.12.0/gradio.js">
- </script>
-
-
- </head>
- <body>
-
- <iframe
-     src="https://awacke1-twitter-sentiment-live-realtime.hf.space"
-     frameborder="0"
-     width="850"
-     height="1024"
- ></iframe>
-
- <iframe
-     src="https://awacke1-streamlitwikipediachat.hf.space"
-     frameborder="0"
-     width="850"
-     height="1024"
- ></iframe>
-
- <iframe
-     src="https://awacke1-cognitive-ai-episodic-semantic-m-f4b3d67.hf.space"
-     frameborder="0"
-     width="850"
-     height="1024"
- ></iframe>
-
-
-
- </body>
-
- </html>
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/docs/source/ko/installation.md DELETED
@@ -1,142 +0,0 @@
- <!--Copyright 2023 The HuggingFace Team. All rights reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- specific language governing permissions and limitations under the License.
- -->
-
- # Installation
-
- Install 🤗 Diffusers for the deep learning library you are using.
-
- 🤗 Diffusers is tested on Python 3.7+, PyTorch 1.7.0+, and Flax. Follow the installation instructions below for the library you use.
-
- - [PyTorch installation instructions](https://pytorch.org/get-started/locally/)
- - [Flax installation instructions](https://flax.readthedocs.io/en/latest/)
-
- ## Install with pip
-
- You should install 🤗 Diffusers in a [virtual environment](https://docs.python.org/3/library/venv.html).
- If you are unfamiliar with Python virtual environments, take a look at the [guide to installing with pip in virtual environments](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
- A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies.
-
- Start by creating a virtual environment in your project directory:
-
- ```bash
- python -m venv .env
- ```
-
- Then activate the virtual environment:
-
- ```bash
- source .env/bin/activate
- ```
-
- Now you are ready to install 🤗 Diffusers with the following command:
-
- **For PyTorch**
-
- ```bash
- pip install diffusers["torch"]
- ```
-
- **For Flax**
-
- ```bash
- pip install diffusers["flax"]
- ```
-
- ## Install from source
-
- Before installing `diffusers` from source, make sure `torch` and `accelerate` are installed.
-
- For `torch` installation, refer to the [torch docs](https://pytorch.org/get-started/locally/#start-locally).
-
- Install `accelerate` as follows:
-
- ```bash
- pip install accelerate
- ```
-
- Install 🤗 Diffusers from source with the following command:
-
- ```bash
- pip install git+https://github.com/huggingface/diffusers
- ```
-
- This command installs the bleeding-edge `main` version rather than the latest `stable` version.
- The `main` version is useful for staying up to date with the latest developments,
- for instance when a bug has been fixed since the last official release but a new release has not yet been rolled out.
- However, this means the `main` version may not always be stable.
- We strive to keep the `main` version working, and most issues are usually resolved within a few hours or a day.
- If you run into a problem, please open an [Issue](https://github.com/huggingface/transformers/issues) so we can fix it even sooner!
-
-
- ## Editable install
-
- You will need an editable install if you would like to:
-
- * Use the `main` version of the source code
- * Contribute to 🤗 Diffusers (needed to test changes in the code)
-
- Clone the repository and install 🤗 Diffusers with the following commands:
-
- ```bash
- git clone https://github.com/huggingface/diffusers.git
- cd diffusers
- ```
-
- **For PyTorch**
-
- ```
- pip install -e ".[torch]"
- ```
-
- **For Flax**
-
- ```
- pip install -e ".[flax]"
- ```
-
- These commands link the folder you cloned the repository to with your Python library paths.
- Python will now look inside the cloned folder in addition to the normal library paths.
- For example, if your Python packages are installed in `~/anaconda3/envs/main/lib/python3.7/site-packages/`, Python will also search the cloned folder `~/diffusers/`.
-
- <Tip warning={true}>
-
- You must keep the `diffusers` folder if you want to keep using the library.
-
- </Tip>
-
- Now you can easily update your clone to the latest version of 🤗 Diffusers with the following command:
-
- ```bash
- cd ~/diffusers/
- git pull
- ```
-
- Your Python environment will find the `main` version of 🤗 Diffusers on the next run.
-
- ## Notice on telemetry logging
-
- Our library gathers telemetry information during `from_pretrained()` requests.
- The data gathered includes the version of Diffusers and PyTorch/Flax, the requested model or pipeline class, and the path to a pretrained checkpoint if it is hosted on the Hub.
- This usage data helps us debug issues and prioritize new features.
- Telemetry is only sent when loading models and pipelines from the HuggingFace Hub, and is not collected during local usage.
-
- We understand that not everyone wants to share additional information, and we respect your privacy, so you can disable telemetry collection by setting the `DISABLE_TELEMETRY` environment variable from your terminal:
-
- On Linux/MacOS:
- ```bash
- export DISABLE_TELEMETRY=YES
- ```
-
- On Windows:
- ```bash
- set DISABLE_TELEMETRY=YES
- ```
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/pipelines/t2i_adapter/__init__.py DELETED
@@ -1,14 +0,0 @@
- from ...utils import (
-     OptionalDependencyNotAvailable,
-     is_torch_available,
-     is_transformers_available,
- )
-
-
- try:
-     if not (is_transformers_available() and is_torch_available()):
-         raise OptionalDependencyNotAvailable()
- except OptionalDependencyNotAvailable:
-     from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
- else:
-     from .pipeline_stable_diffusion_adapter import StableDiffusionAdapterPipeline
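The `try`/`except` above is the standard optional-dependency gate: the real pipeline class is exported only when both `torch` and `transformers` import cleanly; otherwise dummy placeholders are exported. A minimal standalone sketch of the availability check such gates rely on (illustrative; the real helpers live in `diffusers.utils`):

```python
def is_torch_available() -> bool:
    # True iff torch can be imported in this environment.
    try:
        import torch  # noqa: F401
    except ImportError:
        return False
    return True

if is_torch_available():
    print("torch found: real pipeline classes would be exported")
else:
    print("torch missing: dummy placeholder objects would be exported")
```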
 
spaces/Andy1621/uniformer_image_detection/mmdet/models/roi_heads/mask_heads/maskiou_head.py DELETED
@@ -1,186 +0,0 @@
- import numpy as np
- import torch
- import torch.nn as nn
- from mmcv.cnn import Conv2d, Linear, MaxPool2d, kaiming_init, normal_init
- from mmcv.runner import force_fp32
- from torch.nn.modules.utils import _pair
-
- from mmdet.models.builder import HEADS, build_loss
-
-
- @HEADS.register_module()
- class MaskIoUHead(nn.Module):
-     """Mask IoU Head.
-
-     This head predicts the IoU of predicted masks and corresponding gt masks.
-     """
-
-     def __init__(self,
-                  num_convs=4,
-                  num_fcs=2,
-                  roi_feat_size=14,
-                  in_channels=256,
-                  conv_out_channels=256,
-                  fc_out_channels=1024,
-                  num_classes=80,
-                  loss_iou=dict(type='MSELoss', loss_weight=0.5)):
-         super(MaskIoUHead, self).__init__()
-         self.in_channels = in_channels
-         self.conv_out_channels = conv_out_channels
-         self.fc_out_channels = fc_out_channels
-         self.num_classes = num_classes
-         self.fp16_enabled = False
-
-         self.convs = nn.ModuleList()
-         for i in range(num_convs):
-             if i == 0:
-                 # concatenation of mask feature and mask prediction
-                 in_channels = self.in_channels + 1
-             else:
-                 in_channels = self.conv_out_channels
-             stride = 2 if i == num_convs - 1 else 1
-             self.convs.append(
-                 Conv2d(
-                     in_channels,
-                     self.conv_out_channels,
-                     3,
-                     stride=stride,
-                     padding=1))
-
-         roi_feat_size = _pair(roi_feat_size)
-         pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2)
-         self.fcs = nn.ModuleList()
-         for i in range(num_fcs):
-             in_channels = (
-                 self.conv_out_channels *
-                 pooled_area if i == 0 else self.fc_out_channels)
-             self.fcs.append(Linear(in_channels, self.fc_out_channels))
-
-         self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes)
-         self.relu = nn.ReLU()
-         self.max_pool = MaxPool2d(2, 2)
-         self.loss_iou = build_loss(loss_iou)
-
-     def init_weights(self):
-         for conv in self.convs:
-             kaiming_init(conv)
-         for fc in self.fcs:
-             kaiming_init(
-                 fc,
-                 a=1,
-                 mode='fan_in',
-                 nonlinearity='leaky_relu',
-                 distribution='uniform')
-         normal_init(self.fc_mask_iou, std=0.01)
-
-     def forward(self, mask_feat, mask_pred):
-         mask_pred = mask_pred.sigmoid()
-         mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1))
-
-         x = torch.cat((mask_feat, mask_pred_pooled), 1)
-
-         for conv in self.convs:
-             x = self.relu(conv(x))
-         x = x.flatten(1)
-         for fc in self.fcs:
-             x = self.relu(fc(x))
-         mask_iou = self.fc_mask_iou(x)
-         return mask_iou
-
-     @force_fp32(apply_to=('mask_iou_pred', ))
-     def loss(self, mask_iou_pred, mask_iou_targets):
-         pos_inds = mask_iou_targets > 0
-         if pos_inds.sum() > 0:
-             loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds],
-                                           mask_iou_targets[pos_inds])
-         else:
-             loss_mask_iou = mask_iou_pred.sum() * 0
-         return dict(loss_mask_iou=loss_mask_iou)
-
-     @force_fp32(apply_to=('mask_pred', ))
-     def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targets,
-                     rcnn_train_cfg):
-         """Compute target of mask IoU.
-
-         Mask IoU target is the IoU of the predicted mask (inside a bbox) and
-         the gt mask of the corresponding instance (the whole instance).
-         The intersection area is computed inside the bbox, and the gt mask area
-         is computed in two steps: first we compute the gt area inside the
-         bbox, then divide it by the ratio of the gt area inside the bbox to
-         the gt area of the whole instance.
-
-         Args:
-             sampling_results (list[:obj:`SamplingResult`]): sampling results.
-             gt_masks (BitmapMask | PolygonMask): Gt masks (the whole instance)
-                 of each image, with the same shape as the input image.
-             mask_pred (Tensor): Predicted masks of each positive proposal,
-                 shape (num_pos, h, w).
-             mask_targets (Tensor): Gt mask of each positive proposal,
-                 binary map of the shape (num_pos, h, w).
-             rcnn_train_cfg (dict): Training config for R-CNN part.
-
-         Returns:
-             Tensor: mask iou target (length == num positive).
-         """
-         pos_proposals = [res.pos_bboxes for res in sampling_results]
-         pos_assigned_gt_inds = [
-             res.pos_assigned_gt_inds for res in sampling_results
-         ]
-
-         # compute the area ratio of gt areas inside the proposals and
-         # the whole instance
-         area_ratios = map(self._get_area_ratio, pos_proposals,
-                           pos_assigned_gt_inds, gt_masks)
-         area_ratios = torch.cat(list(area_ratios))
-         assert mask_targets.size(0) == area_ratios.size(0)
-
-         mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float()
-         mask_pred_areas = mask_pred.sum((-1, -2))
-
-         # mask_pred and mask_targets are binary maps
-         overlap_areas = (mask_pred * mask_targets).sum((-1, -2))
-
-         # compute the mask area of the whole instance
-         gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)
-
-         mask_iou_targets = overlap_areas / (
-             mask_pred_areas + gt_full_areas - overlap_areas)
-         return mask_iou_targets
-
-     def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
-         """Compute area ratio of the gt mask inside the proposal and the gt
-         mask of the corresponding instance."""
-         num_pos = pos_proposals.size(0)
-         if num_pos > 0:
-             area_ratios = []
-             proposals_np = pos_proposals.cpu().numpy()
-             pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
-             # compute mask areas of gt instances (batch processing for speedup)
-             gt_instance_mask_area = gt_masks.areas
-             for i in range(num_pos):
-                 gt_mask = gt_masks[pos_assigned_gt_inds[i]]
-
-                 # crop the gt mask inside the proposal
-                 bbox = proposals_np[i, :].astype(np.int32)
-                 gt_mask_in_proposal = gt_mask.crop(bbox)
-
-                 ratio = gt_mask_in_proposal.areas[0] / (
-                     gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
-                 area_ratios.append(ratio)
-             area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
-                 pos_proposals.device)
-         else:
-             area_ratios = pos_proposals.new_zeros((0, ))
-         return area_ratios
-
-     @force_fp32(apply_to=('mask_iou_pred', ))
-     def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels):
-         """Get the mask scores.
-
-         mask_score = bbox_score * mask_iou
-         """
-         inds = range(det_labels.size(0))
-         mask_scores = mask_iou_pred[inds, det_labels] * det_bboxes[inds, -1]
-         mask_scores = mask_scores.cpu().numpy()
-         det_labels = det_labels.cpu().numpy()
-         return [mask_scores[det_labels == i] for i in range(self.num_classes)]
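For reference, the quantity this head regresses is the plain IoU between a predicted binary mask and its ground-truth mask. A minimal standalone sketch (not part of the deleted file; `get_targets` above additionally corrects the gt area for the bbox crop):

```python
import torch

def mask_iou(pred: torch.Tensor, gt: torch.Tensor) -> torch.Tensor:
    # pred, gt: binary masks of shape (N, H, W)
    inter = (pred * gt).sum((-1, -2))
    union = pred.sum((-1, -2)) + gt.sum((-1, -2)) - inter
    return inter / (union + 1e-7)

pred = (torch.rand(4, 28, 28) > 0.5).float()
gt = (torch.rand(4, 28, 28) > 0.5).float()
print(mask_iou(pred, gt))  # four values in [0, 1]
```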
 
spaces/Andy1621/uniformer_image_segmentation/configs/deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py DELETED
@@ -1,2 +0,0 @@
- _base_ = './deeplabv3plus_r50-d8_512x512_160k_ade20k.py'
- model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
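The two-line config works through `_base_` inheritance: the child dict is merged recursively into the base config, with child keys winning. A rough sketch of that merge rule (simplified; mmcv's real `Config` handles more cases, such as key deletion):

```python
def merge_cfg(base: dict, child: dict) -> dict:
    # Recursive dict merge: child keys override, nested dicts merge.
    out = dict(base)
    for key, value in child.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = merge_cfg(out[key], value)
        else:
            out[key] = value
    return out

base = dict(model=dict(pretrained='open-mmlab://resnet50_v1c',
                       backbone=dict(depth=50, num_stages=4)))
child = dict(model=dict(pretrained='open-mmlab://resnet101_v1c',
                        backbone=dict(depth=101)))
print(merge_cfg(base, child)['model']['backbone'])
# {'depth': 101, 'num_stages': 4} -- num_stages inherited, depth overridden
```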
 
spaces/AnonAndDesu/Desu_Proxy/README.md DELETED
@@ -1,10 +0,0 @@
- ---
- title: Desu_Proxy
- emoji: 📉
- colorFrom: green
- colorTo: purple
- sdk: docker
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Anonymous-sub/Rerender/ControlNet/annotator/mlsd/utils.py DELETED
@@ -1,580 +0,0 @@
- '''
- modified by lihaoweicv
- pytorch version
- '''
-
- '''
- M-LSD
- Copyright 2021-present NAVER Corp.
- Apache License v2.0
- '''
-
- import os
- import numpy as np
- import cv2
- import torch
- from torch.nn import functional as F
-
-
- def deccode_output_score_and_ptss(tpMap, topk_n=200, ksize=5):
-     '''
-     tpMap:
-         center: tpMap[1, 0, :, :]
-         displacement: tpMap[1, 1:5, :, :]
-     '''
-     b, c, h, w = tpMap.shape
-     assert b == 1, 'only support bsize==1'
-     displacement = tpMap[:, 1:5, :, :][0]
-     center = tpMap[:, 0, :, :]
-     heat = torch.sigmoid(center)
-     hmax = F.max_pool2d(heat, (ksize, ksize), stride=1, padding=(ksize - 1) // 2)
-     keep = (hmax == heat).float()
-     heat = heat * keep
-     heat = heat.reshape(-1, )
-
-     scores, indices = torch.topk(heat, topk_n, dim=-1, largest=True)
-     yy = torch.floor_divide(indices, w).unsqueeze(-1)
-     xx = torch.fmod(indices, w).unsqueeze(-1)
-     ptss = torch.cat((yy, xx), dim=-1)
-
-     ptss = ptss.detach().cpu().numpy()
-     scores = scores.detach().cpu().numpy()
-     displacement = displacement.detach().cpu().numpy()
-     displacement = displacement.transpose((1, 2, 0))
-     return ptss, scores, displacement
-
-
- def pred_lines(image, model,
-                input_shape=[512, 512],
-                score_thr=0.10,
-                dist_thr=20.0):
-     h, w, _ = image.shape
-     h_ratio, w_ratio = [h / input_shape[0], w / input_shape[1]]
-
-     resized_image = np.concatenate([cv2.resize(image, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA),
-                                     np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
-
-     resized_image = resized_image.transpose((2, 0, 1))
-     batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
-     batch_image = (batch_image / 127.5) - 1.0
-
-     batch_image = torch.from_numpy(batch_image).float().cuda()
-     outputs = model(batch_image)
-     pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
-     start = vmap[:, :, :2]
-     end = vmap[:, :, 2:]
-     dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))
-
-     segments_list = []
-     for center, score in zip(pts, pts_score):
-         y, x = center
-         distance = dist_map[y, x]
-         if score > score_thr and distance > dist_thr:
-             disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
-             x_start = x + disp_x_start
-             y_start = y + disp_y_start
-             x_end = x + disp_x_end
-             y_end = y + disp_y_end
-             segments_list.append([x_start, y_start, x_end, y_end])
-
-     lines = 2 * np.array(segments_list)  # 256 > 512
-     lines[:, 0] = lines[:, 0] * w_ratio
-     lines[:, 1] = lines[:, 1] * h_ratio
-     lines[:, 2] = lines[:, 2] * w_ratio
-     lines[:, 3] = lines[:, 3] * h_ratio
-
-     return lines
-
-
- def pred_squares(image,
-                  model,
-                  input_shape=[512, 512],
-                  params={'score': 0.06,
-                          'outside_ratio': 0.28,
-                          'inside_ratio': 0.45,
-                          'w_overlap': 0.0,
-                          'w_degree': 1.95,
-                          'w_length': 0.0,
-                          'w_area': 1.86,
-                          'w_center': 0.14}):
-     '''
-     shape = [height, width]
-     '''
-     h, w, _ = image.shape
-     original_shape = [h, w]
-
-     resized_image = np.concatenate([cv2.resize(image, (input_shape[0], input_shape[1]), interpolation=cv2.INTER_AREA),
-                                     np.ones([input_shape[0], input_shape[1], 1])], axis=-1)
-     resized_image = resized_image.transpose((2, 0, 1))
-     batch_image = np.expand_dims(resized_image, axis=0).astype('float32')
-     batch_image = (batch_image / 127.5) - 1.0
-
-     batch_image = torch.from_numpy(batch_image).float().cuda()
-     outputs = model(batch_image)
-
-     pts, pts_score, vmap = deccode_output_score_and_ptss(outputs, 200, 3)
-     start = vmap[:, :, :2]  # (x, y)
-     end = vmap[:, :, 2:]  # (x, y)
-     dist_map = np.sqrt(np.sum((start - end) ** 2, axis=-1))
-
-     junc_list = []
-     segments_list = []
-     for junc, score in zip(pts, pts_score):
-         y, x = junc
-         distance = dist_map[y, x]
-         if score > params['score'] and distance > 20.0:
-             junc_list.append([x, y])
-             disp_x_start, disp_y_start, disp_x_end, disp_y_end = vmap[y, x, :]
-             d_arrow = 1.0
-             x_start = x + d_arrow * disp_x_start
-             y_start = y + d_arrow * disp_y_start
-             x_end = x + d_arrow * disp_x_end
-             y_end = y + d_arrow * disp_y_end
-             segments_list.append([x_start, y_start, x_end, y_end])
-
-     segments = np.array(segments_list)
-
-     ####### post processing for squares
-     # 1. get unique lines
-     point = np.array([[0, 0]])
-     point = point[0]
-     start = segments[:, :2]
-     end = segments[:, 2:]
-     diff = start - end
-     a = diff[:, 1]
-     b = -diff[:, 0]
-     c = a * start[:, 0] + b * start[:, 1]
-
-     d = np.abs(a * point[0] + b * point[1] - c) / np.sqrt(a ** 2 + b ** 2 + 1e-10)
-     theta = np.arctan2(diff[:, 0], diff[:, 1]) * 180 / np.pi
-     theta[theta < 0.0] += 180
-     hough = np.concatenate([d[:, None], theta[:, None]], axis=-1)
-
-     d_quant = 1
-     theta_quant = 2
-     hough[:, 0] //= d_quant
-     hough[:, 1] //= theta_quant
-     _, indices, counts = np.unique(hough, axis=0, return_index=True, return_counts=True)
-
-     acc_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='float32')
-     idx_map = np.zeros([512 // d_quant + 1, 360 // theta_quant + 1], dtype='int32') - 1
-     yx_indices = hough[indices, :].astype('int32')
-     acc_map[yx_indices[:, 0], yx_indices[:, 1]] = counts
-     idx_map[yx_indices[:, 0], yx_indices[:, 1]] = indices
-
-     acc_map_np = acc_map
-     # acc_map = acc_map[None, :, :, None]
-     #
-     # ### fast suppression using tensorflow op
-     # acc_map = tf.constant(acc_map, dtype=tf.float32)
-     # max_acc_map = tf.keras.layers.MaxPool2D(pool_size=(5, 5), strides=1, padding='same')(acc_map)
-     # acc_map = acc_map * tf.cast(tf.math.equal(acc_map, max_acc_map), tf.float32)
-     # flatten_acc_map = tf.reshape(acc_map, [1, -1])
-     # topk_values, topk_indices = tf.math.top_k(flatten_acc_map, k=len(pts))
-     # _, h, w, _ = acc_map.shape
-     # y = tf.expand_dims(topk_indices // w, axis=-1)
-     # x = tf.expand_dims(topk_indices % w, axis=-1)
-     # yx = tf.concat([y, x], axis=-1)
-
-     ### fast suppression using pytorch op
-     acc_map = torch.from_numpy(acc_map_np).unsqueeze(0).unsqueeze(0)
-     _, _, h, w = acc_map.shape
-     max_acc_map = F.max_pool2d(acc_map, kernel_size=5, stride=1, padding=2)
-     acc_map = acc_map * ((acc_map == max_acc_map).float())
-     flatten_acc_map = acc_map.reshape([-1, ])
-
-     scores, indices = torch.topk(flatten_acc_map, len(pts), dim=-1, largest=True)
-     yy = torch.div(indices, w, rounding_mode='floor').unsqueeze(-1)
-     xx = torch.fmod(indices, w).unsqueeze(-1)
-     yx = torch.cat((yy, xx), dim=-1)
-
-     yx = yx.detach().cpu().numpy()
-
-     topk_values = scores.detach().cpu().numpy()
-     indices = idx_map[yx[:, 0], yx[:, 1]]
-     basis = 5 // 2
-
-     merged_segments = []
-     for yx_pt, max_indice, value in zip(yx, indices, topk_values):
-         y, x = yx_pt
-         if max_indice == -1 or value == 0:
-             continue
-         segment_list = []
-         for y_offset in range(-basis, basis + 1):
-             for x_offset in range(-basis, basis + 1):
-                 indice = idx_map[y + y_offset, x + x_offset]
-                 cnt = int(acc_map_np[y + y_offset, x + x_offset])
-                 if indice != -1:
-                     segment_list.append(segments[indice])
-                 if cnt > 1:
-                     check_cnt = 1
-                     current_hough = hough[indice]
-                     for new_indice, new_hough in enumerate(hough):
-                         if (current_hough == new_hough).all() and indice != new_indice:
-                             segment_list.append(segments[new_indice])
-                             check_cnt += 1
-                         if check_cnt == cnt:
-                             break
-         group_segments = np.array(segment_list).reshape([-1, 2])
-         sorted_group_segments = np.sort(group_segments, axis=0)
-         x_min, y_min = sorted_group_segments[0, :]
-         x_max, y_max = sorted_group_segments[-1, :]
-
-         deg = theta[max_indice]
-         if deg >= 90:
-             merged_segments.append([x_min, y_max, x_max, y_min])
-         else:
-             merged_segments.append([x_min, y_min, x_max, y_max])
-
-     # 2. get intersections
-     new_segments = np.array(merged_segments)  # (x1, y1, x2, y2)
-     start = new_segments[:, :2]  # (x1, y1)
-     end = new_segments[:, 2:]  # (x2, y2)
-     new_centers = (start + end) / 2.0
-     diff = start - end
-     dist_segments = np.sqrt(np.sum(diff ** 2, axis=-1))
-
-     # ax + by = c
-     a = diff[:, 1]
-     b = -diff[:, 0]
-     c = a * start[:, 0] + b * start[:, 1]
-     pre_det = a[:, None] * b[None, :]
-     det = pre_det - np.transpose(pre_det)
-
-     pre_inter_y = a[:, None] * c[None, :]
-     inter_y = (pre_inter_y - np.transpose(pre_inter_y)) / (det + 1e-10)
-     pre_inter_x = c[:, None] * b[None, :]
-     inter_x = (pre_inter_x - np.transpose(pre_inter_x)) / (det + 1e-10)
-     inter_pts = np.concatenate([inter_x[:, :, None], inter_y[:, :, None]], axis=-1).astype('int32')
-
-     # 3. get corner information
-     # 3.1 get distance
-     '''
-     dist_segments:
-         | dist(0), dist(1), dist(2), ...|
-     dist_inter_to_segment1:
-         | dist(inter,0), dist(inter,0), dist(inter,0), ... |
-         | dist(inter,1), dist(inter,1), dist(inter,1), ... |
-         ...
-     dist_inter_to_segment2:
-         | dist(inter,0), dist(inter,1), dist(inter,2), ... |
-         | dist(inter,0), dist(inter,1), dist(inter,2), ... |
-         ...
-     '''
-
-     dist_inter_to_segment1_start = np.sqrt(
-         np.sum(((inter_pts - start[:, None, :]) ** 2), axis=-1, keepdims=True))  # [n_batch, n_batch, 1]
-     dist_inter_to_segment1_end = np.sqrt(
-         np.sum(((inter_pts - end[:, None, :]) ** 2), axis=-1, keepdims=True))  # [n_batch, n_batch, 1]
-     dist_inter_to_segment2_start = np.sqrt(
-         np.sum(((inter_pts - start[None, :, :]) ** 2), axis=-1, keepdims=True))  # [n_batch, n_batch, 1]
-     dist_inter_to_segment2_end = np.sqrt(
-         np.sum(((inter_pts - end[None, :, :]) ** 2), axis=-1, keepdims=True))  # [n_batch, n_batch, 1]
-
-     # sort ascending
-     dist_inter_to_segment1 = np.sort(
-         np.concatenate([dist_inter_to_segment1_start, dist_inter_to_segment1_end], axis=-1),
-         axis=-1)  # [n_batch, n_batch, 2]
-     dist_inter_to_segment2 = np.sort(
-         np.concatenate([dist_inter_to_segment2_start, dist_inter_to_segment2_end], axis=-1),
-         axis=-1)  # [n_batch, n_batch, 2]
-
-     # 3.2 get degree
-     inter_to_start = new_centers[:, None, :] - inter_pts
-     deg_inter_to_start = np.arctan2(inter_to_start[:, :, 1], inter_to_start[:, :, 0]) * 180 / np.pi
-     deg_inter_to_start[deg_inter_to_start < 0.0] += 360
-     inter_to_end = new_centers[None, :, :] - inter_pts
-     deg_inter_to_end = np.arctan2(inter_to_end[:, :, 1], inter_to_end[:, :, 0]) * 180 / np.pi
-     deg_inter_to_end[deg_inter_to_end < 0.0] += 360
-
-     '''
-     B -- G
-     |    |
-     C -- R
-     B : blue / G: green / C: cyan / R: red
-
-     0 -- 1
-     |    |
-     3 -- 2
-     '''
-     # rename variables
-     deg1_map, deg2_map = deg_inter_to_start, deg_inter_to_end
-     # sort deg ascending
-     deg_sort = np.sort(np.concatenate([deg1_map[:, :, None], deg2_map[:, :, None]], axis=-1), axis=-1)
-
-     deg_diff_map = np.abs(deg1_map - deg2_map)
-     # we only consider the smallest degree of intersect
-     deg_diff_map[deg_diff_map > 180] = 360 - deg_diff_map[deg_diff_map > 180]
-
-     # define available degree range
-     deg_range = [60, 120]
-
-     corner_dict = {corner_info: [] for corner_info in range(4)}
-     inter_points = []
-     for i in range(inter_pts.shape[0]):
-         for j in range(i + 1, inter_pts.shape[1]):
-             # i, j > line index, always i < j
-             x, y = inter_pts[i, j, :]
-             deg1, deg2 = deg_sort[i, j, :]
-             deg_diff = deg_diff_map[i, j]
-
-             check_degree = deg_diff > deg_range[0] and deg_diff < deg_range[1]
-
-             outside_ratio = params['outside_ratio']  # over ratio >>> drop it!
-             inside_ratio = params['inside_ratio']  # over ratio >>> drop it!
-             check_distance = ((dist_inter_to_segment1[i, j, 1] >= dist_segments[i] and \
-                                dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * outside_ratio) or \
-                               (dist_inter_to_segment1[i, j, 1] <= dist_segments[i] and \
-                                dist_inter_to_segment1[i, j, 0] <= dist_segments[i] * inside_ratio)) and \
-                              ((dist_inter_to_segment2[i, j, 1] >= dist_segments[j] and \
-                                dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * outside_ratio) or \
-                               (dist_inter_to_segment2[i, j, 1] <= dist_segments[j] and \
-                                dist_inter_to_segment2[i, j, 0] <= dist_segments[j] * inside_ratio))
-
-             if check_degree and check_distance:
-                 corner_info = None
-
-                 if (deg1 >= 0 and deg1 <= 45 and deg2 >= 45 and deg2 <= 120) or \
-                         (deg2 >= 315 and deg1 >= 45 and deg1 <= 120):
-                     corner_info, color_info = 0, 'blue'
-                 elif (deg1 >= 45 and deg1 <= 125 and deg2 >= 125 and deg2 <= 225):
-                     corner_info, color_info = 1, 'green'
-                 elif (deg1 >= 125 and deg1 <= 225 and deg2 >= 225 and deg2 <= 315):
-                     corner_info, color_info = 2, 'black'
-                 elif (deg1 >= 0 and deg1 <= 45 and deg2 >= 225 and deg2 <= 315) or \
-                         (deg2 >= 315 and deg1 >= 225 and deg1 <= 315):
-                     corner_info, color_info = 3, 'cyan'
-                 else:
-                     corner_info, color_info = 4, 'red'  # we don't use it
-                     continue
-
-                 corner_dict[corner_info].append([x, y, i, j])
-                 inter_points.append([x, y])
-
-     square_list = []
-     connect_list = []
-     segments_list = []
-     for corner0 in corner_dict[0]:
-         for corner1 in corner_dict[1]:
-             connect01 = False
-             for corner0_line in corner0[2:]:
-                 if corner0_line in corner1[2:]:
-                     connect01 = True
-                     break
-             if connect01:
-                 for corner2 in corner_dict[2]:
-                     connect12 = False
-                     for corner1_line in corner1[2:]:
-                         if corner1_line in corner2[2:]:
-                             connect12 = True
-                             break
-                     if connect12:
-                         for corner3 in corner_dict[3]:
-                             connect23 = False
-                             for corner2_line in corner2[2:]:
-                                 if corner2_line in corner3[2:]:
-                                     connect23 = True
-                                     break
-                             if connect23:
-                                 for corner3_line in corner3[2:]:
-                                     if corner3_line in corner0[2:]:
-                                         # SQUARE!!!
-                                         '''
-                                         0 -- 1
-                                         |    |
-                                         3 -- 2
-                                         square_list:
-                                             order: 0 > 1 > 2 > 3
-                                             | x0, y0, x1, y1, x2, y2, x3, y3 |
-                                             | x0, y0, x1, y1, x2, y2, x3, y3 |
-                                             ...
-                                         connect_list:
-                                             order: 01 > 12 > 23 > 30
-                                             | line_idx01, line_idx12, line_idx23, line_idx30 |
-                                             | line_idx01, line_idx12, line_idx23, line_idx30 |
-                                             ...
-                                         segments_list:
-                                             order: 0 > 1 > 2 > 3
-                                             | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
-                                             | line_idx0_i, line_idx0_j, line_idx1_i, line_idx1_j, line_idx2_i, line_idx2_j, line_idx3_i, line_idx3_j |
-                                             ...
-                                         '''
-                                         square_list.append(corner0[:2] + corner1[:2] + corner2[:2] + corner3[:2])
-                                         connect_list.append([corner0_line, corner1_line, corner2_line, corner3_line])
-                                         segments_list.append(corner0[2:] + corner1[2:] + corner2[2:] + corner3[2:])
-
-     def check_outside_inside(segments_info, connect_idx):
-         # return 'outside or inside', min distance, cover_param, peri_param
-         if connect_idx == segments_info[0]:
-             check_dist_mat = dist_inter_to_segment1
-         else:
-             check_dist_mat = dist_inter_to_segment2
-
-         i, j = segments_info
-         min_dist, max_dist = check_dist_mat[i, j, :]
-         connect_dist = dist_segments[connect_idx]
-         if max_dist > connect_dist:
-             return 'outside', min_dist, 0, 1
-         else:
-             return 'inside', min_dist, -1, -1
-
-     top_square = None
-
-     try:
-         map_size = input_shape[0] / 2
-         squares = np.array(square_list).reshape([-1, 4, 2])
-         score_array = []
-         connect_array = np.array(connect_list)
-         segments_array = np.array(segments_list).reshape([-1, 4, 2])
-
-         # get degree of corners:
-         squares_rollup = np.roll(squares, 1, axis=1)
-         squares_rolldown = np.roll(squares, -1, axis=1)
-         vec1 = squares_rollup - squares
-         normalized_vec1 = vec1 / (np.linalg.norm(vec1, axis=-1, keepdims=True) + 1e-10)
-         vec2 = squares_rolldown - squares
-         normalized_vec2 = vec2 / (np.linalg.norm(vec2, axis=-1, keepdims=True) + 1e-10)
-         inner_products = np.sum(normalized_vec1 * normalized_vec2, axis=-1)  # [n_squares, 4]
-         squares_degree = np.arccos(inner_products) * 180 / np.pi  # [n_squares, 4]
-
-         # get square score
-         overlap_scores = []
-         degree_scores = []
-         length_scores = []
-
-         for connects, segments, square, degree in zip(connect_array, segments_array, squares, squares_degree):
-             '''
-             0 -- 1
-             |    |
-             3 -- 2
-
-             # segments: [4, 2]
-             # connects: [4]
-             '''
-
-             ###################################### OVERLAP SCORES
-             cover = 0
-             perimeter = 0
-             # check 0 > 1 > 2 > 3
-             square_length = []
-
-             for start_idx in range(4):
-                 end_idx = (start_idx + 1) % 4
-
-                 connect_idx = connects[start_idx]  # segment idx of segment01
-                 start_segments = segments[start_idx]
-                 end_segments = segments[end_idx]
-
-                 start_point = square[start_idx]
-                 end_point = square[end_idx]
-
-                 # check whether outside or inside
-                 start_position, start_min, start_cover_param, start_peri_param = check_outside_inside(start_segments,
-                                                                                                       connect_idx)
-                 end_position, end_min, end_cover_param, end_peri_param = check_outside_inside(end_segments, connect_idx)
-
-                 cover += dist_segments[connect_idx] + start_cover_param * start_min + end_cover_param * end_min
-                 perimeter += dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min
-
-                 square_length.append(
-                     dist_segments[connect_idx] + start_peri_param * start_min + end_peri_param * end_min)
-
-             overlap_scores.append(cover / perimeter)
-             ######################################
-             ###################################### DEGREE SCORES
-             '''
-             deg0 vs deg2
-             deg1 vs deg3
-             '''
-             deg0, deg1, deg2, deg3 = degree
-             deg_ratio1 = deg0 / deg2
-             if deg_ratio1 > 1.0:
-                 deg_ratio1 = 1 / deg_ratio1
-             deg_ratio2 = deg1 / deg3
-             if deg_ratio2 > 1.0:
-                 deg_ratio2 = 1 / deg_ratio2
-             degree_scores.append((deg_ratio1 + deg_ratio2) / 2)
-             ######################################
-             ###################################### LENGTH SCORES
-             '''
-             len0 vs len2
-             len1 vs len3
-             '''
-             len0, len1, len2, len3 = square_length
-             len_ratio1 = len0 / len2 if len2 > len0 else len2 / len0
-             len_ratio2 = len1 / len3 if len3 > len1 else len3 / len1
-             length_scores.append((len_ratio1 + len_ratio2) / 2)
-
-             ######################################
-
-         overlap_scores = np.array(overlap_scores)
-         overlap_scores /= np.max(overlap_scores)
-
-         degree_scores = np.array(degree_scores)
-         # degree_scores /= np.max(degree_scores)
-
-         length_scores = np.array(length_scores)
-
-         ###################################### AREA SCORES
-         area_scores = np.reshape(squares, [-1, 4, 2])
-         area_x = area_scores[:, :, 0]
-         area_y = area_scores[:, :, 1]
-         correction = area_x[:, -1] * area_y[:, 0] - area_y[:, -1] * area_x[:, 0]
-         area_scores = np.sum(area_x[:, :-1] * area_y[:, 1:], axis=-1) - np.sum(area_y[:, :-1] * area_x[:, 1:], axis=-1)
-         area_scores = 0.5 * np.abs(area_scores + correction)
-         area_scores /= (map_size * map_size)  # np.max(area_scores)
-         ######################################
-
-         ###################################### CENTER SCORES
-         centers = np.array([[256 // 2, 256 // 2]], dtype='float32')  # [1, 2]
-         # squares: [n, 4, 2]
-         square_centers = np.mean(squares, axis=1)  # [n, 2]
-         center2center = np.sqrt(np.sum((centers - square_centers) ** 2))
-         center_scores = center2center / (map_size / np.sqrt(2.0))
-
-         '''
-         score_w = [overlap, degree, area, center, length]
-         '''
-         score_w = [0.0, 1.0, 10.0, 0.5, 1.0]
-         score_array = params['w_overlap'] * overlap_scores \
-                       + params['w_degree'] * degree_scores \
-                       + params['w_area'] * area_scores \
-                       - params['w_center'] * center_scores \
-                       + params['w_length'] * length_scores
-
-         best_square = []
-
-         sorted_idx = np.argsort(score_array)[::-1]
-         score_array = score_array[sorted_idx]
-         squares = squares[sorted_idx]
-
-     except Exception as e:
-         pass
-
-     '''return list
-     merged_lines, squares, scores
-     '''
-
-     try:
-         new_segments[:, 0] = new_segments[:, 0] * 2 / input_shape[1] * original_shape[1]
-         new_segments[:, 1] = new_segments[:, 1] * 2 / input_shape[0] * original_shape[0]
-         new_segments[:, 2] = new_segments[:, 2] * 2 / input_shape[1] * original_shape[1]
-         new_segments[:, 3] = new_segments[:, 3] * 2 / input_shape[0] * original_shape[0]
-     except:
-         new_segments = []
-
-     try:
-         squares[:, :, 0] = squares[:, :, 0] * 2 / input_shape[1] * original_shape[1]
-         squares[:, :, 1] = squares[:, :, 1] * 2 / input_shape[0] * original_shape[0]
-     except:
-         squares = []
-         score_array = []
-
-     try:
-         inter_points = np.array(inter_points)
-         inter_points[:, 0] = inter_points[:, 0] * 2 / input_shape[1] * original_shape[1]
-         inter_points[:, 1] = inter_points[:, 1] * 2 / input_shape[0] * original_shape[0]
-     except:
-         inter_points = []
-
-     return new_segments, squares, score_array, inter_points
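Both `deccode_output_score_and_ptss` and the Hough-map suppression above use the same max-pool trick for non-maximum suppression: a location survives only if it equals the max-pooled value at that position. A condensed standalone sketch of that technique (illustrative names, not part of the deleted file):

```python
import torch
import torch.nn.functional as F

def topk_peaks(heat, k=5, ksize=3):
    # Keep local maxima only, then take the k highest scores.
    hmax = F.max_pool2d(heat, ksize, stride=1, padding=(ksize - 1) // 2)
    heat = heat * (hmax == heat).float()
    scores, idx = torch.topk(heat.flatten(), k)
    w = heat.shape[-1]
    yy = torch.div(idx, w, rounding_mode='floor')
    xx = torch.fmod(idx, w)
    return scores, torch.stack((yy, xx), dim=-1)  # (y, x) per peak

scores, yx = topk_peaks(torch.rand(1, 1, 32, 32))
print(scores.shape, yx.shape)  # torch.Size([5]) torch.Size([5, 2])
```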
 
spaces/Anonymous-sub/Rerender/gmflow_module/gmflow/utils.py DELETED
@@ -1,86 +0,0 @@
- import torch
- from .position import PositionEmbeddingSine
-
-
- def split_feature(feature,
-                   num_splits=2,
-                   channel_last=False,
-                   ):
-     if channel_last:  # [B, H, W, C]
-         b, h, w, c = feature.size()
-         assert h % num_splits == 0 and w % num_splits == 0
-
-         b_new = b * num_splits * num_splits
-         h_new = h // num_splits
-         w_new = w // num_splits
-
-         feature = feature.view(b, num_splits, h // num_splits, num_splits, w // num_splits, c
-                                ).permute(0, 1, 3, 2, 4, 5).reshape(b_new, h_new, w_new, c)  # [B*K*K, H/K, W/K, C]
-     else:  # [B, C, H, W]
-         b, c, h, w = feature.size()
-         assert h % num_splits == 0 and w % num_splits == 0
-
-         b_new = b * num_splits * num_splits
-         h_new = h // num_splits
-         w_new = w // num_splits
-
-         feature = feature.view(b, c, num_splits, h // num_splits, num_splits, w // num_splits
-                                ).permute(0, 2, 4, 1, 3, 5).reshape(b_new, c, h_new, w_new)  # [B*K*K, C, H/K, W/K]
-
-     return feature
-
-
- def merge_splits(splits,
-                  num_splits=2,
-                  channel_last=False,
-                  ):
-     if channel_last:  # [B*K*K, H/K, W/K, C]
-         b, h, w, c = splits.size()
-         new_b = b // num_splits // num_splits
-
-         splits = splits.view(new_b, num_splits, num_splits, h, w, c)
-         merge = splits.permute(0, 1, 3, 2, 4, 5).contiguous().view(
-             new_b, num_splits * h, num_splits * w, c)  # [B, H, W, C]
-     else:  # [B*K*K, C, H/K, W/K]
-         b, c, h, w = splits.size()
-         new_b = b // num_splits // num_splits
-
-         splits = splits.view(new_b, num_splits, num_splits, c, h, w)
-         merge = splits.permute(0, 3, 1, 4, 2, 5).contiguous().view(
-             new_b, c, num_splits * h, num_splits * w)  # [B, C, H, W]
-
-     return merge
-
-
- def normalize_img(img0, img1):
-     # loaded images are in [0, 255]
-     # normalize by ImageNet mean and std
-     mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(img1.device)
-     std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(img1.device)
-     img0 = (img0 / 255. - mean) / std
-     img1 = (img1 / 255. - mean) / std
-
-     return img0, img1
-
-
- def feature_add_position(feature0, feature1, attn_splits, feature_channels):
-     pos_enc = PositionEmbeddingSine(num_pos_feats=feature_channels // 2)
-
-     if attn_splits > 1:  # add position in split windows
-         feature0_splits = split_feature(feature0, num_splits=attn_splits)
-         feature1_splits = split_feature(feature1, num_splits=attn_splits)
-
-         position = pos_enc(feature0_splits)
-
-         feature0_splits = feature0_splits + position
-         feature1_splits = feature1_splits + position
-
-         feature0 = merge_splits(feature0_splits, num_splits=attn_splits)
-         feature1 = merge_splits(feature1_splits, num_splits=attn_splits)
-     else:
-         position = pos_enc(feature0)
-
-         feature0 = feature0 + position
-         feature1 = feature1 + position
-
-     return feature0, feature1
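A condensed roundtrip check of the `split_feature`/`merge_splits` pair above, for the channel-first branch (a sketch; the real functions also handle the channel-last layout):

```python
import torch

def split(x, k):
    # [B, C, H, W] -> [B*k*k, C, H/k, W/k]: tile the map into k x k windows.
    b, c, h, w = x.size()
    x = x.view(b, c, k, h // k, k, w // k)
    return x.permute(0, 2, 4, 1, 3, 5).reshape(b * k * k, c, h // k, w // k)

def merge(x, k):
    # Inverse of split: reassemble the k x k windows into one map.
    b, c, h, w = x.size()
    x = x.view(b // (k * k), k, k, c, h, w)
    return x.permute(0, 3, 1, 4, 2, 5).reshape(b // (k * k), c, k * h, k * w)

feat = torch.randn(2, 8, 16, 16)
assert torch.equal(merge(split(feat, 2), 2), feat)  # exact roundtrip
```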
 
spaces/Anthony7906/MengHuiMXD_GPT/assets/custom.js DELETED
@@ -1,224 +0,0 @@
-
- // custom javascript here
-
- const MAX_HISTORY_LENGTH = 32;
-
- var key_down_history = [];
- var currentIndex = -1;
- var user_input_ta;
-
- var gradioContainer = null;
- var user_input_ta = null;
- var user_input_tb = null;
- var userInfoDiv = null;
- var appTitleDiv = null;
- var chatbot = null;
- var apSwitch = null;
-
- var ga = document.getElementsByTagName("gradio-app");
- var targetNode = ga[0];
- var isInIframe = (window.self !== window.top);
-
- // has the gradio page finished loading??? can I touch its elements yet??
- function gradioLoaded(mutations) {
-     for (var i = 0; i < mutations.length; i++) {
-         if (mutations[i].addedNodes.length) {
-             gradioContainer = document.querySelector(".gradio-container");
-             user_input_tb = document.getElementById('user_input_tb');
-             userInfoDiv = document.getElementById("user_info");
-             appTitleDiv = document.getElementById("app_title");
-             chatbot = document.querySelector('#chuanhu_chatbot');
-             apSwitch = document.querySelector('.apSwitch input[type="checkbox"]');
-
-             if (gradioContainer && apSwitch) { // has gradioContainer loaded yet?
-                 adjustDarkMode();
-             }
-             if (user_input_tb) { // has user_input_tb loaded yet?
-                 selectHistory();
-             }
-             if (userInfoDiv && appTitleDiv) { // have userInfoDiv and appTitleDiv loaded yet?
-                 setTimeout(showOrHideUserInfo(), 2000);
-             }
-             if (chatbot) { // has the chatbot loaded yet?
-                 setChatbotHeight()
-             }
-         }
-     }
- }
-
- function selectHistory() {
-     user_input_ta = user_input_tb.querySelector("textarea");
-     if (user_input_ta) {
-         observer.disconnect(); // stop observing
-         // listen for keydown events on the textarea
-         user_input_ta.addEventListener("keydown", function (event) {
-             var value = user_input_ta.value.trim();
-             // check whether an arrow key was pressed
-             if (event.code === 'ArrowUp' || event.code === 'ArrowDown') {
-                 // if the input box has content that is not in the history, do nothing
-                 if (value && key_down_history.indexOf(value) === -1)
-                     return;
-                 // prevent the default behavior for the actions we handle.
-                 event.preventDefault();
-                 var length = key_down_history.length;
-                 if (length === 0) {
-                     currentIndex = -1; // if the history is empty, just reset the current selection
-                     return;
-                 }
-                 if (currentIndex === -1) {
-                     currentIndex = length;
-                 }
-                 if (event.code === 'ArrowUp' && currentIndex > 0) {
-                     currentIndex--;
-                     user_input_ta.value = key_down_history[currentIndex];
-                 } else if (event.code === 'ArrowDown' && currentIndex < length - 1) {
-                     currentIndex++;
-                     user_input_ta.value = key_down_history[currentIndex];
-                 }
-                 user_input_ta.selectionStart = user_input_ta.value.length;
-                 user_input_ta.selectionEnd = user_input_ta.value.length;
-                 const input_event = new InputEvent("input", { bubbles: true, cancelable: true });
-                 user_input_ta.dispatchEvent(input_event);
-             } else if (event.code === "Enter") {
-                 if (value) {
-                     currentIndex = -1;
-                     if (key_down_history.indexOf(value) === -1) {
-                         key_down_history.push(value);
-                         if (key_down_history.length > MAX_HISTORY_LENGTH) {
-                             key_down_history.shift();
-                         }
-                     }
-                 }
-             }
-         });
-     }
- }
-
- function toggleUserInfoVisibility(shouldHide) {
-     if (userInfoDiv) {
-         if (shouldHide) {
-             userInfoDiv.classList.add("hideK");
-         } else {
-             userInfoDiv.classList.remove("hideK");
-         }
-     }
- }
- function showOrHideUserInfo() {
-     var sendBtn = document.getElementById("submit_btn");
-
-     // Bind mouse/touch events to show/hide user info
-     appTitleDiv.addEventListener("mouseenter", function () {
-         toggleUserInfoVisibility(false);
-     });
-     userInfoDiv.addEventListener("mouseenter", function () {
-         toggleUserInfoVisibility(false);
-     });
-     sendBtn.addEventListener("mouseenter", function () {
-         toggleUserInfoVisibility(false);
-     });
-
-     appTitleDiv.addEventListener("mouseleave", function () {
-         toggleUserInfoVisibility(true);
-     });
-     userInfoDiv.addEventListener("mouseleave", function () {
-         toggleUserInfoVisibility(true);
-     });
-     sendBtn.addEventListener("mouseleave", function () {
-         toggleUserInfoVisibility(true);
-     });
-
-     appTitleDiv.ontouchstart = function () {
-         toggleUserInfoVisibility(false);
-     };
-     userInfoDiv.ontouchstart = function () {
-         toggleUserInfoVisibility(false);
-     };
-     sendBtn.ontouchstart = function () {
-         toggleUserInfoVisibility(false);
-     };
-
-     appTitleDiv.ontouchend = function () {
-         setTimeout(function () {
-             toggleUserInfoVisibility(true);
-         }, 3000);
-     };
-     userInfoDiv.ontouchend = function () {
-         setTimeout(function () {
-             toggleUserInfoVisibility(true);
-         }, 3000);
-     };
-     sendBtn.ontouchend = function () {
-         setTimeout(function () {
-             toggleUserInfoVisibility(true);
-         }, 3000); // delay 3 seconds before hiding user info
-     };
-
-     // Hide user info after 2 seconds
-     setTimeout(function () {
-         toggleUserInfoVisibility(true);
-     }, 2000);
- }
-
- function toggleDarkMode(isEnabled) {
-     if (isEnabled) {
-         gradioContainer.classList.add("dark");
-         document.body.style.setProperty("background-color", "var(--neutral-950)", "important");
-     } else {
-         gradioContainer.classList.remove("dark");
-         document.body.style.backgroundColor = "";
-     }
- }
- function adjustDarkMode() {
-     const darkModeQuery = window.matchMedia("(prefers-color-scheme: dark)");
-
-     // set the initial state from the current color scheme
-     apSwitch.checked = darkModeQuery.matches;
-     toggleDarkMode(darkModeQuery.matches);
-     // listen for color scheme changes
-     darkModeQuery.addEventListener("change", (e) => {
-         apSwitch.checked = e.matches;
-         toggleDarkMode(e.matches);
-     });
-     // apSwitch = document.querySelector('.apSwitch input[type="checkbox"]');
-     apSwitch.addEventListener("change", (e) => {
-         toggleDarkMode(e.target.checked);
-     });
- }
-
- function setChatbotHeight() {
-     const screenWidth = window.innerWidth;
-     const statusDisplay = document.querySelector('#status_display');
-     const statusDisplayHeight = statusDisplay ? statusDisplay.offsetHeight : 0;
-     const wrap = chatbot.querySelector('.wrap');
-     const vh = window.innerHeight * 0.01;
-     document.documentElement.style.setProperty('--vh', `${vh}px`);
-     if (isInIframe) {
-         chatbot.style.height = `700px`;
-         wrap.style.maxHeight = `calc(700px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`
-     } else {
-         if (screenWidth <= 320) {
-             chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px)`;
-             wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 150}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
-         } else if (screenWidth <= 499) {
-             chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px)`;
-             wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 100}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
-         } else {
-             chatbot.style.height = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px)`;
-             wrap.style.maxHeight = `calc(var(--vh, 1vh) * 100 - ${statusDisplayHeight + 160}px - var(--line-sm) * 1rem - 2 * var(--block-label-margin))`;
-         }
-     }
- }
-
- // watch for DOM mutations inside the page
- var observer = new MutationObserver(function (mutations) {
-     gradioLoaded(mutations);
- });
- observer.observe(targetNode, { childList: true, subtree: true });
-
- // watch for page changes
- window.addEventListener("DOMContentLoaded", function () {
-     isInIframe = (window.self !== window.top);
- });
- window.addEventListener('resize', setChatbotHeight);
- window.addEventListener('scroll', setChatbotHeight);
- window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change", adjustDarkMode);
 
spaces/Apex-X/GODROOP/roop/processors/frame/face_swapper.py DELETED
@@ -1,88 +0,0 @@
- from typing import Any, List, Callable
- import cv2
- import insightface
- import threading
-
- import roop.globals
- import roop.processors.frame.core
- from roop.core import update_status
- from roop.face_analyser import get_one_face, get_many_faces
- from roop.typing import Face, Frame
- from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video
-
- FACE_SWAPPER = None
- THREAD_LOCK = threading.Lock()
- NAME = 'ROOP.FACE-SWAPPER'
-
-
- def get_face_swapper() -> Any:
-     global FACE_SWAPPER
-
-     with THREAD_LOCK:
-         if FACE_SWAPPER is None:
-             model_path = resolve_relative_path('../models/inswapper_128.onnx')
-             FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=roop.globals.execution_providers)
-     return FACE_SWAPPER
-
-
- def pre_check() -> bool:
-     download_directory_path = resolve_relative_path('../models')
-     conditional_download(download_directory_path, ['https://huggingface.co/Apex-X/inswapper_128.onnx/resolve/main/inswapper_128.onnx'])
-     return True
-
-
- def pre_start() -> bool:
-     if not is_image(roop.globals.source_path):
-         update_status('Select an image for source path.', NAME)
-         return False
-     elif not get_one_face(cv2.imread(roop.globals.source_path)):
-         update_status('No face in source path detected.', NAME)
-         return False
-     if not is_image(roop.globals.target_path) and not is_video(roop.globals.target_path):
-         update_status('Select an image or video for target path.', NAME)
-         return False
-     return True
-
-
- def post_process() -> None:
-     global FACE_SWAPPER
-
-     FACE_SWAPPER = None
-
-
- def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
-     return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True)
-
-
- def process_frame(source_face: Face, temp_frame: Frame) -> Frame:
-     if roop.globals.many_faces:
-         many_faces = get_many_faces(temp_frame)
-         if many_faces:
-             for target_face in many_faces:
-                 temp_frame = swap_face(source_face, target_face, temp_frame)
-     else:
-         target_face = get_one_face(temp_frame)
-         if target_face:
-             temp_frame = swap_face(source_face, target_face, temp_frame)
-     return temp_frame
-
-
- def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None:
-     source_face = get_one_face(cv2.imread(source_path))
-     for temp_frame_path in temp_frame_paths:
-         temp_frame = cv2.imread(temp_frame_path)
-         result = process_frame(source_face, temp_frame)
-         cv2.imwrite(temp_frame_path, result)
-         if update:
-             update()
-
-
- def process_image(source_path: str, target_path: str, output_path: str) -> None:
-     source_face = get_one_face(cv2.imread(source_path))
-     target_frame = cv2.imread(target_path)
-     result = process_frame(source_face, target_frame)
-     cv2.imwrite(output_path, result)
-
-
- def process_video(source_path: str, temp_frame_paths: List[str]) -> None:
-     roop.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames)
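`get_face_swapper` uses a lock-guarded lazy singleton so the ONNX model is loaded only once even when frames are processed by multiple threads. A minimal standalone sketch of that pattern, with a placeholder standing in for the real `insightface` load:

```python
import threading

_LOCK = threading.Lock()
_MODEL = None

def get_model():
    # Only one thread performs the expensive load; the rest reuse it.
    global _MODEL
    with _LOCK:
        if _MODEL is None:
            _MODEL = object()  # placeholder for the real model load
    return _MODEL

assert get_model() is get_model()  # same instance on every call
```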
 
spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/resolution/resolvelib/factory.py DELETED
@@ -1,730 +0,0 @@
- import contextlib
- import functools
- import logging
- from typing import (
-     TYPE_CHECKING,
-     Dict,
-     FrozenSet,
-     Iterable,
-     Iterator,
-     List,
-     Mapping,
-     NamedTuple,
-     Optional,
-     Sequence,
-     Set,
-     Tuple,
-     TypeVar,
-     cast,
- )
-
- from pip._vendor.packaging.requirements import InvalidRequirement
- from pip._vendor.packaging.specifiers import SpecifierSet
- from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
- from pip._vendor.resolvelib import ResolutionImpossible
-
- from pip._internal.cache import CacheEntry, WheelCache
- from pip._internal.exceptions import (
-     DistributionNotFound,
-     InstallationError,
-     MetadataInconsistent,
-     UnsupportedPythonVersion,
-     UnsupportedWheel,
- )
- from pip._internal.index.package_finder import PackageFinder
- from pip._internal.metadata import BaseDistribution, get_default_environment
- from pip._internal.models.link import Link
- from pip._internal.models.wheel import Wheel
- from pip._internal.operations.prepare import RequirementPreparer
- from pip._internal.req.constructors import install_req_from_link_and_ireq
- from pip._internal.req.req_install import (
-     InstallRequirement,
-     check_invalid_constraint_type,
- )
- from pip._internal.resolution.base import InstallRequirementProvider
- from pip._internal.utils.compatibility_tags import get_supported
- from pip._internal.utils.hashes import Hashes
- from pip._internal.utils.packaging import get_requirement
- from pip._internal.utils.virtualenv import running_under_virtualenv
-
- from .base import Candidate, CandidateVersion, Constraint, Requirement
- from .candidates import (
-     AlreadyInstalledCandidate,
-     BaseCandidate,
-     EditableCandidate,
-     ExtrasCandidate,
-     LinkCandidate,
-     RequiresPythonCandidate,
-     as_base_candidate,
- )
- from .found_candidates import FoundCandidates, IndexCandidateInfo
- from .requirements import (
-     ExplicitRequirement,
-     RequiresPythonRequirement,
-     SpecifierRequirement,
-     UnsatisfiableRequirement,
- )
-
- if TYPE_CHECKING:
-     from typing import Protocol
-
-     class ConflictCause(Protocol):
-         requirement: RequiresPythonRequirement
-         parent: Candidate
-
-
- logger = logging.getLogger(__name__)
-
- C = TypeVar("C")
- Cache = Dict[Link, C]
-
-
- class CollectedRootRequirements(NamedTuple):
-     requirements: List[Requirement]
-     constraints: Dict[str, Constraint]
-     user_requested: Dict[str, int]
-
-
- class Factory:
-     def __init__(
-         self,
-         finder: PackageFinder,
-         preparer: RequirementPreparer,
-         make_install_req: InstallRequirementProvider,
-         wheel_cache: Optional[WheelCache],
-         use_user_site: bool,
-         force_reinstall: bool,
-         ignore_installed: bool,
-         ignore_requires_python: bool,
-         py_version_info: Optional[Tuple[int, ...]] = None,
-     ) -> None:
-         self._finder = finder
-         self.preparer = preparer
-         self._wheel_cache = wheel_cache
-         self._python_candidate = RequiresPythonCandidate(py_version_info)
-         self._make_install_req_from_spec = make_install_req
-         self._use_user_site = use_user_site
-         self._force_reinstall = force_reinstall
-         self._ignore_requires_python = ignore_requires_python
-
-         self._build_failures: Cache[InstallationError] = {}
-         self._link_candidate_cache: Cache[LinkCandidate] = {}
-         self._editable_candidate_cache: Cache[EditableCandidate] = {}
-         self._installed_candidate_cache: Dict[str, AlreadyInstalledCandidate] = {}
-         self._extras_candidate_cache: Dict[
-             Tuple[int, FrozenSet[str]], ExtrasCandidate
-         ] = {}
-
-         if not ignore_installed:
-             env = get_default_environment()
-             self._installed_dists = {
-                 dist.canonical_name: dist
-                 for dist in env.iter_installed_distributions(local_only=False)
-             }
-         else:
-             self._installed_dists = {}
-
-     @property
-     def force_reinstall(self) -> bool:
-         return self._force_reinstall
-
-     def _fail_if_link_is_unsupported_wheel(self, link: Link) -> None:
-         if not link.is_wheel:
-             return
-         wheel = Wheel(link.filename)
-         if wheel.supported(self._finder.target_python.get_tags()):
-             return
-         msg = f"{link.filename} is not a supported wheel on this platform."
-         raise UnsupportedWheel(msg)
-
-     def _make_extras_candidate(
-         self, base: BaseCandidate, extras: FrozenSet[str]
-     ) -> ExtrasCandidate:
-         cache_key = (id(base), extras)
-         try:
-             candidate = self._extras_candidate_cache[cache_key]
-         except KeyError:
-             candidate = ExtrasCandidate(base, extras)
-             self._extras_candidate_cache[cache_key] = candidate
-         return candidate
-
-     def _make_candidate_from_dist(
-         self,
-         dist: BaseDistribution,
-         extras: FrozenSet[str],
-         template: InstallRequirement,
-     ) -> Candidate:
-         try:
-             base = self._installed_candidate_cache[dist.canonical_name]
-         except KeyError:
-             base = AlreadyInstalledCandidate(dist, template, factory=self)
-             self._installed_candidate_cache[dist.canonical_name] = base
-         if not extras:
-             return base
-         return self._make_extras_candidate(base, extras)
-
-     def _make_candidate_from_link(
-         self,
-         link: Link,
-         extras: FrozenSet[str],
-         template: InstallRequirement,
-         name: Optional[NormalizedName],
-         version: Optional[CandidateVersion],
-     ) -> Optional[Candidate]:
-         # TODO: Check already installed candidate, and use it if the link and
-         # editable flag match.
-
-         if link in self._build_failures:
-             # We already tried this candidate before, and it does not build.
-             # Don't bother trying again.
-             return None
-
-         if template.editable:
-             if link not in self._editable_candidate_cache:
-                 try:
-                     self._editable_candidate_cache[link] = EditableCandidate(
-                         link,
-                         template,
-                         factory=self,
-                         name=name,
-                         version=version,
-                     )
-                 except MetadataInconsistent as e:
-                     logger.info(
-                         "Discarding [blue underline]%s[/]: [yellow]%s[reset]",
-                         link,
-                         e,
-                         extra={"markup": True},
-                     )
-                     self._build_failures[link] = e
-                     return None
-
-             base: BaseCandidate = self._editable_candidate_cache[link]
-         else:
-             if link not in self._link_candidate_cache:
-                 try:
-                     self._link_candidate_cache[link] = LinkCandidate(
-                         link,
-                         template,
-                         factory=self,
-                         name=name,
-                         version=version,
-                     )
-                 except MetadataInconsistent as e:
-                     logger.info(
-                         "Discarding [blue underline]%s[/]: [yellow]%s[reset]",
-                         link,
-                         e,
-                         extra={"markup": True},
-                     )
-                     self._build_failures[link] = e
-                     return None
-             base = self._link_candidate_cache[link]
-
-         if not extras:
-             return base
-         return self._make_extras_candidate(base, extras)
-
-     def _iter_found_candidates(
-         self,
-         ireqs: Sequence[InstallRequirement],
-         specifier: SpecifierSet,
-         hashes: Hashes,
-         prefers_installed: bool,
-         incompatible_ids: Set[int],
-     ) -> Iterable[Candidate]:
-         if not ireqs:
-             return ()
-
-         # The InstallRequirement implementation requires us to give it a
-         # "template". Here we just choose the first requirement to represent
-         # all of them.
-         # Hopefully the Project model can correct this mismatch in the future.
-         template = ireqs[0]
-         assert template.req, "Candidates found on index must be PEP 508"
-         name = canonicalize_name(template.req.name)
-
-         extras: FrozenSet[str] = frozenset()
-         for ireq in ireqs:
-             assert ireq.req, "Candidates found on index must be PEP 508"
250
- specifier &= ireq.req.specifier
251
- hashes &= ireq.hashes(trust_internet=False)
252
- extras |= frozenset(ireq.extras)
253
-
254
- def _get_installed_candidate() -> Optional[Candidate]:
255
- """Get the candidate for the currently-installed version."""
256
- # If --force-reinstall is set, we want the version from the index
257
- # instead, so we "pretend" there is nothing installed.
258
- if self._force_reinstall:
259
- return None
260
- try:
261
- installed_dist = self._installed_dists[name]
262
- except KeyError:
263
- return None
264
- # Don't use the installed distribution if its version does not fit
265
- # the current dependency graph.
266
- if not specifier.contains(installed_dist.version, prereleases=True):
267
- return None
268
- candidate = self._make_candidate_from_dist(
269
- dist=installed_dist,
270
- extras=extras,
271
- template=template,
272
- )
273
- # The candidate is a known incompatibility. Don't use it.
274
- if id(candidate) in incompatible_ids:
275
- return None
276
- return candidate
277
-
278
- def iter_index_candidate_infos() -> Iterator[IndexCandidateInfo]:
279
- result = self._finder.find_best_candidate(
280
- project_name=name,
281
- specifier=specifier,
282
- hashes=hashes,
283
- )
284
- icans = list(result.iter_applicable())
285
-
286
- # PEP 592: Yanked releases are ignored unless the specifier
287
- # explicitly pins a version (via '==' or '===') that can be
288
- # solely satisfied by a yanked release.
289
- all_yanked = all(ican.link.is_yanked for ican in icans)
290
-
291
- def is_pinned(specifier: SpecifierSet) -> bool:
292
- for sp in specifier:
293
- if sp.operator == "===":
294
- return True
295
- if sp.operator != "==":
296
- continue
297
- if sp.version.endswith(".*"):
298
- continue
299
- return True
300
- return False
301
-
302
- pinned = is_pinned(specifier)
303
-
304
- # PackageFinder returns earlier versions first, so we reverse.
305
- for ican in reversed(icans):
306
- if not (all_yanked and pinned) and ican.link.is_yanked:
307
- continue
308
- func = functools.partial(
309
- self._make_candidate_from_link,
310
- link=ican.link,
311
- extras=extras,
312
- template=template,
313
- name=name,
314
- version=ican.version,
315
- )
316
- yield ican.version, func
317
-
318
- return FoundCandidates(
319
- iter_index_candidate_infos,
320
- _get_installed_candidate(),
321
- prefers_installed,
322
- incompatible_ids,
323
- )
324
-
325
- def _iter_explicit_candidates_from_base(
326
- self,
327
- base_requirements: Iterable[Requirement],
328
- extras: FrozenSet[str],
329
- ) -> Iterator[Candidate]:
330
- """Produce explicit candidates from the base given an extra-ed package.
331
-
332
- :param base_requirements: Requirements known to the resolver. The
333
- requirements are guaranteed to not have extras.
334
- :param extras: The extras to inject into the explicit requirements'
335
- candidates.
336
- """
337
- for req in base_requirements:
338
- lookup_cand, _ = req.get_candidate_lookup()
339
- if lookup_cand is None: # Not explicit.
340
- continue
341
- # We've stripped extras from the identifier, and should always
342
- # get a BaseCandidate here, unless there's a bug elsewhere.
343
- base_cand = as_base_candidate(lookup_cand)
344
- assert base_cand is not None, "no extras here"
345
- yield self._make_extras_candidate(base_cand, extras)
346
-
347
- def _iter_candidates_from_constraints(
348
- self,
349
- identifier: str,
350
- constraint: Constraint,
351
- template: InstallRequirement,
352
- ) -> Iterator[Candidate]:
353
- """Produce explicit candidates from constraints.
354
-
355
- This creates "fake" InstallRequirement objects that are basically clones
356
- of what "should" be the template, but with original_link set to link.
357
- """
358
- for link in constraint.links:
359
- self._fail_if_link_is_unsupported_wheel(link)
360
- candidate = self._make_candidate_from_link(
361
- link,
362
- extras=frozenset(),
363
- template=install_req_from_link_and_ireq(link, template),
364
- name=canonicalize_name(identifier),
365
- version=None,
366
- )
367
- if candidate:
368
- yield candidate
369
-
370
- def find_candidates(
371
- self,
372
- identifier: str,
373
- requirements: Mapping[str, Iterable[Requirement]],
374
- incompatibilities: Mapping[str, Iterator[Candidate]],
375
- constraint: Constraint,
376
- prefers_installed: bool,
377
- ) -> Iterable[Candidate]:
378
- # Collect basic lookup information from the requirements.
379
- explicit_candidates: Set[Candidate] = set()
380
- ireqs: List[InstallRequirement] = []
381
- for req in requirements[identifier]:
382
- cand, ireq = req.get_candidate_lookup()
383
- if cand is not None:
384
- explicit_candidates.add(cand)
385
- if ireq is not None:
386
- ireqs.append(ireq)
387
-
388
- # If the current identifier contains extras, add explicit candidates
389
- # from entries from extra-less identifier.
390
- with contextlib.suppress(InvalidRequirement):
391
- parsed_requirement = get_requirement(identifier)
392
- explicit_candidates.update(
393
- self._iter_explicit_candidates_from_base(
394
- requirements.get(parsed_requirement.name, ()),
395
- frozenset(parsed_requirement.extras),
396
- ),
397
- )
398
-
399
- # Add explicit candidates from constraints. We only do this if there are
400
- # known ireqs, which represent requirements not already explicit. If
401
- # there are no ireqs, we're constraining already-explicit requirements,
402
- # which is handled later when we return the explicit candidates.
403
- if ireqs:
404
- try:
405
- explicit_candidates.update(
406
- self._iter_candidates_from_constraints(
407
- identifier,
408
- constraint,
409
- template=ireqs[0],
410
- ),
411
- )
412
- except UnsupportedWheel:
413
- # If we're constrained to install a wheel incompatible with the
414
- # target architecture, no candidates will ever be valid.
415
- return ()
416
-
417
- # Since we cache all the candidates, incompatibility identification
418
- # can be made quicker by comparing only the id() values.
419
- incompat_ids = {id(c) for c in incompatibilities.get(identifier, ())}
420
-
421
- # If none of the requirements want an explicit candidate, we can ask
422
- # the finder for candidates.
423
- if not explicit_candidates:
424
- return self._iter_found_candidates(
425
- ireqs,
426
- constraint.specifier,
427
- constraint.hashes,
428
- prefers_installed,
429
- incompat_ids,
430
- )
431
-
432
- return (
433
- c
434
- for c in explicit_candidates
435
- if id(c) not in incompat_ids
436
- and constraint.is_satisfied_by(c)
437
- and all(req.is_satisfied_by(c) for req in requirements[identifier])
438
- )
439
-
440
- def _make_requirement_from_install_req(
441
- self, ireq: InstallRequirement, requested_extras: Iterable[str]
442
- ) -> Optional[Requirement]:
443
- if not ireq.match_markers(requested_extras):
444
- logger.info(
445
- "Ignoring %s: markers '%s' don't match your environment",
446
- ireq.name,
447
- ireq.markers,
448
- )
449
- return None
450
- if not ireq.link:
451
- return SpecifierRequirement(ireq)
452
- self._fail_if_link_is_unsupported_wheel(ireq.link)
453
- cand = self._make_candidate_from_link(
454
- ireq.link,
455
- extras=frozenset(ireq.extras),
456
- template=ireq,
457
- name=canonicalize_name(ireq.name) if ireq.name else None,
458
- version=None,
459
- )
460
- if cand is None:
461
- # There's no way we can satisfy a URL requirement if the underlying
462
- # candidate fails to build. An unnamed URL must be user-supplied, so
463
- # we fail eagerly. If the URL is named, an unsatisfiable requirement
464
- # can make the resolver do the right thing, either backtrack (and
465
- # maybe find some other requirement that's buildable) or raise a
466
- # ResolutionImpossible eventually.
467
- if not ireq.name:
468
- raise self._build_failures[ireq.link]
469
- return UnsatisfiableRequirement(canonicalize_name(ireq.name))
470
- return self.make_requirement_from_candidate(cand)
471
-
472
- def collect_root_requirements(
473
- self, root_ireqs: List[InstallRequirement]
474
- ) -> CollectedRootRequirements:
475
- collected = CollectedRootRequirements([], {}, {})
476
- for i, ireq in enumerate(root_ireqs):
477
- if ireq.constraint:
478
- # Ensure we only accept valid constraints
479
- problem = check_invalid_constraint_type(ireq)
480
- if problem:
481
- raise InstallationError(problem)
482
- if not ireq.match_markers():
483
- continue
484
- assert ireq.name, "Constraint must be named"
485
- name = canonicalize_name(ireq.name)
486
- if name in collected.constraints:
487
- collected.constraints[name] &= ireq
488
- else:
489
- collected.constraints[name] = Constraint.from_ireq(ireq)
490
- else:
491
- req = self._make_requirement_from_install_req(
492
- ireq,
493
- requested_extras=(),
494
- )
495
- if req is None:
496
- continue
497
- if ireq.user_supplied and req.name not in collected.user_requested:
498
- collected.user_requested[req.name] = i
499
- collected.requirements.append(req)
500
- return collected
501
-
502
- def make_requirement_from_candidate(
503
- self, candidate: Candidate
504
- ) -> ExplicitRequirement:
505
- return ExplicitRequirement(candidate)
506
-
507
- def make_requirement_from_spec(
508
- self,
509
- specifier: str,
510
- comes_from: Optional[InstallRequirement],
511
- requested_extras: Iterable[str] = (),
512
- ) -> Optional[Requirement]:
513
- ireq = self._make_install_req_from_spec(specifier, comes_from)
514
- return self._make_requirement_from_install_req(ireq, requested_extras)
515
-
516
- def make_requires_python_requirement(
517
- self,
518
- specifier: SpecifierSet,
519
- ) -> Optional[Requirement]:
520
- if self._ignore_requires_python:
521
- return None
522
- # Don't bother creating a dependency for an empty Requires-Python.
523
- if not str(specifier):
524
- return None
525
- return RequiresPythonRequirement(specifier, self._python_candidate)
526
-
527
- def get_wheel_cache_entry(
528
- self, link: Link, name: Optional[str]
529
- ) -> Optional[CacheEntry]:
530
- """Look up the link in the wheel cache.
531
-
532
- If ``preparer.require_hashes`` is True, don't use the wheel cache,
533
- because cached wheels, always built locally, have different hashes
534
- than the files downloaded from the index server and thus throw false
535
- hash mismatches. Furthermore, cached wheels at present have
536
- nondeterministic contents due to file modification times.
537
- """
538
- if self._wheel_cache is None:
539
- return None
540
- return self._wheel_cache.get_cache_entry(
541
- link=link,
542
- package_name=name,
543
- supported_tags=get_supported(),
544
- )
545
-
546
- def get_dist_to_uninstall(self, candidate: Candidate) -> Optional[BaseDistribution]:
547
- # TODO: Are there more cases this needs to return True? Editable?
548
- dist = self._installed_dists.get(candidate.project_name)
549
- if dist is None: # Not installed, no uninstallation required.
550
- return None
551
-
552
- # We're installing into global site. The current installation must
553
- # be uninstalled, no matter it's in global or user site, because the
554
- # user site installation has precedence over global.
555
- if not self._use_user_site:
556
- return dist
557
-
558
- # We're installing into user site. Remove the user site installation.
559
- if dist.in_usersite:
560
- return dist
561
-
562
- # We're installing into user site, but the installed incompatible
563
- # package is in global site. We can't uninstall that, and would let
564
- # the new user installation to "shadow" it. But shadowing won't work
565
- # in virtual environments, so we error out.
566
- if running_under_virtualenv() and dist.in_site_packages:
567
- message = (
568
- f"Will not install to the user site because it will lack "
569
- f"sys.path precedence to {dist.raw_name} in {dist.location}"
570
- )
571
- raise InstallationError(message)
572
- return None
573
-
574
- def _report_requires_python_error(
575
- self, causes: Sequence["ConflictCause"]
576
- ) -> UnsupportedPythonVersion:
577
- assert causes, "Requires-Python error reported with no cause"
578
-
579
- version = self._python_candidate.version
580
-
581
- if len(causes) == 1:
582
- specifier = str(causes[0].requirement.specifier)
583
- message = (
584
- f"Package {causes[0].parent.name!r} requires a different "
585
- f"Python: {version} not in {specifier!r}"
586
- )
587
- return UnsupportedPythonVersion(message)
588
-
589
- message = f"Packages require a different Python. {version} not in:"
590
- for cause in causes:
591
- package = cause.parent.format_for_error()
592
- specifier = str(cause.requirement.specifier)
593
- message += f"\n{specifier!r} (required by {package})"
594
- return UnsupportedPythonVersion(message)
595
-
596
- def _report_single_requirement_conflict(
597
- self, req: Requirement, parent: Optional[Candidate]
598
- ) -> DistributionNotFound:
599
- if parent is None:
600
- req_disp = str(req)
601
- else:
602
- req_disp = f"{req} (from {parent.name})"
603
-
604
- cands = self._finder.find_all_candidates(req.project_name)
605
- skipped_by_requires_python = self._finder.requires_python_skipped_reasons()
606
- versions = [str(v) for v in sorted({c.version for c in cands})]
607
-
608
- if skipped_by_requires_python:
609
- logger.critical(
610
- "Ignored the following versions that require a different python "
611
- "version: %s",
612
- "; ".join(skipped_by_requires_python) or "none",
613
- )
614
- logger.critical(
615
- "Could not find a version that satisfies the requirement %s "
616
- "(from versions: %s)",
617
- req_disp,
618
- ", ".join(versions) or "none",
619
- )
620
- if str(req) == "requirements.txt":
621
- logger.info(
622
- "HINT: You are attempting to install a package literally "
623
- 'named "requirements.txt" (which cannot exist). Consider '
624
- "using the '-r' flag to install the packages listed in "
625
- "requirements.txt"
626
- )
627
-
628
- return DistributionNotFound(f"No matching distribution found for {req}")
629
-
630
- def get_installation_error(
631
- self,
632
- e: "ResolutionImpossible[Requirement, Candidate]",
633
- constraints: Dict[str, Constraint],
634
- ) -> InstallationError:
635
- assert e.causes, "Installation error reported with no cause"
636
-
637
- # If one of the things we can't solve is "we need Python X.Y",
638
- # that is what we report.
639
- requires_python_causes = [
640
- cause
641
- for cause in e.causes
642
- if isinstance(cause.requirement, RequiresPythonRequirement)
643
- and not cause.requirement.is_satisfied_by(self._python_candidate)
644
- ]
645
- if requires_python_causes:
646
- # The comprehension above makes sure all Requirement instances are
647
- # RequiresPythonRequirement, so let's cast for convenience.
648
- return self._report_requires_python_error(
649
- cast("Sequence[ConflictCause]", requires_python_causes),
650
- )
651
-
652
- # Otherwise, we have a set of causes which can't all be satisfied
653
- # at once.
654
-
655
- # The simplest case is when we have *one* cause that can't be
656
- # satisfied. We just report that case.
657
- if len(e.causes) == 1:
658
- req, parent = e.causes[0]
659
- if req.name not in constraints:
660
- return self._report_single_requirement_conflict(req, parent)
661
-
662
- # OK, we now have a list of requirements that can't all be
663
- # satisfied at once.
664
-
665
- # A couple of formatting helpers
666
- def text_join(parts: List[str]) -> str:
667
- if len(parts) == 1:
668
- return parts[0]
669
-
670
- return ", ".join(parts[:-1]) + " and " + parts[-1]
671
-
672
- def describe_trigger(parent: Candidate) -> str:
673
- ireq = parent.get_install_requirement()
674
- if not ireq or not ireq.comes_from:
675
- return f"{parent.name}=={parent.version}"
676
- if isinstance(ireq.comes_from, InstallRequirement):
677
- return str(ireq.comes_from.name)
678
- return str(ireq.comes_from)
679
-
680
- triggers = set()
681
- for req, parent in e.causes:
682
- if parent is None:
683
- # This is a root requirement, so we can report it directly
684
- trigger = req.format_for_error()
685
- else:
686
- trigger = describe_trigger(parent)
687
- triggers.add(trigger)
688
-
689
- if triggers:
690
- info = text_join(sorted(triggers))
691
- else:
692
- info = "the requested packages"
693
-
694
- msg = (
695
- "Cannot install {} because these package versions "
696
- "have conflicting dependencies.".format(info)
697
- )
698
- logger.critical(msg)
699
- msg = "\nThe conflict is caused by:"
700
-
701
- relevant_constraints = set()
702
- for req, parent in e.causes:
703
- if req.name in constraints:
704
- relevant_constraints.add(req.name)
705
- msg = msg + "\n "
706
- if parent:
707
- msg = msg + f"{parent.name} {parent.version} depends on "
708
- else:
709
- msg = msg + "The user requested "
710
- msg = msg + req.format_for_error()
711
- for key in relevant_constraints:
712
- spec = constraints[key].specifier
713
- msg += f"\n The user requested (constraint) {key}{spec}"
714
-
715
- msg = (
716
- msg
717
- + "\n\n"
718
- + "To fix this you could try to:\n"
719
- + "1. loosen the range of package versions you've specified\n"
720
- + "2. remove package versions to allow pip attempt to solve "
721
- + "the dependency conflict\n"
722
- )
723
-
724
- logger.info(msg)
725
-
726
- return DistributionNotFound(
727
- "ResolutionImpossible: for help visit "
728
- "https://pip.pypa.io/en/latest/topics/dependency-resolution/"
729
- "#dealing-with-dependency-conflicts"
730
- )
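The yanked-release filtering in iter_index_candidate_infos above turns on whether the specifier "pins" an exact version (PEP 592). A minimal standalone sketch of that check, using only the packaging library and mirroring the is_pinned helper in the deleted file (the helper name and asserts here are illustrative, not pip API):

from packaging.specifiers import SpecifierSet

def is_pinned(specifier: SpecifierSet) -> bool:
    # '===' always pins; '==' pins unless it uses a trailing
    # wildcard such as '==1.2.*', which matches a whole release series.
    for sp in specifier:
        if sp.operator == "===":
            return True
        if sp.operator == "==" and not sp.version.endswith(".*"):
            return True
    return False

assert is_pinned(SpecifierSet("==1.2.3"))
assert not is_pinned(SpecifierSet("==1.2.*"))
assert not is_pinned(SpecifierSet(">=1.0,<2.0"))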
 
 
spaces/Atualli/yoloxTeste/telegramCrise.sh DELETED
@@ -1 +0,0 @@
- curl -X POST "https://api.telegram.org/bot766543741:AAE0oO_ni_QYkfS8tZxC-VZt0RJztFiZNHc/sendMessage?chat_id=-927074982&text=$1"
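A rough Python equivalent of this one-liner, sketched with the requests library; the token and chat id here are placeholders read from the environment rather than hard-coded, since embedding a bot token in the script (as above) exposes the credential to anyone who can read the repository:

import os
import requests

# Placeholder credentials, assumed to be provided via the environment.
TOKEN = os.environ["TELEGRAM_BOT_TOKEN"]
CHAT_ID = os.environ["TELEGRAM_CHAT_ID"]

def send_alert(text: str) -> None:
    url = f"https://api.telegram.org/bot{TOKEN}/sendMessage"
    # requests handles URL encoding of the message text for us.
    resp = requests.post(url, data={"chat_id": CHAT_ID, "text": text})
    resp.raise_for_status()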
 
 
spaces/Baishali/Pneumonia-Detection/app.py DELETED
@@ -1,55 +0,0 @@
-__author__ = "Baishali Dutta"
-__copyright__ = "Copyright (C) 2021 Baishali Dutta"
-__license__ = "Apache License 2.0"
-__version__ = "0.1"
-
-# -------------------------------------------------------------------------
-# Importing the libraries
-# -------------------------------------------------------------------------
-import gradio as gr
-import numpy as np
-from tensorflow.keras.models import load_model
-from tensorflow.keras.preprocessing import image
-
-# -------------------------------------------------------------------------
-# Configurations
-# -------------------------------------------------------------------------
-MODEL_LOC = 'pneumonia_detection_cnn_model.h5'
-
-# load the trained CNN model
-cnn_model = load_model(MODEL_LOC)
-
-
-def make_prediction(test_image):
-    test_image = test_image.name
-    test_image = image.load_img(test_image, target_size=(224, 224))
-    test_image = image.img_to_array(test_image) / 255.
-    test_image = np.expand_dims(test_image, axis=0)
-    result = cnn_model.predict(test_image)
-    return {"Normal": str(result[0][0]), "Pneumonia": str(result[0][1])}
-
-
-image_input = gr.inputs.Image(type="file")
-
-title = "Pneumonia Detection"
-description = "This application uses a Convolutional Neural Network (CNN) model to predict whether a chosen X-ray shows that " \
-              "the person has pneumonia or not. To check the model prediction, here are the true labels of the " \
-              "provided examples below: the first 4 images belong to normal whereas the last 4 images are of pneumonia " \
-              "category. More specifically, the 5th and 6th images are viral pneumonia infection in nature whereas " \
-              "the last 2 images are bacterial infection in nature."
-
-gr.Interface(fn=make_prediction,
-             inputs=image_input,
-             outputs="label",
-             examples=[["image1_normal.jpeg"],
-                       ["image2_normal.jpeg"],
-                       ["image3_normal.jpeg"],
-                       ["image4_normal.jpeg"],
-                       ["image1_pneumonia_virus.jpeg"],
-                       ["image2_pneumonia_virus.jpeg"],
-                       ["image1_pneumonia_bacteria.jpeg"],
-                       ["image2_pneumonia_bacteria.jpeg"]],
-             title=title,
-             description=description,
-             article="http://raw.githubusercontent.com/baishalidutta/Pneumonia-Detection/gradio/README.md") \
-    .launch(share=True)
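A minimal sketch of the preprocessing steps that make_prediction performs, run on a random array instead of a real X-ray so it needs neither the .h5 model file nor TensorFlow's image helpers (the dummy input is an assumption for illustration only):

import numpy as np

# Dummy 224x224 RGB "image", standing in for a loaded X-ray.
dummy = np.random.randint(0, 256, size=(224, 224, 3)).astype("float32")
batch = np.expand_dims(dummy / 255.0, axis=0)  # scale to [0, 1], add batch dim

assert batch.shape == (1, 224, 224, 3)        # the shape the CNN expects
assert 0.0 <= batch.min() and batch.max() <= 1.0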
 
 
spaces/Benson/text-generation/Examples/Descargar Clave De Licencia Para Fifa 19.md DELETED
@@ -1,81 +0,0 @@
1
- <br />
2
- <h1>Cómo descargar la clave de licencia para FIFA 19</h1>
3
- <p>FIFA 19 es uno de los videojuegos de fútbol más populares del mundo, desarrollado por EA Sports y lanzado en 2018. Cuenta con la prestigiosa Liga de Campeones de la UEFA, un modo de arranque renovado y una variedad de nuevas características de juego. Si quieres jugar a FIFA 19 en tu PC, necesitarás una clave de licencia para activarlo. Una clave de licencia es un código de 25 caracteres que verifica que su copia de FIFA 19 es original y no se ha utilizado en más dispositivos que los Términos de licencia de software de Microsoft permiten. En este artículo, te mostraremos cómo descargar una clave de licencia para FIFA 19 de diferentes fuentes y cómo activarla en tu PC.</p>
4
- <h2>descargar clave de licencia para fifa 19</h2><br /><p><b><b>DOWNLOAD</b> &#9913;&#9913;&#9913; <a href="https://bltlly.com/2v6Jlv">https://bltlly.com/2v6Jlv</a></b></p><br /><br />
5
- <h2>¿Qué es FIFA 19 y por qué necesitas una clave de licencia? </h2>
6
- <h3>Características y jugabilidad de FIFA 19</h3>
7
- <p>FIFA 19 es la 26ª entrega de la serie FIFA, y la primera en incluir la UEFA Champions League, la UEFA Europa League y la Supercopa de la UEFA. Puedes jugar como tus equipos favoritos y jugadores de todo el mundo, y competir en varios modos como el Modo Carrera, Ultimate Team, The Journey y más. También puedes disfrutar de las nuevas características de juego como el Active Touch System, Dynamic Tactics, 50/50 Battles, Timed Finishing y más. FIFA 19 también tiene gráficos impresionantes, animaciones realistas, bandas sonoras inmersivas y comentarios auténticos. </p>
8
- <h3>Requisitos del sistema FIFA 19 y compatibilidad</h3>
9
- <p>Para ejecutar FIFA 19 en su PC, necesitará cumplir con los requisitos mínimos o recomendados del sistema. Aquí están las especificaciones que necesita saber:</p>
10
- <tabla>
11
- <tr><th>Mínimo</th><th>Recomendado</th></tr>
12
- <tr><td>OS: Windows 7/8.1/10 - 64-Bit</td><td>OS: Windows 10 - 64-Bit</td></tr>
13
- <tr><td>CPU: Core i3-2100 @ 3.1GHz o AMD Phenom II X4 965 @ 3.4 GHz</td><td>CPU: Intel i3 6300T o equivalente</td></tr>
14
- <tr><td>RAM: 8 GB</td><td>RAM: 8 GB</td></tr>
15
-
16
- <tr><td>DISCO DURO: Al menos 50 GB de espacio libre</td><td>DISCO DURO: Al menos 50 GB de espacio libre</td></tr>
17
- <tr><td>VIDEO: NVIDIA GTX 460 1GB o AMD Radeon R7 260</td><td>VIDEO: NVIDIA GeForce GTX 670 o AMD Radeon R9 270X</td></tr>
18
- <tr><td>DirectX: DirectX 11 compatible (7 necesarios para DirectX 11)</td><td>DirectX: DirectX 12 compatible</td></tr>
19
- <tr><td>ENTRADA: Teclado y ratón, controlador analógico dual</td><td>ENTRADA: Teclado y ratón, controlador analógico dual</td></tr>
20
- <tr><td>REQUISITOS DE CONEXIÓN ONLINE: Se requiere conexión a Internet para instalar y jugar. </td><td>REQUISITOS DE CONEXIÓN ONLINE: Se requiere conexión a Internet para instalar y jugar. </td></tr <h3>Métodos de activación FIFA 19 y clave de producto</h3>
21
- <p>Para jugar FIFA 19 en tu PC, tendrás que activarlo con una clave de producto válida. Una clave de producto es un código de 25 caracteres que se parece a esto: XXXXX-XXXXX-XXXXX-XXXXX-XXXXX. Puedes encontrar tu clave de producto de diferentes maneras dependiendo de cómo compraste FIFA 19. Hay tres métodos principales de activación para FIFA 19: activación en línea, activación telefónica y activación fuera de línea. La activación en línea es la forma más fácil y común de activar FIFA 19. Solo tienes que introducir tu clave de producto cuando se te solicite durante la instalación o el lanzamiento del juego, y luego iniciar sesión con tu cuenta de EA. La activación del teléfono es una forma alternativa de activar FIFA 19 si tiene problemas con la activación en línea. Solo tienes que llamar al número gratuito proporcionado por EA y seguir las instrucciones para introducir la clave del producto y obtener un código de confirmación. La activación sin conexión es una forma de último recurso para activar FIFA 19 si no tiene conexión a Internet o acceso telefónico. Solo tienes que ponerte en contacto con el servicio de atención al cliente de EA y proporcionarles la clave de tu producto y alguna información sobre tu PC. A continuación, le dará un archivo de activación sin conexión que puede utilizar para activar FIFA 19 en su PC.</p>
22
- <p></p>
23
- <h2>Cómo obtener una clave de licencia para FIFA 19 de un distribuidor autorizado</h2>
24
-
25
- <p>Una de las maneras más fáciles de obtener una clave de licencia para FIFA 19 es comprar una copia física del juego de un minorista autorizado como Amazon, Walmart, Best Buy, GameStop, etc. Cuando usted compra una copia física de FIFA 19, obtendrá un disco DVD que contiene los archivos del juego y un inserto de papel que tiene su clave de producto impresa en él. Solo tiene que insertar el disco en la unidad de DVD de su PC y siga las instrucciones de instalación. A continuación, puede introducir su clave de producto cuando se le solicite y activar FIFA 19 en línea, por teléfono o fuera de línea. </p>
26
- <h3>Comprar una copia digital de FIFA 19 en una tienda online</h3>
27
- <p>Otra forma de obtener una clave de licencia para FIFA 19 es comprar una copia digital del juego en una tienda en línea como Origin, Steam, GOG, Humble Bundle, etc. Cuando compras una copia digital de FIFA 19, recibirá una confirmación por correo electrónico que contiene su clave de producto y un enlace para descargar los archivos del juego. Solo tienes que hacer clic en el enlace y descargar los archivos del juego a su PC. A continuación, puede introducir su clave de producto cuando se le solicite y activar FIFA 19 en línea, por teléfono o fuera de línea. </p>
28
- <h3>Comprar una copia digital de FIFA 19 desde la aplicación Microsoft Store</h3>
29
- <p>Una tercera manera de obtener una clave de licencia para FIFA 19 es comprar una copia digital del juego desde la aplicación Microsoft Store en su PC con Windows 10. Cuando compres una copia digital de FIFA 19 desde la aplicación de Microsoft Store, no obtendrás una clave de producto ni una confirmación por correo electrónico. En su lugar, obtendrá una licencia digital que está vinculada a su cuenta de Microsoft y su PC. Solo necesitas descargar e instalar el juego desde la aplicación de Microsoft Store e iniciar sesión con tu cuenta de Microsoft. A continuación, puede jugar FIFA 19 sin introducir ninguna clave de producto o activarlo. </p> <h2>Cómo obtener una clave de licencia para FIFA 19 de otras fuentes</h2>
30
- <h3>Usando un software de búsqueda de claves</h3>
31
-
32
- <h3>Usando un generador de llaves o herramienta de crack</h3>
33
- <p>Si no has comprado FIFA 19 en un minorista autorizado o en una tienda online, puedes intentar usar un generador de claves o una herramienta para obtener una clave de licencia para FIFA 19. Un generador de claves o herramienta crack es un programa que genera claves de producto aleatorias o evita el proceso de activación del software. Algunas de las herramientas más populares para FIFA 19 son FIFA 19 Key Generator, FIFA 19 Crack, FIFA 19 Serial Key, etc. Solo necesitas descargar y ejecutar uno de estos programas y obtener una clave de producto o un archivo crack para FIFA 19. A continuación, puede introducir la clave del producto cuando se le solicite o reemplazar el archivo de juego original con el archivo de crack y activar FIFA 19 en línea, por teléfono o sin conexión. </p>
34
- <h3>Usando una actualización gratuita o una oferta de prueba</h3>
35
- <p>Si ya ha comprado una versión anterior de FIFA como FIFA 18 o FIFA 17, puede intentar utilizar una actualización gratuita o una oferta de prueba para obtener una clave de licencia para FIFA 19. Una actualización gratuita o una oferta de prueba es una promoción que le permite actualizar o probar la última versión del software de forma gratuita o a un precio reducido. Algunas de las ofertas gratuitas de actualización o prueba para FIFA 19 son EA Play, Origin Access, EA Access, etc. Solo tienes que registrarte en uno de estos servicios y descargar FIFA 19 de su biblioteca. A continuación, puede jugar FIFA 19 sin introducir ninguna clave de producto o activarlo. </p>
36
- <h2>Cómo activar FIFA 19 con su clave de licencia</h2>
37
- <h3>Introducir la clave del producto durante la instalación o el lanzamiento</h3>
38
- <p>La forma más común de activar FIFA 19 con tu clave de licencia es introducirla durante la instalación o el lanzamiento del juego. Solo tienes que seguir estos pasos:</p>
39
- <ol>
40
- <li>Inserte el disco DVD en la unidad de DVD de su PC o descargue los archivos del juego desde el enlace proporcionado por la tienda en línea. </li>
41
- <li>Ejecute el archivo setup.exe y siga las instrucciones de instalación. </li>
42
- <li>Cuando se le solicite, introduzca su clave de producto en el cuadro y haga clic en Next.</li>
43
-
44
- <li>Espere a que el juego se instale y se inicie. </li>
45
- <li>Disfruta jugando FIFA 19 en tu PC.</li>
46
- </ol>
47
- <h3>Activar su licencia digital online o offline</h3>
48
- <p>Si ha comprado una copia digital de FIFA 19 desde la aplicación Microsoft Store o ha utilizado una actualización gratuita o una oferta de prueba, no tendrá que introducir ninguna clave de producto para activarla. En su lugar, tendrá una licencia digital vinculada a su cuenta de Microsoft y su PC. Solo tiene que seguir estos pasos:</p>
49
- <ol>
50
- <li>Descargar e instalar FIFA 19 desde la aplicación de Microsoft Store o el servicio para el que te registraste. </li>
51
- <li>Inicia sesión con tu cuenta de Microsoft que usaste para comprar o descargar FIFA 19. </li>
52
- <li>Si tiene una conexión a Internet, su licencia digital se activará automáticamente. </li>
53
- <li>Si no tiene una conexión a Internet, puede activar su licencia digital sin conexión mediante el Solucionador de problemas de activación. Para ello, vaya a Configuración > Actualización y seguridad > Activación > Solución de problemas y siga las instrucciones. </li>
54
- <li>Disfruta jugando FIFA 19 en tu PC.</li>
55
- </ol>
56
- <h3>Solución de problemas y errores de activación comunes</h3>
57
- <p>A veces, puede encontrar algunos errores o problemas al intentar activar FIFA 19 con su clave de licencia. Estos son algunos de los más comunes y cómo solucionarlos:</p>
58
- <ul>
59
- <li>Si recibe un mensaje de error que dice "Esta clave de producto ya se ha utilizado en otro dispositivo", significa que ha superado el número de dispositivos que puede activar con su clave de producto. Para solucionar esto, debes desactivar uno de tus dispositivos anteriores iniciando sesión con tu cuenta de EA y yendo a Mi cuenta > Configuración de privacidad > Seguridad > Desactivar dispositivos.</li>
60
- <li>Si recibe un mensaje de error que dice "Esta clave de producto es inválida o incorrecta", significa que ha introducido una clave de producto incorrecta o ha cometido un error tipográfico. Para solucionar esto, debe revisar su clave de producto nuevamente y asegurarse de que la ingrese correctamente y sin espacios. </li>
61
-
62
- <li>Si recibe un mensaje de error que dice "No se puede activar FIFA 19 en este momento", significa que hay un problema con los servidores de EA o su conexión a Internet. Para solucionar esto, debe esperar un tiempo e intentarlo de nuevo más tarde, o verificar su conexión a Internet y asegurarse de que es estable y seguro. </li>
63
- <li>Si recibes un mensaje de error que dice "Límite de activación alcanzado para FIFA 19", significa que has alcanzado el número máximo de veces que puedes activar FIFA 19 con tu clave de producto. Para solucionarlo, debes ponerte en contacto con el servicio de atención al cliente de EA y solicitar un restablecimiento de tu límite de activación. </li>
64
- </ul>
65
- <h2>Conclusión y preguntas frecuentes</h2>
66
- <p>En conclusión, FIFA 19 es un gran videojuego de fútbol que puedes jugar en tu PC con una clave de licencia. Puede obtener una clave de licencia para FIFA 19 de varias fuentes, como comprar una copia física o digital del juego en un minorista autorizado o una tienda en línea, usar un software de búsqueda de claves, usar un generador de claves o una herramienta de crack, o usar una actualización gratuita o una oferta de prueba. También puede activar FIFA 19 con su clave de licencia en línea, por teléfono o fuera de línea, dependiendo de su situación. Sin embargo, también puede encontrar algunos errores o problemas al intentar activar FIFA 19 con su clave de licencia, por lo que debe ser consciente de las posibles soluciones y consejos de solución de problemas. Esperamos que este artículo te haya ayudado a aprender cómo descargar una clave de licencia para FIFA 19 y disfrutar jugando en tu PC.</p>
67
- <p>Aquí hay algunas preguntas frecuentes que puede tener sobre la descarga de una clave de licencia para FIFA 19:</p>
68
- <ol>
69
- <li>Q: ¿Puedo usar la misma clave de producto para FIFA 19 en más de un PC? </li>
70
- <li>A: No, solo puedes usar la misma clave de producto para FIFA 19 en un PC a la vez. Si quieres jugar a FIFA 19 en otro PC, tendrás que desactivar el primer PC y activar el segundo PC con la misma clave de producto. </li>
71
- <li>Q: ¿Puedo compartir mi clave de producto para FIFA 19 con otra persona? </li>
72
-
73
- <li>Q: ¿Puedo obtener un reembolso por mi clave de producto para FIFA 19 si no me gusta el juego? </li>
74
- <li>A: Depende de dónde compraste tu clave de producto para FIFA 19 y cuál es su política de reembolso. Algunos minoristas o tiendas en línea pueden ofrecer un reembolso por su clave de producto para FIFA 19 dentro de un cierto período de tiempo y bajo ciertas condiciones. Tendrá que ponerse en contacto con ellos y solicitar su política de reembolso y proceso. </li>
75
- <li>Q: ¿Puedo jugar FIFA 19 sin una clave de producto o activación? </li>
76
- <li>A: No, no puedes jugar a FIFA 19 sin una clave de producto o activación. Necesitarás una clave de producto válida y una activación para jugar a FIFA 19 en tu PC. Si intentas jugar a FIFA 19 sin una clave de producto o activación, recibirás un mensaje de error y el juego no se iniciará. </li>
77
- <li>Q: ¿Puedo jugar FIFA 19 sin conexión después de activarlo con mi clave de producto? </li>
78
- <li>A: Sí, puedes jugar FIFA 19 sin conexión después de activarlo con tu clave de producto. Sin embargo, algunas características del juego pueden no estar disponibles sin conexión, como modos multijugador en línea, actualizaciones en línea, recompensas en línea, etc. También tendrá que conectarse a Internet al menos una vez cada 30 días para verificar su estado de activación. </li>
79
- </ol></p> 64aa2da5cf<br />
80
- <br />
81
- <br />
 
 
spaces/Big-Web/MMSD/env/Lib/site-packages/setuptools/_vendor/importlib_resources/simple.py DELETED
@@ -1,116 +0,0 @@
-"""
-Interface adapters for low-level readers.
-"""
-
-import abc
-import io
-import itertools
-from typing import BinaryIO, List
-
-from .abc import Traversable, TraversableResources
-
-
-class SimpleReader(abc.ABC):
-    """
-    The minimum, low-level interface required from a resource
-    provider.
-    """
-
-    @abc.abstractproperty
-    def package(self):
-        # type: () -> str
-        """
-        The name of the package for which this reader loads resources.
-        """
-
-    @abc.abstractmethod
-    def children(self):
-        # type: () -> List['SimpleReader']
-        """
-        Obtain an iterable of SimpleReader for available
-        child containers (e.g. directories).
-        """
-
-    @abc.abstractmethod
-    def resources(self):
-        # type: () -> List[str]
-        """
-        Obtain available named resources for this virtual package.
-        """
-
-    @abc.abstractmethod
-    def open_binary(self, resource):
-        # type: (str) -> BinaryIO
-        """
-        Obtain a File-like for a named resource.
-        """
-
-    @property
-    def name(self):
-        return self.package.split('.')[-1]
-
-
-class ResourceHandle(Traversable):
-    """
-    Handle to a named resource in a ResourceReader.
-    """
-
-    def __init__(self, parent, name):
-        # type: (ResourceContainer, str) -> None
-        self.parent = parent
-        self.name = name  # type: ignore
-
-    def is_file(self):
-        return True
-
-    def is_dir(self):
-        return False
-
-    def open(self, mode='r', *args, **kwargs):
-        stream = self.parent.reader.open_binary(self.name)
-        if 'b' not in mode:
-            # Wrap the binary stream for text mode (the original code omitted
-            # the stream argument, which made text-mode opens fail).
-            stream = io.TextIOWrapper(stream, *args, **kwargs)
-        return stream
-
-    def joinpath(self, name):
-        raise RuntimeError("Cannot traverse into a resource")
-
-
-class ResourceContainer(Traversable):
-    """
-    Traversable container for a package's resources via its reader.
-    """
-
-    def __init__(self, reader):
-        # type: (SimpleReader) -> None
-        self.reader = reader
-
-    def is_dir(self):
-        return True
-
-    def is_file(self):
-        return False
-
-    def iterdir(self):
-        files = (ResourceHandle(self, name) for name in self.reader.resources)
-        dirs = map(ResourceContainer, self.reader.children())
-        return itertools.chain(files, dirs)
-
-    def open(self, *args, **kwargs):
-        raise IsADirectoryError()
-
-    def joinpath(self, name):
-        return next(
-            traversable for traversable in self.iterdir() if traversable.name == name
-        )
-
-
-class TraversableReader(TraversableResources, SimpleReader):
-    """
-    A TraversableResources based on SimpleReader. Resource providers
-    may derive from this class to provide the TraversableResources
-    interface by supplying the SimpleReader interface.
-    """
-
-    def files(self):
-        return ResourceContainer(self)
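A toy reader wired into this interface, assuming the module above is importable as importlib_resources.simple (the names DictReader and demo.pkg are made up for illustration). Note that iterdir reads self.reader.resources as an attribute, so the sketch exposes resources as a property; a full TraversableReader subclass would additionally gain files():

import io
from importlib_resources.simple import SimpleReader  # assumed importable

class DictReader(SimpleReader):
    """Toy reader serving resources from an in-memory dict."""

    def __init__(self, package, data):
        self._package = package
        self._data = data  # e.g. {"hello.txt": b"hi"}

    @property
    def package(self):
        return self._package

    def children(self):
        return []  # no child containers in this toy example

    @property
    def resources(self):
        # Exposed as a property because iterdir() above iterates
        # `reader.resources` without calling it.
        return list(self._data)

    def open_binary(self, resource):
        return io.BytesIO(self._data[resource])

reader = DictReader("demo.pkg", {"hello.txt": b"hi"})
print(reader.name)                             # 'pkg'
print(reader.open_binary("hello.txt").read())  # b'hi'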
 
 
spaces/CVPR/LIVE/pybind11/tests/test_callbacks.py DELETED
@@ -1,137 +0,0 @@
-# -*- coding: utf-8 -*-
-import pytest
-from pybind11_tests import callbacks as m
-from threading import Thread
-
-
-def test_callbacks():
-    from functools import partial
-
-    def func1():
-        return "func1"
-
-    def func2(a, b, c, d):
-        return "func2", a, b, c, d
-
-    def func3(a):
-        return "func3({})".format(a)
-
-    assert m.test_callback1(func1) == "func1"
-    assert m.test_callback2(func2) == ("func2", "Hello", "x", True, 5)
-    assert m.test_callback1(partial(func2, 1, 2, 3, 4)) == ("func2", 1, 2, 3, 4)
-    assert m.test_callback1(partial(func3, "partial")) == "func3(partial)"
-    assert m.test_callback3(lambda i: i + 1) == "func(43) = 44"
-
-    f = m.test_callback4()
-    assert f(43) == 44
-    f = m.test_callback5()
-    assert f(number=43) == 44
-
-
-def test_bound_method_callback():
-    # Bound Python method:
-    class MyClass:
-        def double(self, val):
-            return 2 * val
-
-    z = MyClass()
-    assert m.test_callback3(z.double) == "func(43) = 86"
-
-    z = m.CppBoundMethodTest()
-    assert m.test_callback3(z.triple) == "func(43) = 129"
-
-
-def test_keyword_args_and_generalized_unpacking():
-
-    def f(*args, **kwargs):
-        return args, kwargs
-
-    assert m.test_tuple_unpacking(f) == (("positional", 1, 2, 3, 4, 5, 6), {})
-    assert m.test_dict_unpacking(f) == (("positional", 1), {"key": "value", "a": 1, "b": 2})
-    assert m.test_keyword_args(f) == ((), {"x": 10, "y": 20})
-    assert m.test_unpacking_and_keywords1(f) == ((1, 2), {"c": 3, "d": 4})
-    assert m.test_unpacking_and_keywords2(f) == (
-        ("positional", 1, 2, 3, 4, 5),
-        {"key": "value", "a": 1, "b": 2, "c": 3, "d": 4, "e": 5}
-    )
-
-    with pytest.raises(TypeError) as excinfo:
-        m.test_unpacking_error1(f)
-    assert "Got multiple values for keyword argument" in str(excinfo.value)
-
-    with pytest.raises(TypeError) as excinfo:
-        m.test_unpacking_error2(f)
-    assert "Got multiple values for keyword argument" in str(excinfo.value)
-
-    with pytest.raises(RuntimeError) as excinfo:
-        m.test_arg_conversion_error1(f)
-    assert "Unable to convert call argument" in str(excinfo.value)
-
-    with pytest.raises(RuntimeError) as excinfo:
-        m.test_arg_conversion_error2(f)
-    assert "Unable to convert call argument" in str(excinfo.value)
-
-
-def test_lambda_closure_cleanup():
-    m.test_cleanup()
-    cstats = m.payload_cstats()
-    assert cstats.alive() == 0
-    assert cstats.copy_constructions == 1
-    assert cstats.move_constructions >= 1
-
-
-def test_cpp_function_roundtrip():
-    """Test if passing a function pointer from C++ -> Python -> C++ yields the original pointer"""
-
-    assert m.test_dummy_function(m.dummy_function) == "matches dummy_function: eval(1) = 2"
-    assert (m.test_dummy_function(m.roundtrip(m.dummy_function)) ==
-            "matches dummy_function: eval(1) = 2")
-    assert m.roundtrip(None, expect_none=True) is None
-    assert (m.test_dummy_function(lambda x: x + 2) ==
-            "can't convert to function pointer: eval(1) = 3")
-
-    with pytest.raises(TypeError) as excinfo:
-        m.test_dummy_function(m.dummy_function2)
-    assert "incompatible function arguments" in str(excinfo.value)
-
-    with pytest.raises(TypeError) as excinfo:
-        m.test_dummy_function(lambda x, y: x + y)
-    assert any(s in str(excinfo.value) for s in ("missing 1 required positional argument",
-                                                 "takes exactly 2 arguments"))
-
-
-def test_function_signatures(doc):
-    assert doc(m.test_callback3) == "test_callback3(arg0: Callable[[int], int]) -> str"
-    assert doc(m.test_callback4) == "test_callback4() -> Callable[[int], int]"
-
-
-def test_movable_object():
-    assert m.callback_with_movable(lambda _: None) is True
-
-
-def test_async_callbacks():
-    # serves as state for async callback
-    class Item:
-        def __init__(self, value):
-            self.value = value
-
-    res = []
-
-    # generate stateful lambda that will store result in `res`
-    def gen_f():
-        s = Item(3)
-        return lambda j: res.append(s.value + j)
-
-    # do some work async
-    work = [1, 2, 3, 4]
-    m.test_async_callback(gen_f(), work)
-    # wait until work is done
-    from time import sleep
-    sleep(0.5)
-    assert sum(res) == sum([x + 3 for x in work])
-
-
-def test_async_async_callbacks():
-    t = Thread(target=test_async_callbacks)
-    t.start()
-    t.join()
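test_async_callbacks above exercises a stateful closure built on the Python side. A pure-Python sketch of the same pattern (no pybind11 module required) shows why the Item instance stays alive for as long as the callback does:

# The lambda returned by gen_f() keeps its Item alive via __closure__,
# which is exactly the lifetime property the C++ test relies on.
res = []

class Item:
    def __init__(self, value):
        self.value = value

def gen_f():
    s = Item(3)
    return lambda j: res.append(s.value + j)

cb = gen_f()              # Item(3) stays reachable through cb.__closure__
for j in [1, 2, 3, 4]:
    cb(j)
assert res == [4, 5, 6, 7]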
 
 
spaces/CVPR/LIVE/thrust/thrust/system/tbb/detail/copy_if.h DELETED
@@ -1,50 +0,0 @@
-/*
- *  Copyright 2008-2013 NVIDIA Corporation
- *
- *  Licensed under the Apache License, Version 2.0 (the "License");
- *  you may not use this file except in compliance with the License.
- *  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-#pragma once
-
-#include <thrust/detail/config.h>
-#include <thrust/system/tbb/detail/execution_policy.h>
-
-namespace thrust
-{
-namespace system
-{
-namespace tbb
-{
-namespace detail
-{
-
-
-template<typename InputIterator1,
-         typename InputIterator2,
-         typename OutputIterator,
-         typename Predicate>
-  OutputIterator copy_if(tag,
-                         InputIterator1 first,
-                         InputIterator1 last,
-                         InputIterator2 stencil,
-                         OutputIterator result,
-                         Predicate pred);
-
-
-} // end detail
-} // end tbb
-} // end system
-} // end thrust
-
-#include <thrust/system/tbb/detail/copy_if.inl>
-
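A rough Python analogue of the declared semantics (the stencil variant of copy_if: copy each input element whose corresponding stencil element satisfies the predicate). This only illustrates the contract, not the TBB implementation behind the declaration:

def copy_if(values, stencil, pred):
    # Keep values[i] whenever pred(stencil[i]) is true.
    return [v for v, s in zip(values, stencil) if pred(s)]

assert copy_if([10, 20, 30, 40], [0, 1, 0, 1], bool) == [20, 40]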
 
 
spaces/CVPR/lama-example/saicinpainting/evaluation/losses/fid/fid_score.py DELETED
@@ -1,328 +0,0 @@
1
- #!/usr/bin/env python3
2
- """Calculates the Frechet Inception Distance (FID) to evalulate GANs
3
-
4
- The FID metric calculates the distance between two distributions of images.
5
- Typically, we have summary statistics (mean & covariance matrix) of one
6
- of these distributions, while the 2nd distribution is given by a GAN.
7
-
8
- When run as a stand-alone program, it compares the distribution of
9
- images that are stored as PNG/JPEG at a specified location with a
10
- distribution given by summary statistics (in pickle format).
11
-
12
- The FID is calculated by assuming that X_1 and X_2 are the activations of
13
- the pool_3 layer of the inception net for generated samples and real world
14
- samples respectively.
15
-
16
- See --help to see further details.
17
-
18
- Code apapted from https://github.com/bioinf-jku/TTUR to use PyTorch instead
19
- of Tensorflow
20
-
21
- Copyright 2018 Institute of Bioinformatics, JKU Linz
22
-
23
- Licensed under the Apache License, Version 2.0 (the "License");
24
- you may not use this file except in compliance with the License.
25
- You may obtain a copy of the License at
26
-
27
- http://www.apache.org/licenses/LICENSE-2.0
28
-
29
- Unless required by applicable law or agreed to in writing, software
30
- distributed under the License is distributed on an "AS IS" BASIS,
31
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
32
- See the License for the specific language governing permissions and
33
- limitations under the License.
34
- """
35
- import os
36
- import pathlib
37
- from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
38
-
39
- import numpy as np
40
- import torch
41
- # from scipy.misc import imread
42
- from imageio import imread
43
- from PIL import Image, JpegImagePlugin
44
- from scipy import linalg
45
- from torch.nn.functional import adaptive_avg_pool2d
46
- from torchvision.transforms import CenterCrop, Compose, Resize, ToTensor
47
-
48
- try:
49
- from tqdm import tqdm
50
- except ImportError:
51
- # If not tqdm is not available, provide a mock version of it
52
- def tqdm(x): return x
53
-
54
- try:
55
- from .inception import InceptionV3
56
- except ModuleNotFoundError:
57
- from inception import InceptionV3
58
-
59
- parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
60
- parser.add_argument('path', type=str, nargs=2,
61
- help=('Path to the generated images or '
62
- 'to .npz statistic files'))
63
- parser.add_argument('--batch-size', type=int, default=50,
64
- help='Batch size to use')
65
- parser.add_argument('--dims', type=int, default=2048,
66
- choices=list(InceptionV3.BLOCK_INDEX_BY_DIM),
67
- help=('Dimensionality of Inception features to use. '
68
- 'By default, uses pool3 features'))
69
- parser.add_argument('-c', '--gpu', default='', type=str,
70
- help='GPU to use (leave blank for CPU only)')
71
- parser.add_argument('--resize', default=256)
72
-
73
- transform = Compose([Resize(256), CenterCrop(256), ToTensor()])
74
-
75
-
76
- def get_activations(files, model, batch_size=50, dims=2048,
77
- cuda=False, verbose=False, keep_size=False):
78
- """Calculates the activations of the pool_3 layer for all images.
79
-
80
- Params:
81
- -- files : List of image files paths
82
- -- model : Instance of inception model
83
- -- batch_size : Batch size of images for the model to process at once.
84
- Make sure that the number of samples is a multiple of
85
- the batch size, otherwise some samples are ignored. This
86
- behavior is retained to match the original FID score
87
- implementation.
88
- -- dims : Dimensionality of features returned by Inception
89
- -- cuda : If set to True, use GPU
90
- -- verbose : If set to True and parameter out_step is given, the number
91
- of calculated batches is reported.
92
- Returns:
93
- -- A numpy array of dimension (num images, dims) that contains the
94
- activations of the given tensor when feeding inception with the
95
- query tensor.
96
- """
97
- model.eval()
98
-
99
- if len(files) % batch_size != 0:
100
- print(('Warning: number of images is not a multiple of the '
101
- 'batch size. Some samples are going to be ignored.'))
102
- if batch_size > len(files):
103
- print(('Warning: batch size is bigger than the data size. '
104
- 'Setting batch size to data size'))
105
- batch_size = len(files)
106
-
107
- n_batches = len(files) // batch_size
108
- n_used_imgs = n_batches * batch_size
109
-
110
- pred_arr = np.empty((n_used_imgs, dims))
111
-
112
- for i in tqdm(range(n_batches)):
113
- if verbose:
114
- print('\rPropagating batch %d/%d' % (i + 1, n_batches),
115
- end='', flush=True)
116
- start = i * batch_size
117
- end = start + batch_size
118
-
119
- # # Official code goes below
120
- # images = np.array([imread(str(f)).astype(np.float32)
121
- # for f in files[start:end]])
122
-
123
- # # Reshape to (n_images, 3, height, width)
124
- # images = images.transpose((0, 3, 1, 2))
125
- # images /= 255
126
- # batch = torch.from_numpy(images).type(torch.FloatTensor)
127
- # #
128
-
129
- t = transform if not keep_size else ToTensor()
130
-
131
- if isinstance(files[0], pathlib.PosixPath):
132
- images = [t(Image.open(str(f))) for f in files[start:end]]
133
-
134
- elif isinstance(files[0], Image.Image):
135
- images = [t(f) for f in files[start:end]]
136
-
137
- else:
138
- raise ValueError(f"Unknown data type for image: {type(files[0])}")
139
-
140
- batch = torch.stack(images)
141
-
142
- if cuda:
143
- batch = batch.cuda()
144
-
145
- pred = model(batch)[0]
146
-
147
- # If model output is not scalar, apply global spatial average pooling.
148
- # This happens if you choose a dimensionality not equal 2048.
149
- if pred.shape[2] != 1 or pred.shape[3] != 1:
150
- pred = adaptive_avg_pool2d(pred, output_size=(1, 1))
151
-
152
- pred_arr[start:end] = pred.cpu().data.numpy().reshape(batch_size, -1)
153
-
154
- if verbose:
155
- print(' done')
156
-
157
- return pred_arr
158
-
159
-
160
- def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
161
- """Numpy implementation of the Frechet Distance.
162
- The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
163
- and X_2 ~ N(mu_2, C_2) is
164
- d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
165
-
166
- Stable version by Dougal J. Sutherland.
167
-
168
- Params:
169
- -- mu1 : Numpy array containing the activations of a layer of the
170
- inception net (like returned by the function 'get_predictions')
171
- for generated samples.
172
- -- mu2 : The sample mean over activations, precalculated on an
173
- representative data set.
174
- -- sigma1: The covariance matrix over activations for generated samples.
175
- -- sigma2: The covariance matrix over activations, precalculated on an
176
- representative data set.
177
-
178
- Returns:
179
- -- : The Frechet Distance.
180
- """
181
-
182
- mu1 = np.atleast_1d(mu1)
183
- mu2 = np.atleast_1d(mu2)
184
-
185
- sigma1 = np.atleast_2d(sigma1)
186
- sigma2 = np.atleast_2d(sigma2)
187
-
188
- assert mu1.shape == mu2.shape, \
189
- 'Training and test mean vectors have different lengths'
190
- assert sigma1.shape == sigma2.shape, \
191
- 'Training and test covariances have different dimensions'
192
-
193
- diff = mu1 - mu2
194
-
195
- # Product might be almost singular
196
- covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
197
- if not np.isfinite(covmean).all():
198
- msg = ('fid calculation produces singular product; '
199
- 'adding %s to diagonal of cov estimates') % eps
200
- print(msg)
201
- offset = np.eye(sigma1.shape[0]) * eps
202
- covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
203
-
204
- # Numerical error might give slight imaginary component
205
- if np.iscomplexobj(covmean):
206
- # if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
207
- if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-2):
208
- m = np.max(np.abs(covmean.imag))
209
- raise ValueError('Imaginary component {}'.format(m))
210
- covmean = covmean.real
211
-
212
- tr_covmean = np.trace(covmean)
213
-
214
- return (diff.dot(diff) + np.trace(sigma1) +
215
- np.trace(sigma2) - 2 * tr_covmean)
216
-
217
-
218
- def calculate_activation_statistics(files, model, batch_size=50,
219
- dims=2048, cuda=False, verbose=False, keep_size=False):
220
- """Calculation of the statistics used by the FID.
221
- Params:
222
- -- files : List of image files paths
223
- -- model : Instance of inception model
224
- -- batch_size : The images numpy array is split into batches with
225
- batch size batch_size. A reasonable batch size
226
- depends on the hardware.
227
- -- dims : Dimensionality of features returned by Inception
228
- -- cuda : If set to True, use GPU
229
- -- verbose : If set to True, progress is reported as each batch
230
- of images is propagated through the model.
231
- Returns:
232
- -- mu : The mean over samples of the activations of the pool_3 layer of
233
- the inception model.
234
- -- sigma : The covariance matrix of the activations of the pool_3 layer of
235
- the inception model.
236
- """
237
- act = get_activations(files, model, batch_size, dims, cuda, verbose, keep_size=keep_size)
238
- mu = np.mean(act, axis=0)
239
- sigma = np.cov(act, rowvar=False)
240
- return mu, sigma
241
-
242
-
243
- def _compute_statistics_of_path(path, model, batch_size, dims, cuda):
244
- if path.endswith('.npz'):
245
- f = np.load(path)
246
- m, s = f['mu'][:], f['sigma'][:]
247
- f.close()
248
- else:
249
- path = pathlib.Path(path)
250
- files = list(path.glob('*.jpg')) + list(path.glob('*.png'))
251
- m, s = calculate_activation_statistics(files, model, batch_size,
252
- dims, cuda)
253
-
254
- return m, s
255
-
256
-
257
- def _compute_statistics_of_images(images, model, batch_size, dims, cuda, keep_size=False):
258
- if isinstance(images, list): # exact paths to files are provided
259
- m, s = calculate_activation_statistics(images, model, batch_size,
260
- dims, cuda, keep_size=keep_size)
261
-
262
- return m, s
263
-
264
- else:
265
- raise ValueError
266
-
267
-
268
- def calculate_fid_given_paths(paths, batch_size, cuda, dims):
269
- """Calculates the FID of two paths"""
270
- for p in paths:
271
- if not os.path.exists(p):
272
- raise RuntimeError('Invalid path: %s' % p)
273
-
274
- block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
275
-
276
- model = InceptionV3([block_idx])
277
- if cuda:
278
- model.cuda()
279
-
280
- m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size,
281
- dims, cuda)
282
- m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size,
283
- dims, cuda)
284
- fid_value = calculate_frechet_distance(m1, s1, m2, s2)
285
-
286
- return fid_value
287
-
288
-
289
- def calculate_fid_given_images(images, batch_size, cuda, dims, use_globals=False, keep_size=False):
290
- if use_globals:
291
- global FID_MODEL # for multiprocessing
292
-
293
- for imgs in images:
294
- if isinstance(imgs, list) and isinstance(imgs[0], (Image.Image, JpegImagePlugin.JpegImageFile)):
295
- pass
296
- else:
297
- raise RuntimeError('Invalid images')
298
-
299
- block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
300
-
301
- if 'FID_MODEL' not in globals() or not use_globals:
302
- model = InceptionV3([block_idx])
303
- if cuda:
304
- model.cuda()
305
-
306
- if use_globals:
307
- FID_MODEL = model
308
-
309
- else:
310
- model = FID_MODEL
311
-
312
- m1, s1 = _compute_statistics_of_images(images[0], model, batch_size,
313
- dims, cuda, keep_size=False)
314
- m2, s2 = _compute_statistics_of_images(images[1], model, batch_size,
315
- dims, cuda, keep_size=False)
316
- fid_value = calculate_frechet_distance(m1, s1, m2, s2)
317
- return fid_value
318
-
319
-
320
- if __name__ == '__main__':
321
- args = parser.parse_args()
322
- os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
323
-
324
- fid_value = calculate_fid_given_paths(args.path,
325
- args.batch_size,
326
- args.gpu != '',
327
- args.dims)
328
- print('FID: ', fid_value)
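The deleted script above is self-contained; as a minimal usage sketch of its programmatic entry point (the import path is a hypothetical placeholder, as are the ./real and ./fake folders of .jpg/.png images):

    from fid_score import calculate_fid_given_paths  # hypothetical import path

    fid = calculate_fid_given_paths(
        paths=['./real', './fake'],  # two image folders to compare
        batch_size=50,               # ideally divides the image count evenly (see warning above)
        cuda=True,                   # run InceptionV3 on the GPU
        dims=2048,                   # pool_3 features, the standard FID choice
    )
    print('FID:', fid)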
spaces/CVPR/monoscene_lite/monoscene/.ipynb_checkpoints/unet3d_nyu-checkpoint.py DELETED
@@ -1,90 +0,0 @@
1
- # encoding: utf-8
2
- import torch
3
- import torch.nn as nn
4
- import torch.nn.functional as F
5
- import numpy as np
6
- from monoscene.CRP3D import CPMegaVoxels
7
- from monoscene.modules import (
8
- Process,
9
- Upsample,
10
- Downsample,
11
- SegmentationHead,
12
- ASPP,
13
- )
14
-
15
-
16
- class UNet3D(nn.Module):
17
- def __init__(
18
- self,
19
- class_num,
20
- norm_layer,
21
- feature,
22
- full_scene_size,
23
- n_relations=4,
24
- project_res=[],
25
- context_prior=True,
26
- bn_momentum=0.1,
27
- ):
28
- super(UNet3D, self).__init__()
29
- self.business_layer = []
30
- self.project_res = project_res
31
-
32
- self.feature_1_4 = feature
33
- self.feature_1_8 = feature * 2
34
- self.feature_1_16 = feature * 4
35
-
36
- self.feature_1_16_dec = self.feature_1_16
37
- self.feature_1_8_dec = self.feature_1_8
38
- self.feature_1_4_dec = self.feature_1_4
39
-
40
- self.process_1_4 = nn.Sequential(
41
- Process(self.feature_1_4, norm_layer, bn_momentum, dilations=[1, 2, 3]),
42
- Downsample(self.feature_1_4, norm_layer, bn_momentum),
43
- )
44
- self.process_1_8 = nn.Sequential(
45
- Process(self.feature_1_8, norm_layer, bn_momentum, dilations=[1, 2, 3]),
46
- Downsample(self.feature_1_8, norm_layer, bn_momentum),
47
- )
48
- self.up_1_16_1_8 = Upsample(
49
- self.feature_1_16_dec, self.feature_1_8_dec, norm_layer, bn_momentum
50
- )
51
- self.up_1_8_1_4 = Upsample(
52
- self.feature_1_8_dec, self.feature_1_4_dec, norm_layer, bn_momentum
53
- )
54
- self.ssc_head_1_4 = SegmentationHead(
55
- self.feature_1_4_dec, self.feature_1_4_dec, class_num, [1, 2, 3]
56
- )
57
-
58
- self.context_prior = context_prior
59
- size_1_16 = tuple(np.ceil(i / 4).astype(int) for i in full_scene_size)
60
-
61
- if context_prior:
62
- self.CP_mega_voxels = CPMegaVoxels(
63
- self.feature_1_16,
64
- size_1_16,
65
- n_relations=n_relations,
66
- bn_momentum=bn_momentum,
67
- )
68
-
69
- #
70
- def forward(self, input_dict):
71
- res = {}
72
-
73
- x3d_1_4 = input_dict["x3d"]
74
- x3d_1_8 = self.process_1_4(x3d_1_4)
75
- x3d_1_16 = self.process_1_8(x3d_1_8)
76
-
77
- if self.context_prior:
78
- ret = self.CP_mega_voxels(x3d_1_16)
79
- x3d_1_16 = ret["x"]
80
- for k in ret.keys():
81
- res[k] = ret[k]
82
-
83
- x3d_up_1_8 = self.up_1_16_1_8(x3d_1_16) + x3d_1_8
84
- x3d_up_1_4 = self.up_1_8_1_4(x3d_up_1_8) + x3d_1_4
85
-
86
- ssc_logit_1_4 = self.ssc_head_1_4(x3d_up_1_4)
87
-
88
- res["ssc_logit"] = ssc_logit_1_4
89
-
90
- return res
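A rough instantiation sketch for the class above, assuming the monoscene package and its Process/Downsample/Upsample/SegmentationHead modules are importable; the class count, feature width, and grid sizes below are illustrative assumptions, not verified NYU defaults:

    import torch
    import torch.nn as nn

    net = UNet3D(
        class_num=12,                  # assumed number of semantic classes
        norm_layer=nn.BatchNorm3d,
        feature=64,                    # channels of the input 3D feature volume
        full_scene_size=(60, 36, 60),  # assumed full voxel grid (X, Y, Z)
        context_prior=False,           # skip CPMegaVoxels for a lighter smoke test
    )
    x3d = torch.randn(1, 64, 60, 36, 60)  # (B, feature, X, Y, Z)
    out = net({"x3d": x3d})
    print(out["ssc_logit"].shape)          # expected (1, class_num, 60, 36, 60)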
spaces/Chirag1994/Melanoma_Skin_Cancer_Detection_App/model.py DELETED
@@ -1,22 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- import torch.nn.functional as F
4
- from efficientnet_pytorch import EfficientNet
5
-
6
-
7
- class Model(nn.Module):
8
- """
9
- Creates an efficientnet-b5 model instance.
10
- """
11
- def __init__(self, model_name="efficientnet-b5", pool_type=F.adaptive_avg_pool2d):
12
- super().__init__()
13
- self.pool_type = pool_type
14
- self.model_name = model_name
15
- self.backbone = EfficientNet.from_pretrained(model_name)
16
- in_features = getattr(self.backbone, "_fc").in_features
17
- self.classifier = nn.Linear(in_features, 1)
18
-
19
- def forward(self, x):
20
- features = self.pool_type(self.backbone.extract_features(x), 1)
21
- features = features.view(x.size(0), -1)
22
- return self.classifier(features)
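A hedged usage sketch of the model above (requires efficientnet_pytorch to be installed; the batch size and resolution are illustrative, 456x456 being EfficientNet-B5's native input size):

    import torch

    model = Model()                  # downloads pretrained efficientnet-b5 weights
    model.eval()
    x = torch.randn(2, 3, 456, 456)  # two RGB images
    with torch.no_grad():
        logits = model(x)            # shape (2, 1): one melanoma logit per image
    probs = torch.sigmoid(logits)    # convert raw logits to probabilities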
spaces/Classly/README/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: README
3
- emoji: 📊
4
- colorFrom: yellow
5
- colorTo: purple
6
- sdk: static
7
- pinned: false
8
- ---
9
-
10
- Edit this `README.md` markdown file to author your organization card 🔥
spaces/CoWork/dreambooth-training-public/train_dreambooth.py DELETED
@@ -1,889 +0,0 @@
1
- import argparse
2
- import itertools
3
- import math
4
- import os
5
- from pathlib import Path
6
- from typing import Optional
7
- import subprocess
8
- import sys
9
- import gc
10
- import random
11
-
12
- import torch
13
- import torch.nn.functional as F
14
- import torch.utils.checkpoint
15
- from torch.utils.data import Dataset
16
-
17
- from accelerate import Accelerator
18
- from accelerate.logging import get_logger
19
- from accelerate.utils import set_seed
20
- from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
21
- from diffusers.utils.import_utils import is_xformers_available
22
- from diffusers.optimization import get_scheduler
23
- from huggingface_hub import HfFolder, Repository, whoami
24
- from PIL import Image
25
- from torchvision import transforms
26
- from tqdm.auto import tqdm
27
- from transformers import CLIPTextModel, CLIPTokenizer
28
-
29
-
30
- logger = get_logger(__name__)
31
-
32
-
33
- def parse_args():
34
- parser = argparse.ArgumentParser(description="Simple example of a training script.")
35
- parser.add_argument(
36
- "--pretrained_model_name_or_path",
37
- type=str,
38
- default=None,
39
- #required=True,
40
- help="Path to pretrained model or model identifier from huggingface.co/models.",
41
- )
42
- parser.add_argument(
43
- "--tokenizer_name",
44
- type=str,
45
- default=None,
46
- help="Pretrained tokenizer name or path if not the same as model_name",
47
- )
48
- parser.add_argument(
49
- "--instance_data_dir",
50
- type=str,
51
- default=None,
52
- #required=True,
53
- help="A folder containing the training data of instance images.",
54
- )
55
- parser.add_argument(
56
- "--class_data_dir",
57
- type=str,
58
- default=None,
59
- #required=False,
60
- help="A folder containing the training data of class images.",
61
- )
62
- parser.add_argument(
63
- "--instance_prompt",
64
- type=str,
65
- default=None,
66
- help="The prompt with identifier specifying the instance",
67
- )
68
- parser.add_argument(
69
- "--class_prompt",
70
- type=str,
71
- default="",
72
- help="The prompt to specify images in the same class as provided instance images.",
73
- )
74
- parser.add_argument(
75
- "--with_prior_preservation",
76
- default=False,
77
- action="store_true",
78
- help="Flag to add prior preservation loss.",
79
- )
80
- parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
81
- parser.add_argument(
82
- "--num_class_images",
83
- type=int,
84
- default=100,
85
- help=(
86
- "Minimal number of class images for prior preservation loss. If there are not enough images, more will be"
87
- " sampled with class_prompt."
88
- ),
89
- )
90
- parser.add_argument(
91
- "--output_dir",
92
- type=str,
93
- default="",
94
- help="The output directory where the model predictions and checkpoints will be written.",
95
- )
96
- parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
97
- parser.add_argument(
98
- "--resolution",
99
- type=int,
100
- default=512,
101
- help=(
102
- "The resolution for input images, all the images in the train/validation dataset will be resized to this"
103
- " resolution"
104
- ),
105
- )
106
- parser.add_argument(
107
- "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
108
- )
109
- parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder")
110
- parser.add_argument(
111
- "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
112
- )
113
- parser.add_argument(
114
- "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
115
- )
116
- parser.add_argument("--num_train_epochs", type=int, default=1)
117
- parser.add_argument(
118
- "--max_train_steps",
119
- type=int,
120
- default=None,
121
- help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
122
- )
123
- parser.add_argument(
124
- "--gradient_accumulation_steps",
125
- type=int,
126
- default=1,
127
- help="Number of update steps to accumulate before performing a backward/update pass.",
128
- )
129
- parser.add_argument(
130
- "--gradient_checkpointing",
131
- action="store_true",
132
- help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
133
- )
134
- parser.add_argument(
135
- "--learning_rate",
136
- type=float,
137
- default=5e-6,
138
- help="Initial learning rate (after the potential warmup period) to use.",
139
- )
140
- parser.add_argument(
141
- "--scale_lr",
142
- action="store_true",
143
- default=False,
144
- help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
145
- )
146
- parser.add_argument(
147
- "--lr_scheduler",
148
- type=str,
149
- default="constant",
150
- help=(
151
- 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
152
- ' "constant", "constant_with_warmup"]'
153
- ),
154
- )
155
- parser.add_argument(
156
- "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
157
- )
158
- parser.add_argument(
159
- "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
160
- )
161
- parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
162
- parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
163
- parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
164
- parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
165
- parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
166
- parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
167
- parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
168
- parser.add_argument(
169
- "--hub_model_id",
170
- type=str,
171
- default=None,
172
- help="The name of the repository to keep in sync with the local `output_dir`.",
173
- )
174
- parser.add_argument(
175
- "--logging_dir",
176
- type=str,
177
- default="logs",
178
- help=(
179
- "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
180
- " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
181
- ),
182
- )
183
- parser.add_argument(
184
- "--mixed_precision",
185
- type=str,
186
- default="no",
187
- choices=["no", "fp16", "bf16"],
188
- help=(
189
- "Whether to use mixed precision. Choose "
190
- "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
191
- "and an Nvidia Ampere GPU."
192
- ),
193
- )
194
-
195
- parser.add_argument(
196
- "--save_n_steps",
197
- type=int,
198
- default=1,
199
- help=("Save the model every n global_steps"),
200
- )
201
-
202
-
203
- parser.add_argument(
204
- "--save_starting_step",
205
- type=int,
206
- default=1,
207
- help=("The step from which it starts saving intermediary checkpoints"),
208
- )
209
-
210
- parser.add_argument(
211
- "--stop_text_encoder_training",
212
- type=int,
213
- default=1000000,
214
- help=("The step at which the text_encoder is no longer trained"),
215
- )
216
-
217
-
218
- parser.add_argument(
219
- "--image_captions_filename",
220
- action="store_true",
221
- help="Get captions from filename",
222
- )
223
-
224
-
225
- parser.add_argument(
226
- "--dump_only_text_encoder",
227
- action="store_true",
228
- default=False,
229
- help="Dump only text encoder",
230
- )
231
-
232
- parser.add_argument(
233
- "--train_only_unet",
234
- action="store_true",
235
- default=False,
236
- help="Train only the unet",
237
- )
238
-
239
- parser.add_argument(
240
- "--cache_latents",
241
- action="store_true",
242
- default=False,
243
- help="Pre-compute and cache the VAE latents to speed up training",
244
- )
245
-
246
- parser.add_argument(
247
- "--Session_dir",
248
- type=str,
249
- default="",
250
- help="Current session directory",
251
- )
252
-
253
-
254
-
255
-
256
- parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
257
-
258
- args = parser.parse_args()
259
- env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
260
- if env_local_rank != -1 and env_local_rank != args.local_rank:
261
- args.local_rank = env_local_rank
262
-
263
- #if args.instance_data_dir is None:
264
- # raise ValueError("You must specify a train data directory.")
265
-
266
- #if args.with_prior_preservation:
267
- # if args.class_data_dir is None:
268
- # raise ValueError("You must specify a data directory for class images.")
269
- # if args.class_prompt is None:
270
- # raise ValueError("You must specify prompt for class images.")
271
-
272
- return args
273
-
274
-
275
- class DreamBoothDataset(Dataset):
276
- """
277
- A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
278
- It pre-processes the images and tokenizes the prompts.
279
- """
280
-
281
- def __init__(
282
- self,
283
- instance_data_root,
284
- instance_prompt,
285
- tokenizer,
286
- args,
287
- class_data_root=None,
288
- class_prompt=None,
289
- size=512,
290
- center_crop=False,
291
- ):
292
- self.size = size
293
- self.center_crop = center_crop
294
- self.tokenizer = tokenizer
295
- self.image_captions_filename = None
296
-
297
- self.instance_data_root = Path(instance_data_root)
298
- if not self.instance_data_root.exists():
299
- raise ValueError("Instance images root doesn't exist.")
300
-
301
- self.instance_images_path = list(Path(instance_data_root).iterdir())
302
- self.num_instance_images = len(self.instance_images_path)
303
- self.instance_prompt = instance_prompt
304
- self._length = self.num_instance_images
305
-
306
- if args.image_captions_filename:
307
- self.image_captions_filename = True
308
-
309
- if class_data_root is not None:
310
- self.class_data_root = Path(class_data_root)
311
- self.class_data_root.mkdir(parents=True, exist_ok=True)
312
- self.class_images_path = list(self.class_data_root.iterdir())
313
- random.shuffle(self.class_images_path)
314
- self.num_class_images = len(self.class_images_path)
315
- self._length = max(self.num_class_images, self.num_instance_images)
316
- self.class_prompt = class_prompt
317
- else:
318
- self.class_data_root = None
319
-
320
- self.image_transforms = transforms.Compose(
321
- [
322
- transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
323
- transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
324
- transforms.ToTensor(),
325
- transforms.Normalize([0.5], [0.5]),
326
- ]
327
- )
328
-
329
- def __len__(self):
330
- return self._length
331
-
332
- def __getitem__(self, index):
333
- example = {}
334
- path = self.instance_images_path[index % self.num_instance_images]
335
- instance_image = Image.open(path)
336
- if not instance_image.mode == "RGB":
337
- instance_image = instance_image.convert("RGB")
338
-
339
- instance_prompt = self.instance_prompt
340
-
341
- if self.image_captions_filename:
342
- filename = Path(path).stem
343
- pt=''.join([i for i in filename if not i.isdigit()])
344
- pt=pt.replace("_"," ")
345
- pt=pt.replace("(","")
346
- pt=pt.replace(")","")
347
- pt=pt.replace("-","")
348
- instance_prompt = pt
349
- sys.stdout.write(" " +instance_prompt+" ")
350
- sys.stdout.flush()
351
-
352
-
353
- example["instance_images"] = self.image_transforms(instance_image)
354
- example["instance_prompt_ids"] = self.tokenizer(
355
- instance_prompt,
356
- padding="do_not_pad",
357
- truncation=True,
358
- max_length=self.tokenizer.model_max_length,
359
- ).input_ids
360
-
361
- if self.class_data_root:
362
- class_image = Image.open(self.class_images_path[index % self.num_class_images])
363
- if not class_image.mode == "RGB":
364
- class_image = class_image.convert("RGB")
365
- example["class_images"] = self.image_transforms(class_image)
366
- example["class_prompt_ids"] = self.tokenizer(
367
- self.class_prompt,
368
- padding="do_not_pad",
369
- truncation=True,
370
- max_length=self.tokenizer.model_max_length,
371
- ).input_ids
372
-
373
- return example
374
-
375
-
376
-
377
- class PromptDataset(Dataset):
378
- "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
379
-
380
- def __init__(self, prompt, num_samples):
381
- self.prompt = prompt
382
- self.num_samples = num_samples
383
-
384
- def __len__(self):
385
- return self.num_samples
386
-
387
- def __getitem__(self, index):
388
- example = {}
389
- example["prompt"] = self.prompt
390
- example["index"] = index
391
- return example
392
-
393
- class LatentsDataset(Dataset):
394
- def __init__(self, latents_cache, text_encoder_cache):
395
- self.latents_cache = latents_cache
396
- self.text_encoder_cache = text_encoder_cache
397
-
398
- def __len__(self):
399
- return len(self.latents_cache)
400
-
401
- def __getitem__(self, index):
402
- return self.latents_cache[index], self.text_encoder_cache[index]
403
-
404
- def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
405
- if token is None:
406
- token = HfFolder.get_token()
407
- if organization is None:
408
- username = whoami(token)["name"]
409
- return f"{username}/{model_id}"
410
- else:
411
- return f"{organization}/{model_id}"
412
-
413
- def merge_two_dicts(starting_dict: dict, updater_dict: dict) -> dict:
414
- """
415
- Starts from a copy of starting_dict, then adds the key/value pairs from
416
- updater_dict, overwriting any colliding keys (the updater wins).
417
-
418
- Equivalent to d = {**starting_dict, **updater_dict}: on collision, the later dict's value replaces the earlier one.
419
-
420
- :param starting_dict:
421
- :param updater_dict:
422
- :return:
423
- """
424
- new_dict: dict = starting_dict.copy() # start with keys and values of starting_dict
425
- new_dict.update(updater_dict) # overwrite new_dict with keys and values of updater_dict
426
- return new_dict
427
-
428
- def merge_args(args1: argparse.Namespace, args2: argparse.Namespace) -> argparse.Namespace:
429
- """
430
-
431
- ref: https://stackoverflow.com/questions/56136549/how-can-i-merge-two-argparse-namespaces-in-python-2-x
432
- :param args1:
433
- :param args2:
434
- :return:
435
- """
436
- # - the merged args
437
- # The vars() function returns the __dict__ attribute of the given object, e.g. {field: value}.
438
- merged_key_values_for_namespace: dict = merge_two_dicts(vars(args1), vars(args2))
439
- args = argparse.Namespace(**merged_key_values_for_namespace)
440
- return args
441
-
442
- def run_training(args_imported):
443
- args_default = parse_args()
444
- args = merge_args(args_default, args_imported)
445
- print(args)
446
- logging_dir = Path(args.output_dir, args.logging_dir)
447
- i=args.save_starting_step
448
- accelerator = Accelerator(
449
- gradient_accumulation_steps=args.gradient_accumulation_steps,
450
- mixed_precision=args.mixed_precision,
451
- log_with="tensorboard",
452
- logging_dir=logging_dir,
453
- )
454
-
455
- # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
456
- # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
457
- # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
458
- if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
459
- raise ValueError(
460
- "Gradient accumulation is not supported when training the text encoder in distributed training. "
461
- "Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
462
- )
463
-
464
- if args.seed is not None:
465
- set_seed(args.seed)
466
-
467
- if args.with_prior_preservation:
468
- class_images_dir = Path(args.class_data_dir)
469
- if not class_images_dir.exists():
470
- class_images_dir.mkdir(parents=True)
471
- cur_class_images = len(list(class_images_dir.iterdir()))
472
-
473
- if cur_class_images < args.num_class_images:
474
- torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
475
- pipeline = StableDiffusionPipeline.from_pretrained(
476
- args.pretrained_model_name_or_path, torch_dtype=torch_dtype
477
- )
478
- pipeline.set_progress_bar_config(disable=True)
479
-
480
- num_new_images = args.num_class_images - cur_class_images
481
- logger.info(f"Number of class images to sample: {num_new_images}.")
482
-
483
- sample_dataset = PromptDataset(args.class_prompt, num_new_images)
484
- sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
485
-
486
- sample_dataloader = accelerator.prepare(sample_dataloader)
487
- pipeline.to(accelerator.device)
488
-
489
- for example in tqdm(
490
- sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
491
- ):
492
- with torch.autocast("cuda"):
493
- images = pipeline(example["prompt"]).images
494
-
495
- for i, image in enumerate(images):
496
- image.save(class_images_dir / f"{example['index'][i] + cur_class_images}.jpg")
497
-
498
- del pipeline
499
- if torch.cuda.is_available():
500
- torch.cuda.empty_cache()
501
-
502
- # Handle the repository creation
503
- if accelerator.is_main_process:
504
- if args.push_to_hub:
505
- if args.hub_model_id is None:
506
- repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
507
- else:
508
- repo_name = args.hub_model_id
509
- repo = Repository(args.output_dir, clone_from=repo_name)
510
-
511
- with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
512
- if "step_*" not in gitignore:
513
- gitignore.write("step_*\n")
514
- if "epoch_*" not in gitignore:
515
- gitignore.write("epoch_*\n")
516
- elif args.output_dir is not None:
517
- os.makedirs(args.output_dir, exist_ok=True)
518
-
519
- # Load the tokenizer
520
- if args.tokenizer_name:
521
- tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
522
- elif args.pretrained_model_name_or_path:
523
- tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
524
-
525
- # Load models and create wrapper for stable diffusion
526
- if args.train_only_unet:
527
- if os.path.exists(str(args.output_dir+"/text_encoder_trained")):
528
- text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder_trained")
529
- elif os.path.exists(str(args.output_dir+"/text_encoder")):
530
- text_encoder = CLIPTextModel.from_pretrained(args.output_dir, subfolder="text_encoder")
531
- else:
532
- text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
533
- else:
534
- text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
535
- vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
536
- unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
537
- if is_xformers_available():
538
- try:
539
- print("Enabling memory efficient attention with xformers...")
540
- unet.enable_xformers_memory_efficient_attention()
541
- except Exception as e:
542
- logger.warning(
543
- f"Could not enable memory efficient attention. Make sure xformers is installed correctly and a GPU is available: {e}"
544
- )
545
- vae.requires_grad_(False)
546
- if not args.train_text_encoder:
547
- text_encoder.requires_grad_(False)
548
-
549
- if args.gradient_checkpointing:
550
- unet.enable_gradient_checkpointing()
551
- if args.train_text_encoder:
552
- text_encoder.gradient_checkpointing_enable()
553
-
554
- if args.scale_lr:
555
- args.learning_rate = (
556
- args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
557
- )
558
-
559
- # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
560
- if args.use_8bit_adam:
561
- try:
562
- import bitsandbytes as bnb
563
- except ImportError:
564
- raise ImportError(
565
- "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
566
- )
567
-
568
- optimizer_class = bnb.optim.AdamW8bit
569
- else:
570
- optimizer_class = torch.optim.AdamW
571
-
572
- params_to_optimize = (
573
- itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
574
- )
575
- optimizer = optimizer_class(
576
- params_to_optimize,
577
- lr=args.learning_rate,
578
- betas=(args.adam_beta1, args.adam_beta2),
579
- weight_decay=args.adam_weight_decay,
580
- eps=args.adam_epsilon,
581
- )
582
-
583
- noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler")
584
-
585
- train_dataset = DreamBoothDataset(
586
- instance_data_root=args.instance_data_dir,
587
- instance_prompt=args.instance_prompt,
588
- class_data_root=args.class_data_dir if args.with_prior_preservation else None,
589
- class_prompt=args.class_prompt,
590
- tokenizer=tokenizer,
591
- size=args.resolution,
592
- center_crop=args.center_crop,
593
- args=args,
594
- )
595
-
596
- def collate_fn(examples):
597
- input_ids = [example["instance_prompt_ids"] for example in examples]
598
- pixel_values = [example["instance_images"] for example in examples]
599
-
600
- # Concat class and instance examples for prior preservation.
601
- # We do this to avoid doing two forward passes.
602
- if args.with_prior_preservation:
603
- input_ids += [example["class_prompt_ids"] for example in examples]
604
- pixel_values += [example["class_images"] for example in examples]
605
-
606
- pixel_values = torch.stack(pixel_values)
607
- pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
608
-
609
- input_ids = tokenizer.pad({"input_ids": input_ids}, padding=True, return_tensors="pt").input_ids
610
-
611
- batch = {
612
- "input_ids": input_ids,
613
- "pixel_values": pixel_values,
614
- }
615
- return batch
616
-
617
- train_dataloader = torch.utils.data.DataLoader(
618
- train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn
619
- )
620
-
621
- # Scheduler and math around the number of training steps.
622
- overrode_max_train_steps = False
623
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
624
- if args.max_train_steps is None:
625
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
626
- overrode_max_train_steps = True
627
-
628
- lr_scheduler = get_scheduler(
629
- args.lr_scheduler,
630
- optimizer=optimizer,
631
- num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
632
- num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
633
- )
634
-
635
- if args.train_text_encoder:
636
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
637
- unet, text_encoder, optimizer, train_dataloader, lr_scheduler
638
- )
639
- else:
640
- unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
641
- unet, optimizer, train_dataloader, lr_scheduler
642
- )
643
-
644
- weight_dtype = torch.float32
645
- if args.mixed_precision == "fp16":
646
- weight_dtype = torch.float16
647
- elif args.mixed_precision == "bf16":
648
- weight_dtype = torch.bfloat16
649
-
650
- # Move text_encode and vae to gpu.
651
- # For mixed precision training we cast the text_encoder and vae weights to half-precision
652
- # as these models are only used for inference, keeping weights in full precision is not required.
653
- vae.to(accelerator.device, dtype=weight_dtype)
654
- if not args.train_text_encoder:
655
- text_encoder.to(accelerator.device, dtype=weight_dtype)
656
-
657
-
658
- if args.cache_latents:
659
- latents_cache = []
660
- text_encoder_cache = []
661
- for batch in tqdm(train_dataloader, desc="Caching latents"):
662
- with torch.no_grad():
663
- batch["pixel_values"] = batch["pixel_values"].to(accelerator.device, non_blocking=True, dtype=weight_dtype)
664
- batch["input_ids"] = batch["input_ids"].to(accelerator.device, non_blocking=True)
665
- latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist)
666
- if args.train_text_encoder:
667
- text_encoder_cache.append(batch["input_ids"])
668
- else:
669
- text_encoder_cache.append(text_encoder(batch["input_ids"])[0])
670
- train_dataset = LatentsDataset(latents_cache, text_encoder_cache)
671
- train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, collate_fn=lambda x: x, shuffle=True)
672
-
673
- del vae
674
- #if not args.train_text_encoder:
675
- # del text_encoder
676
- if torch.cuda.is_available():
677
- torch.cuda.empty_cache()
678
-
679
- # We need to recalculate our total training steps as the size of the training dataloader may have changed.
680
- num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
681
- if overrode_max_train_steps:
682
- args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
683
- # Afterwards we recalculate our number of training epochs
684
- args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
685
-
686
- # We need to initialize the trackers we use, and also store our configuration.
687
- # The trackers initialize automatically on the main process.
688
- if accelerator.is_main_process:
689
- accelerator.init_trackers("dreambooth", config=vars(args))
690
-
691
- def bar(prg):
692
- br='|'+'█' * prg + ' ' * (25-prg)+'|'
693
- return br
694
-
695
- # Train!
696
- total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
697
-
698
- logger.info("***** Running training *****")
699
- logger.info(f" Num examples = {len(train_dataset)}")
700
- logger.info(f" Num batches each epoch = {len(train_dataloader)}")
701
- logger.info(f" Num Epochs = {args.num_train_epochs}")
702
- logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
703
- logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
704
- logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
705
- logger.info(f" Total optimization steps = {args.max_train_steps}")
706
- # Only show the progress bar once on each machine.
707
- progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
708
- global_step = 0
709
-
710
- for epoch in range(args.num_train_epochs):
711
- unet.train()
712
- if args.train_text_encoder:
713
- text_encoder.train()
714
- for step, batch in enumerate(train_dataloader):
715
- with accelerator.accumulate(unet):
716
- # Convert images to latent space
717
- with torch.no_grad():
718
- if args.cache_latents:
719
- latents_dist = batch[0][0]
720
- else:
721
- latents_dist = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist
722
- latents = latents_dist.sample() * 0.18215
723
-
724
- # Sample noise that we'll add to the latents
725
- noise = torch.randn_like(latents)
726
- bsz = latents.shape[0]
727
- # Sample a random timestep for each image
728
- timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
729
- timesteps = timesteps.long()
730
-
731
- # Add noise to the latents according to the noise magnitude at each timestep
732
- # (this is the forward diffusion process)
733
- noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
734
-
735
- # Get the text embedding for conditioning
736
- if(args.cache_latents):
737
- if args.train_text_encoder:
738
- encoder_hidden_states = text_encoder(batch[0][1])[0]
739
- else:
740
- encoder_hidden_states = batch[0][1]
741
- else:
742
- encoder_hidden_states = text_encoder(batch["input_ids"])[0]
743
-
744
- # Predict the noise residual
745
- model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
746
-
747
- # Get the target for loss depending on the prediction type
748
- if noise_scheduler.config.prediction_type == "epsilon":
749
- target = noise
750
- elif noise_scheduler.config.prediction_type == "v_prediction":
751
- target = noise_scheduler.get_velocity(latents, noise, timesteps)
752
- else:
753
- raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
754
-
755
- if args.with_prior_preservation:
756
- # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
757
- model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
758
- target, target_prior = torch.chunk(target, 2, dim=0)
759
-
760
- # Compute instance loss
761
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean()
762
-
763
- # Compute prior loss
764
- prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
765
-
766
- # Add the prior loss to the instance loss.
767
- loss = loss + args.prior_loss_weight * prior_loss
768
- else:
769
- loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
770
-
771
- accelerator.backward(loss)
772
- if accelerator.sync_gradients:
773
- params_to_clip = (
774
- itertools.chain(unet.parameters(), text_encoder.parameters())
775
- if args.train_text_encoder
776
- else unet.parameters()
777
- )
778
- accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
779
- optimizer.step()
780
- lr_scheduler.step()
781
- optimizer.zero_grad()
782
-
783
- # Checks if the accelerator has performed an optimization step behind the scenes
784
- if accelerator.sync_gradients:
785
- progress_bar.update(1)
786
- global_step += 1
787
-
788
- fll=round((global_step*100)/args.max_train_steps)
789
- fll=round(fll/4)
790
- pr=bar(fll)
791
-
792
- logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
793
- progress_bar.set_postfix(**logs)
794
- progress_bar.set_description_str("Progress:"+pr)
795
- accelerator.log(logs, step=global_step)
796
-
797
- if global_step >= args.max_train_steps:
798
- break
799
-
800
- if args.train_text_encoder and global_step == args.stop_text_encoder_training and global_step >= 30:
801
- if accelerator.is_main_process:
802
- print(" " +" Freezing the text_encoder ..."+" ")
803
- frz_dir=args.output_dir + "/text_encoder_frozen"
804
- if os.path.exists(frz_dir):
805
- subprocess.call('rm -r '+ frz_dir, shell=True)
806
- os.mkdir(frz_dir)
807
- pipeline = StableDiffusionPipeline.from_pretrained(
808
- args.pretrained_model_name_or_path,
809
- unet=accelerator.unwrap_model(unet),
810
- text_encoder=accelerator.unwrap_model(text_encoder),
811
- )
812
- pipeline.text_encoder.save_pretrained(frz_dir)
813
-
814
- if args.save_n_steps >= 200:
815
- if global_step < args.max_train_steps and global_step+1==i:
816
- ckpt_name = "_step_" + str(global_step+1)
817
- save_dir = Path(args.output_dir+ckpt_name)
818
- save_dir=str(save_dir)
819
- save_dir=save_dir.replace(" ", "_")
820
- if not os.path.exists(save_dir):
821
- os.mkdir(save_dir)
822
- inst=save_dir[16:]
823
- inst=inst.replace(" ", "_")
824
- print(" SAVING CHECKPOINT: "+args.Session_dir+"/"+inst+".ckpt")
825
- # Create the pipeline using the trained modules and save it.
826
- if accelerator.is_main_process:
827
- pipeline = StableDiffusionPipeline.from_pretrained(
828
- args.pretrained_model_name_or_path,
829
- unet=accelerator.unwrap_model(unet),
830
- text_encoder=accelerator.unwrap_model(text_encoder),
831
- )
832
- pipeline.save_pretrained(save_dir)
833
- frz_dir=args.output_dir + "/text_encoder_frozen"
834
- if args.train_text_encoder and os.path.exists(frz_dir):
835
- subprocess.call('rm -r '+save_dir+'/text_encoder/*.*', shell=True)
836
- subprocess.call('cp -f '+frz_dir +'/*.* '+ save_dir+'/text_encoder', shell=True)
837
- chkpth=args.Session_dir+"/"+inst+".ckpt"
838
- subprocess.call('python /content/diffusers/scripts/convert_diffusers_to_original_stable_diffusion.py --model_path ' + save_dir + ' --checkpoint_path ' + chkpth + ' --half', shell=True)
839
- subprocess.call('rm -r '+ save_dir, shell=True)
840
- i=i+args.save_n_steps
841
-
842
- accelerator.wait_for_everyone()
843
-
844
- # Create the pipeline using the trained modules and save it.
845
- if accelerator.is_main_process:
846
- if args.dump_only_text_encoder:
847
- txt_dir=args.output_dir + "/text_encoder_trained"
848
- if not os.path.exists(txt_dir):
849
- os.mkdir(txt_dir)
850
- pipeline = StableDiffusionPipeline.from_pretrained(
851
- args.pretrained_model_name_or_path,
852
- unet=accelerator.unwrap_model(unet),
853
- text_encoder=accelerator.unwrap_model(text_encoder),
854
- )
855
- pipeline.text_encoder.save_pretrained(txt_dir)
856
-
857
- elif args.train_only_unet:
858
- pipeline = StableDiffusionPipeline.from_pretrained(
859
- args.pretrained_model_name_or_path,
860
- unet=accelerator.unwrap_model(unet),
861
- text_encoder=accelerator.unwrap_model(text_encoder),
862
- )
863
- pipeline.save_pretrained(args.output_dir)
864
- txt_dir=args.output_dir + "/text_encoder_trained"
865
- subprocess.call('rm -r '+txt_dir, shell=True)
866
-
867
- else:
868
- pipeline = StableDiffusionPipeline.from_pretrained(
869
- args.pretrained_model_name_or_path,
870
- unet=accelerator.unwrap_model(unet),
871
- text_encoder=accelerator.unwrap_model(text_encoder),
872
- )
873
- frz_dir=args.output_dir + "/text_encoder_frozen"
874
- pipeline.save_pretrained(args.output_dir)
875
- if args.train_text_encoder and os.path.exists(frz_dir):
876
- subprocess.call('mv -f '+frz_dir +'/*.* '+ args.output_dir+'/text_encoder', shell=True)
877
- subprocess.call('rm -r '+ frz_dir, shell=True)
878
-
879
- if args.push_to_hub:
880
- repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
881
-
882
- accelerator.end_training()
883
- del pipeline
884
- torch.cuda.empty_cache()
885
- gc.collect()
886
- if __name__ == "__main__":
887
- pass
888
- #main()
889
-
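Since the script's own __main__ block is a no-op, run_training() is clearly meant to be driven from the Space's app code. A minimal sketch of doing so; every value below is an illustrative assumption, and it presumes the host process passes no conflicting CLI arguments, since parse_args() is still called internally for defaults:

    import argparse
    from train_dreambooth import run_training  # hypothetical import path

    overrides = argparse.Namespace(
        pretrained_model_name_or_path="runwayml/stable-diffusion-v1-5",  # assumed base model
        instance_data_dir="./instance_images",    # folder of training photos
        instance_prompt="a photo of sks person",  # identifier-style prompt
        output_dir="./dreambooth_out",
        resolution=512,
        train_batch_size=1,
        max_train_steps=800,
    )
    run_training(overrides)  # merged over parse_args() defaults via merge_args()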
spaces/CofAI/chat.b4/g4f/Provider/Providers/Lockchat.py DELETED
@@ -1,32 +0,0 @@
1
- import requests
2
- import os
3
- import json
4
- from ...typing import sha256, Dict, get_type_hints
5
- url = 'http://supertest.lockchat.app'
6
- model = ['gpt-4', 'gpt-3.5-turbo']
7
- supports_stream = True
8
- needs_auth = False
9
-
10
- def _create_completion(model: str, messages: list, stream: bool, temperature: float = 0.7, **kwargs):
11
-
12
- payload = {
13
- "temperature": temperature,
14
- "messages": messages,
15
- "model": model,
16
- "stream": True,
17
- }
18
- headers = {
19
- "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
20
- }
21
- response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
22
- json=payload, headers=headers, stream=True)
23
- for token in response.iter_lines():
24
- if b'The model: `gpt-4` does not exist' in token:
25
- print('error, retrying...')
26
- yield from _create_completion(model=model, messages=messages, stream=stream, temperature=temperature, **kwargs)
27
- if b"content" in token:
28
- token = json.loads(token.decode('utf-8').split('data: ')[1])['choices'][0]['delta'].get('content')
29
- if token: yield (token)
30
-
31
- params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
32
- '(%s)' % ', '.join([f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
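A hedged sketch of consuming the streaming generator above; the supertest.lockchat.app endpoint may no longer be live, so treat this as illustrative only:

    messages = [{"role": "user", "content": "Say hello"}]
    for chunk in _create_completion(model='gpt-3.5-turbo', messages=messages, stream=True):
        print(chunk, end='', flush=True)  # tokens arrive incrementally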
spaces/Crossper6/stable-diffusion-webui/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: Stable Diffusion Webui
3
- emoji: 💻
4
- colorFrom: yellow
5
- colorTo: gray
6
- sdk: gradio
7
- sdk_version: 3.12.0
8
- app_file: app.py
9
- pinned: false
10
- license: openrail
11
- duplicated_from: voltcutter/stable-diffusion-webui
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/DAMO-NLP-SG/Video-LLaMA/video_llama/models/__init__.py DELETED
@@ -1,201 +0,0 @@
1
- """
2
- Adapted from salesforce@LAVIS Vision-CAIR@MiniGPT-4. Below is the original copyright:
3
- Copyright (c) 2022, salesforce.com, inc.
4
- All rights reserved.
5
- SPDX-License-Identifier: BSD-3-Clause
6
- For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause
7
- """
8
-
9
- import logging
10
- import torch
11
- from omegaconf import OmegaConf
12
-
13
- from video_llama.common.registry import registry
14
- from video_llama.models.base_model import BaseModel
15
- from video_llama.models.blip2 import Blip2Base
16
- from video_llama.models.video_llama import VideoLLAMA
17
- from video_llama.processors.base_processor import BaseProcessor
18
-
19
-
20
- __all__ = [
21
- "load_model",
22
- "BaseModel",
23
- "Blip2Base",
24
- "VideoLLAMA"
25
- ]
26
-
27
-
28
- def load_model(name, model_type, is_eval=False, device="cpu", checkpoint=None):
29
- """
30
- Load supported models.
31
-
32
- To list all available models and types in registry:
33
- >>> from video_llama.models import model_zoo
34
- >>> print(model_zoo)
35
-
36
- Args:
37
- name (str): name of the model.
38
- model_type (str): type of the model.
39
- is_eval (bool): whether the model is in eval mode. Default: False.
40
- device (str): device to use. Default: "cpu".
41
- checkpoint (str): path to checkpoint. Default: None.
42
- Note that expecting the checkpoint to have the same keys in state_dict as the model.
43
-
44
- Returns:
45
- model (torch.nn.Module): model.
46
- """
47
-
48
- model = registry.get_model_class(name).from_pretrained(model_type=model_type)
49
-
50
- if checkpoint is not None:
51
- model.load_checkpoint(checkpoint)
52
-
53
- if is_eval:
54
- model.eval()
55
-
56
- if device == "cpu":
57
- model = model.float()
58
-
59
- return model.to(device)
60
-
61
-
62
- def load_preprocess(config):
63
- """
64
- Load preprocessor configs and construct preprocessors.
65
-
66
- If no preprocessor is specified, return BaseProcessor, which does not do any preprocessing.
67
-
68
- Args:
69
- config (dict): preprocessor configs.
70
-
71
- Returns:
72
- vis_processors (dict): preprocessors for visual inputs.
73
- txt_processors (dict): preprocessors for text inputs.
74
-
75
- Key is "train" or "eval" for processors used in training and evaluation respectively.
76
- """
77
-
78
- def _build_proc_from_cfg(cfg):
79
- return (
80
- registry.get_processor_class(cfg.name).from_config(cfg)
81
- if cfg is not None
82
- else BaseProcessor()
83
- )
84
-
85
- vis_processors = dict()
86
- txt_processors = dict()
87
-
88
- vis_proc_cfg = config.get("vis_processor")
89
- txt_proc_cfg = config.get("text_processor")
90
-
91
- if vis_proc_cfg is not None:
92
- vis_train_cfg = vis_proc_cfg.get("train")
93
- vis_eval_cfg = vis_proc_cfg.get("eval")
94
- else:
95
- vis_train_cfg = None
96
- vis_eval_cfg = None
97
-
98
- vis_processors["train"] = _build_proc_from_cfg(vis_train_cfg)
99
- vis_processors["eval"] = _build_proc_from_cfg(vis_eval_cfg)
100
-
101
- if txt_proc_cfg is not None:
102
- txt_train_cfg = txt_proc_cfg.get("train")
103
- txt_eval_cfg = txt_proc_cfg.get("eval")
104
- else:
105
- txt_train_cfg = None
106
- txt_eval_cfg = None
107
-
108
- txt_processors["train"] = _build_proc_from_cfg(txt_train_cfg)
109
- txt_processors["eval"] = _build_proc_from_cfg(txt_eval_cfg)
110
-
111
- return vis_processors, txt_processors
112
-
113
-
114
- def load_model_and_preprocess(name, model_type, is_eval=False, device="cpu"):
115
- """
116
- Load model and its related preprocessors.
117
-
118
- List all available models and types in registry:
119
- >>> from video_llama.models import model_zoo
120
- >>> print(model_zoo)
121
-
122
- Args:
123
- name (str): name of the model.
124
- model_type (str): type of the model.
125
- is_eval (bool): whether the model is in eval mode. Default: False.
126
- device (str): device to use. Default: "cpu".
127
-
128
- Returns:
129
- model (torch.nn.Module): model.
130
- vis_processors (dict): preprocessors for visual inputs.
131
- txt_processors (dict): preprocessors for text inputs.
132
- """
133
- model_cls = registry.get_model_class(name)
134
-
135
- # load model
136
- model = model_cls.from_pretrained(model_type=model_type)
137
-
138
- if is_eval:
139
- model.eval()
140
-
141
- # load preprocess
142
- cfg = OmegaConf.load(model_cls.default_config_path(model_type))
143
- if cfg is not None:
144
- preprocess_cfg = cfg.preprocess
145
-
146
- vis_processors, txt_processors = load_preprocess(preprocess_cfg)
147
- else:
148
- vis_processors, txt_processors = None, None
149
- logging.info(
150
- f"""No default preprocess for model {name} ({model_type}).
151
- This can happen if the model is not finetuned on downstream datasets,
152
- or it is not intended for direct use without finetuning.
153
- """
154
- )
155
-
156
- if device == "cpu" or device == torch.device("cpu"):
157
- model = model.float()
158
-
159
- return model.to(device), vis_processors, txt_processors
160
-
161
-
162
- class ModelZoo:
163
- """
164
- A utility class to create string representation of available model architectures and types.
165
-
166
- >>> from video_llama.models import model_zoo
167
- >>> # list all available models
168
- >>> print(model_zoo)
169
- >>> # show total number of models
170
- >>> print(len(model_zoo))
171
- """
172
-
173
- def __init__(self) -> None:
174
- self.model_zoo = {
175
- k: list(v.PRETRAINED_MODEL_CONFIG_DICT.keys())
176
- for k, v in registry.mapping["model_name_mapping"].items()
177
- }
178
-
179
- def __str__(self) -> str:
180
- return (
181
- "=" * 50
182
- + "\n"
183
- + f"{'Architectures':<30} {'Types'}\n"
184
- + "=" * 50
185
- + "\n"
186
- + "\n".join(
187
- [
188
- f"{name:<30} {', '.join(types)}"
189
- for name, types in self.model_zoo.items()
190
- ]
191
- )
192
- )
193
-
194
- def __iter__(self):
195
- return iter(self.model_zoo.items())
196
-
197
- def __len__(self):
198
- return sum([len(v) for v in self.model_zoo.values()])
199
-
200
-
201
- model_zoo = ModelZoo()
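A hedged usage sketch based on the docstrings above; the name and model_type strings are assumptions (the real registry keys live in the repo's configs), and actually loading weights requires the full Video-LLaMA setup:

    from video_llama.models import model_zoo, load_model_and_preprocess

    print(model_zoo)  # table of registered architectures and their types

    model, vis_processors, txt_processors = load_model_and_preprocess(
        name="video_llama",            # assumed registry name
        model_type="pretrain_vicuna",  # assumed config key
        is_eval=True,
        device="cpu",
    )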
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/attr/validators.py DELETED
@@ -1,720 +0,0 @@
- # SPDX-License-Identifier: MIT
-
- """
- Commonly useful validators.
- """
-
-
- import operator
- import re
-
- from contextlib import contextmanager
- from re import Pattern
-
- from ._config import get_run_validators, set_run_validators
- from ._make import _AndValidator, and_, attrib, attrs
- from .converters import default_if_none
- from .exceptions import NotCallableError
-
-
- __all__ = [
-     "and_",
-     "deep_iterable",
-     "deep_mapping",
-     "disabled",
-     "ge",
-     "get_disabled",
-     "gt",
-     "in_",
-     "instance_of",
-     "is_callable",
-     "le",
-     "lt",
-     "matches_re",
-     "max_len",
-     "min_len",
-     "not_",
-     "optional",
-     "provides",
-     "set_disabled",
- ]
-
-
- def set_disabled(disabled):
-     """
-     Globally disable or enable running validators.
-
-     By default, they are run.
-
-     :param disabled: If ``True``, disable running all validators.
-     :type disabled: bool
-
-     .. warning::
-
-         This function is not thread-safe!
-
-     .. versionadded:: 21.3.0
-     """
-     set_run_validators(not disabled)
-
-
- def get_disabled():
-     """
-     Return a bool indicating whether validators are currently disabled or not.
-
-     :return: ``True`` if validators are currently disabled.
-     :rtype: bool
-
-     .. versionadded:: 21.3.0
-     """
-     return not get_run_validators()
-
-
- @contextmanager
- def disabled():
-     """
-     Context manager that disables running validators within its context.
-
-     .. warning::
-
-         This context manager is not thread-safe!
-
-     .. versionadded:: 21.3.0
-     """
-     set_run_validators(False)
-     try:
-         yield
-     finally:
-         set_run_validators(True)
-
-
- @attrs(repr=False, slots=True, hash=True)
- class _InstanceOfValidator:
-     type = attrib()
-
-     def __call__(self, inst, attr, value):
-         """
-         We use a callable class to be able to change the ``__repr__``.
-         """
-         if not isinstance(value, self.type):
-             raise TypeError(
-                 "'{name}' must be {type!r} (got {value!r} that is a "
-                 "{actual!r}).".format(
-                     name=attr.name,
-                     type=self.type,
-                     actual=value.__class__,
-                     value=value,
-                 ),
-                 attr,
-                 self.type,
-                 value,
-             )
-
-     def __repr__(self):
-         return "<instance_of validator for type {type!r}>".format(
-             type=self.type
-         )
-
-
- def instance_of(type):
-     """
-     A validator that raises a `TypeError` if the initializer is called
-     with a wrong type for this particular attribute (checks are performed using
-     `isinstance` therefore it's also valid to pass a tuple of types).
-
-     :param type: The type to check for.
-     :type type: type or tuple of type
-
-     :raises TypeError: With a human readable error message, the attribute
-         (of type `attrs.Attribute`), the expected type, and the value it
-         got.
-     """
-     return _InstanceOfValidator(type)
-
-
- @attrs(repr=False, frozen=True, slots=True)
- class _MatchesReValidator:
-     pattern = attrib()
-     match_func = attrib()
-
-     def __call__(self, inst, attr, value):
-         """
-         We use a callable class to be able to change the ``__repr__``.
-         """
-         if not self.match_func(value):
-             raise ValueError(
-                 "'{name}' must match regex {pattern!r}"
-                 " ({value!r} doesn't)".format(
-                     name=attr.name, pattern=self.pattern.pattern, value=value
-                 ),
-                 attr,
-                 self.pattern,
-                 value,
-             )
-
-     def __repr__(self):
-         return "<matches_re validator for pattern {pattern!r}>".format(
-             pattern=self.pattern
-         )
-
-
- def matches_re(regex, flags=0, func=None):
-     r"""
-     A validator that raises `ValueError` if the initializer is called
-     with a string that doesn't match *regex*.
-
-     :param regex: a regex string or precompiled pattern to match against
-     :param int flags: flags that will be passed to the underlying re function
-         (default 0)
-     :param callable func: which underlying `re` function to call. Valid options
-         are `re.fullmatch`, `re.search`, and `re.match`; the default ``None``
-         means `re.fullmatch`. For performance reasons, the pattern is always
-         precompiled using `re.compile`.
-
-     .. versionadded:: 19.2.0
-     .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern.
-     """
-     valid_funcs = (re.fullmatch, None, re.search, re.match)
-     if func not in valid_funcs:
-         raise ValueError(
-             "'func' must be one of {}.".format(
-                 ", ".join(
-                     sorted(
-                         e and e.__name__ or "None" for e in set(valid_funcs)
-                     )
-                 )
-             )
-         )
-
-     if isinstance(regex, Pattern):
-         if flags:
-             raise TypeError(
-                 "'flags' can only be used with a string pattern; "
-                 "pass flags to re.compile() instead"
-             )
-         pattern = regex
-     else:
-         pattern = re.compile(regex, flags)
-
-     if func is re.match:
-         match_func = pattern.match
-     elif func is re.search:
-         match_func = pattern.search
-     else:
-         match_func = pattern.fullmatch
-
-     return _MatchesReValidator(pattern, match_func)
-
-
- @attrs(repr=False, slots=True, hash=True)
- class _ProvidesValidator:
-     interface = attrib()
-
-     def __call__(self, inst, attr, value):
-         """
-         We use a callable class to be able to change the ``__repr__``.
-         """
-         if not self.interface.providedBy(value):
-             raise TypeError(
-                 "'{name}' must provide {interface!r} which {value!r} "
-                 "doesn't.".format(
-                     name=attr.name, interface=self.interface, value=value
-                 ),
-                 attr,
-                 self.interface,
-                 value,
-             )
-
-     def __repr__(self):
-         return "<provides validator for interface {interface!r}>".format(
-             interface=self.interface
-         )
-
-
- def provides(interface):
-     """
-     A validator that raises a `TypeError` if the initializer is called
-     with an object that does not provide the requested *interface* (checks are
-     performed using ``interface.providedBy(value)`` (see `zope.interface
-     <https://zopeinterface.readthedocs.io/en/latest/>`_).
-
-     :param interface: The interface to check for.
-     :type interface: ``zope.interface.Interface``
-
-     :raises TypeError: With a human readable error message, the attribute
-         (of type `attrs.Attribute`), the expected interface, and the
-         value it got.
-
-     .. deprecated:: 23.1.0
-     """
-     import warnings
-
-     warnings.warn(
-         "attrs's zope-interface support is deprecated and will be removed in, "
-         "or after, April 2024.",
-         DeprecationWarning,
-         stacklevel=2,
-     )
-     return _ProvidesValidator(interface)
-
-
- @attrs(repr=False, slots=True, hash=True)
- class _OptionalValidator:
-     validator = attrib()
-
-     def __call__(self, inst, attr, value):
-         if value is None:
-             return
-
-         self.validator(inst, attr, value)
-
-     def __repr__(self):
-         return "<optional validator for {what} or None>".format(
-             what=repr(self.validator)
-         )
-
-
- def optional(validator):
-     """
-     A validator that makes an attribute optional. An optional attribute is one
-     which can be set to ``None`` in addition to satisfying the requirements of
-     the sub-validator.
-
-     :param Callable | tuple[Callable] | list[Callable] validator: A validator
-         (or validators) that is used for non-``None`` values.
-
-     .. versionadded:: 15.1.0
-     .. versionchanged:: 17.1.0 *validator* can be a list of validators.
-     .. versionchanged:: 23.1.0 *validator* can also be a tuple of validators.
-     """
-     if isinstance(validator, (list, tuple)):
-         return _OptionalValidator(_AndValidator(validator))
-
-     return _OptionalValidator(validator)
-
-
- @attrs(repr=False, slots=True, hash=True)
- class _InValidator:
-     options = attrib()
-
-     def __call__(self, inst, attr, value):
-         try:
-             in_options = value in self.options
-         except TypeError:  # e.g. `1 in "abc"`
-             in_options = False
-
-         if not in_options:
-             raise ValueError(
-                 "'{name}' must be in {options!r} (got {value!r})".format(
-                     name=attr.name, options=self.options, value=value
-                 ),
-                 attr,
-                 self.options,
-                 value,
-             )
-
-     def __repr__(self):
-         return "<in_ validator with options {options!r}>".format(
-             options=self.options
-         )
-
-
- def in_(options):
-     """
-     A validator that raises a `ValueError` if the initializer is called
-     with a value that does not belong in the options provided. The check is
-     performed using ``value in options``.
-
-     :param options: Allowed options.
-     :type options: list, tuple, `enum.Enum`, ...
-
-     :raises ValueError: With a human readable error message, the attribute (of
-         type `attrs.Attribute`), the expected options, and the value it
-         got.
-
-     .. versionadded:: 17.1.0
-     .. versionchanged:: 22.1.0
-        The ValueError was incomplete until now and only contained the human
-        readable error message. Now it contains all the information that has
-        been promised since 17.1.0.
-     """
-     return _InValidator(options)
-
-
- @attrs(repr=False, slots=False, hash=True)
- class _IsCallableValidator:
-     def __call__(self, inst, attr, value):
-         """
-         We use a callable class to be able to change the ``__repr__``.
-         """
-         if not callable(value):
-             message = (
-                 "'{name}' must be callable "
-                 "(got {value!r} that is a {actual!r})."
-             )
-             raise NotCallableError(
-                 msg=message.format(
-                     name=attr.name, value=value, actual=value.__class__
-                 ),
-                 value=value,
-             )
-
-     def __repr__(self):
-         return "<is_callable validator>"
-
-
- def is_callable():
-     """
-     A validator that raises a `attrs.exceptions.NotCallableError` if the
-     initializer is called with a value for this particular attribute
-     that is not callable.
-
-     .. versionadded:: 19.1.0
-
-     :raises attrs.exceptions.NotCallableError: With a human readable error
-         message containing the attribute (`attrs.Attribute`) name,
-         and the value it got.
-     """
-     return _IsCallableValidator()
-
-
- @attrs(repr=False, slots=True, hash=True)
- class _DeepIterable:
-     member_validator = attrib(validator=is_callable())
-     iterable_validator = attrib(
-         default=None, validator=optional(is_callable())
-     )
-
-     def __call__(self, inst, attr, value):
-         """
-         We use a callable class to be able to change the ``__repr__``.
-         """
-         if self.iterable_validator is not None:
-             self.iterable_validator(inst, attr, value)
-
-         for member in value:
-             self.member_validator(inst, attr, member)
-
-     def __repr__(self):
-         iterable_identifier = (
-             ""
-             if self.iterable_validator is None
-             else f" {self.iterable_validator!r}"
-         )
-         return (
-             "<deep_iterable validator for{iterable_identifier}"
-             " iterables of {member!r}>"
-         ).format(
-             iterable_identifier=iterable_identifier,
-             member=self.member_validator,
-         )
-
-
- def deep_iterable(member_validator, iterable_validator=None):
-     """
-     A validator that performs deep validation of an iterable.
-
-     :param member_validator: Validator(s) to apply to iterable members
-     :param iterable_validator: Validator to apply to iterable itself
-         (optional)
-
-     .. versionadded:: 19.1.0
-
-     :raises TypeError: if any sub-validators fail
-     """
-     if isinstance(member_validator, (list, tuple)):
-         member_validator = and_(*member_validator)
-     return _DeepIterable(member_validator, iterable_validator)
-
-
- @attrs(repr=False, slots=True, hash=True)
- class _DeepMapping:
-     key_validator = attrib(validator=is_callable())
-     value_validator = attrib(validator=is_callable())
-     mapping_validator = attrib(default=None, validator=optional(is_callable()))
-
-     def __call__(self, inst, attr, value):
-         """
-         We use a callable class to be able to change the ``__repr__``.
-         """
-         if self.mapping_validator is not None:
-             self.mapping_validator(inst, attr, value)
-
-         for key in value:
-             self.key_validator(inst, attr, key)
-             self.value_validator(inst, attr, value[key])
-
-     def __repr__(self):
-         return (
-             "<deep_mapping validator for objects mapping {key!r} to {value!r}>"
-         ).format(key=self.key_validator, value=self.value_validator)
-
-
- def deep_mapping(key_validator, value_validator, mapping_validator=None):
-     """
-     A validator that performs deep validation of a dictionary.
-
-     :param key_validator: Validator to apply to dictionary keys
-     :param value_validator: Validator to apply to dictionary values
-     :param mapping_validator: Validator to apply to top-level mapping
-         attribute (optional)
-
-     .. versionadded:: 19.1.0
-
-     :raises TypeError: if any sub-validators fail
-     """
-     return _DeepMapping(key_validator, value_validator, mapping_validator)
-
-
- @attrs(repr=False, frozen=True, slots=True)
- class _NumberValidator:
-     bound = attrib()
-     compare_op = attrib()
-     compare_func = attrib()
-
-     def __call__(self, inst, attr, value):
-         """
-         We use a callable class to be able to change the ``__repr__``.
-         """
-         if not self.compare_func(value, self.bound):
-             raise ValueError(
-                 "'{name}' must be {op} {bound}: {value}".format(
-                     name=attr.name,
-                     op=self.compare_op,
-                     bound=self.bound,
-                     value=value,
-                 )
-             )
-
-     def __repr__(self):
-         return "<Validator for x {op} {bound}>".format(
-             op=self.compare_op, bound=self.bound
-         )
-
-
- def lt(val):
-     """
-     A validator that raises `ValueError` if the initializer is called
-     with a number larger or equal to *val*.
-
-     :param val: Exclusive upper bound for values
-
-     .. versionadded:: 21.3.0
-     """
-     return _NumberValidator(val, "<", operator.lt)
-
-
- def le(val):
-     """
-     A validator that raises `ValueError` if the initializer is called
-     with a number greater than *val*.
-
-     :param val: Inclusive upper bound for values
-
-     .. versionadded:: 21.3.0
-     """
-     return _NumberValidator(val, "<=", operator.le)
-
-
- def ge(val):
-     """
-     A validator that raises `ValueError` if the initializer is called
-     with a number smaller than *val*.
-
-     :param val: Inclusive lower bound for values
-
-     .. versionadded:: 21.3.0
-     """
-     return _NumberValidator(val, ">=", operator.ge)
-
-
- def gt(val):
-     """
-     A validator that raises `ValueError` if the initializer is called
-     with a number smaller or equal to *val*.
-
-     :param val: Exclusive lower bound for values
-
-     .. versionadded:: 21.3.0
-     """
-     return _NumberValidator(val, ">", operator.gt)
-
-
- @attrs(repr=False, frozen=True, slots=True)
- class _MaxLengthValidator:
-     max_length = attrib()
-
-     def __call__(self, inst, attr, value):
-         """
-         We use a callable class to be able to change the ``__repr__``.
-         """
-         if len(value) > self.max_length:
-             raise ValueError(
-                 "Length of '{name}' must be <= {max}: {len}".format(
-                     name=attr.name, max=self.max_length, len=len(value)
-                 )
-             )
-
-     def __repr__(self):
-         return f"<max_len validator for {self.max_length}>"
-
-
- def max_len(length):
-     """
-     A validator that raises `ValueError` if the initializer is called
-     with a string or iterable that is longer than *length*.
-
-     :param int length: Maximum length of the string or iterable
-
-     .. versionadded:: 21.3.0
-     """
-     return _MaxLengthValidator(length)
-
-
- @attrs(repr=False, frozen=True, slots=True)
- class _MinLengthValidator:
-     min_length = attrib()
-
-     def __call__(self, inst, attr, value):
-         """
-         We use a callable class to be able to change the ``__repr__``.
-         """
-         if len(value) < self.min_length:
-             raise ValueError(
-                 "Length of '{name}' must be => {min}: {len}".format(
-                     name=attr.name, min=self.min_length, len=len(value)
-                 )
-             )
-
-     def __repr__(self):
-         return f"<min_len validator for {self.min_length}>"
-
-
- def min_len(length):
-     """
-     A validator that raises `ValueError` if the initializer is called
-     with a string or iterable that is shorter than *length*.
-
-     :param int length: Minimum length of the string or iterable
-
-     .. versionadded:: 22.1.0
-     """
-     return _MinLengthValidator(length)
-
-
- @attrs(repr=False, slots=True, hash=True)
- class _SubclassOfValidator:
-     type = attrib()
-
-     def __call__(self, inst, attr, value):
-         """
-         We use a callable class to be able to change the ``__repr__``.
-         """
-         if not issubclass(value, self.type):
-             raise TypeError(
-                 "'{name}' must be a subclass of {type!r} "
-                 "(got {value!r}).".format(
-                     name=attr.name,
-                     type=self.type,
-                     value=value,
-                 ),
-                 attr,
-                 self.type,
-                 value,
-             )
-
-     def __repr__(self):
-         return "<subclass_of validator for type {type!r}>".format(
-             type=self.type
-         )
-
-
- def _subclass_of(type):
-     """
-     A validator that raises a `TypeError` if the initializer is called
-     with a wrong type for this particular attribute (checks are performed using
-     `issubclass` therefore it's also valid to pass a tuple of types).
-
-     :param type: The type to check for.
-     :type type: type or tuple of types
-
-     :raises TypeError: With a human readable error message, the attribute
-         (of type `attrs.Attribute`), the expected type, and the value it
-         got.
-     """
-     return _SubclassOfValidator(type)
-
-
- @attrs(repr=False, slots=True, hash=True)
- class _NotValidator:
-     validator = attrib()
-     msg = attrib(
-         converter=default_if_none(
-             "not_ validator child '{validator!r}' "
-             "did not raise a captured error"
-         )
-     )
-     exc_types = attrib(
-         validator=deep_iterable(
-             member_validator=_subclass_of(Exception),
-             iterable_validator=instance_of(tuple),
-         ),
-     )
-
-     def __call__(self, inst, attr, value):
-         try:
-             self.validator(inst, attr, value)
-         except self.exc_types:
-             pass  # suppress error to invert validity
-         else:
-             raise ValueError(
-                 self.msg.format(
-                     validator=self.validator,
-                     exc_types=self.exc_types,
-                 ),
-                 attr,
-                 self.validator,
-                 value,
-                 self.exc_types,
-             )
-
-     def __repr__(self):
-         return (
-             "<not_ validator wrapping {what!r}, capturing {exc_types!r}>"
-         ).format(
-             what=self.validator,
-             exc_types=self.exc_types,
-         )
-
-
- def not_(validator, *, msg=None, exc_types=(ValueError, TypeError)):
-     """
-     A validator that wraps and logically 'inverts' the validator passed to it.
-     It will raise a `ValueError` if the provided validator *doesn't* raise a
-     `ValueError` or `TypeError` (by default), and will suppress the exception
-     if the provided validator *does*.
-
-     Intended to be used with existing validators to compose logic without
-     needing to create inverted variants, for example, ``not_(in_(...))``.
-
-     :param validator: A validator to be logically inverted.
-     :param msg: Message to raise if validator fails.
-         Formatted with keys ``exc_types`` and ``validator``.
-     :type msg: str
-     :param exc_types: Exception type(s) to capture.
-         Other types raised by child validators will not be intercepted and
-         pass through.
-
-     :raises ValueError: With a human readable error message,
-         the attribute (of type `attrs.Attribute`),
-         the validator that failed to raise an exception,
-         the value it got,
-         and the expected exception types.
-
-     .. versionadded:: 22.2.0
-     """
-     try:
-         exc_types = tuple(exc_types)
-     except TypeError:
-         exc_types = (exc_types,)
-     return _NotValidator(validator, msg, exc_types)
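The file above is the stock `attr/validators.py` that ships with attrs. As a quick reminder of the API it exposed, here is a small, self-contained sketch; the `Server` class and its fields are illustrative, not from the source:

```python
import attr
from attr import validators

@attr.s
class Server:
    host = attr.ib(validator=validators.instance_of(str))
    port = attr.ib(
        validator=validators.and_(
            validators.instance_of(int), validators.gt(0), validators.lt(65536)
        )
    )
    tags = attr.ib(
        default=(),
        validator=validators.deep_iterable(
            member_validator=validators.instance_of(str),
            iterable_validator=validators.instance_of(tuple),
        ),
    )

Server("localhost", 8080, ("web",))  # passes all validators
try:
    Server("localhost", -1, ())      # gt(0) fails at __init__ time
except ValueError as e:
    print(e)
```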
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/ttLib/tables/S_T_A_T_.py DELETED
@@ -1,5 +0,0 @@
- from .otBase import BaseTTXConverter
-
-
- class table_S_T_A_T_(BaseTTXConverter):
-     pass
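`table_S_T_A_T_` is a pure pass-through: all compiling and decompiling of the binary STAT data is inherited from `BaseTTXConverter` and the shared `otBase` machinery. A hedged sketch of reaching the table through the public fontTools API; the font path is a placeholder:

```python
from fontTools.ttLib import TTFont

font = TTFont("SomeVariableFont.ttf")  # placeholder path
if "STAT" in font:
    stat = font["STAT"]                # deserialized by table_S_T_A_T_
    print(stat.table.DesignAxisCount)  # number of design axes in the table
```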
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-ec1a8aac.js DELETED
@@ -1,7 +0,0 @@
- [7 lines of minified, machine-generated Svelte bundle omitted]
- //# sourceMappingURL=index-ec1a8aac.js.map
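Judging by its exports, the deleted asset is the compiled frontend bundle for gradio's examples/`Dataset` component. On the Python side that component is driven roughly as below; the component choice and sample values are illustrative:

```python
import gradio as gr

with gr.Blocks() as demo:
    name = gr.Textbox(label="name")
    gr.Dataset(
        components=[name],             # one column per component
        samples=[["Alice"], ["Bob"]],  # rows rendered by the frontend bundle
        samples_per_page=10,
    )

demo.launch()
```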
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/httpcore/_async/__init__.py DELETED
@@ -1,39 +0,0 @@
- from .connection import AsyncHTTPConnection
- from .connection_pool import AsyncConnectionPool
- from .http11 import AsyncHTTP11Connection
- from .http_proxy import AsyncHTTPProxy
- from .interfaces import AsyncConnectionInterface
-
- try:
-     from .http2 import AsyncHTTP2Connection
- except ImportError:  # pragma: nocover
-
-     class AsyncHTTP2Connection:  # type: ignore
-         def __init__(self, *args, **kwargs) -> None:  # type: ignore
-             raise RuntimeError(
-                 "Attempted to use http2 support, but the `h2` package is not "
-                 "installed. Use 'pip install httpcore[http2]'."
-             )
-
-
- try:
-     from .socks_proxy import AsyncSOCKSProxy
- except ImportError:  # pragma: nocover
-
-     class AsyncSOCKSProxy:  # type: ignore
-         def __init__(self, *args, **kwargs) -> None:  # type: ignore
-             raise RuntimeError(
-                 "Attempted to use SOCKS support, but the `socksio` package is not "
-                 "installed. Use 'pip install httpcore[socks]'."
-             )
-
-
- __all__ = [
-     "AsyncHTTPConnection",
-     "AsyncConnectionPool",
-     "AsyncHTTPProxy",
-     "AsyncHTTP11Connection",
-     "AsyncHTTP2Connection",
-     "AsyncConnectionInterface",
-     "AsyncSOCKSProxy",
- ]
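These re-exports make up httpcore's public async surface, with HTTP/2 and SOCKS stand-ins that fail lazily when the optional extras are missing. A minimal sketch of the happy path; the URL is a placeholder:

```python
import asyncio
import httpcore

async def main():
    async with httpcore.AsyncConnectionPool() as pool:
        # The RuntimeError stubs above only fire if http2/socks features
        # are requested without the matching optional extras installed.
        response = await pool.request("GET", "https://example.com/")  # placeholder URL
        print(response.status)

asyncio.run(main())
```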
spaces/Danielzero/GPT3.5/modules/presets.py DELETED
@@ -1,222 +0,0 @@
- # -*- coding:utf-8 -*-
- import os
- from pathlib import Path
- import gradio as gr
- from .webui_locale import I18nAuto
-
- i18n = I18nAuto()  # internationalization
-
- CHATGLM_MODEL = None
- CHATGLM_TOKENIZER = None
- LLAMA_MODEL = None
- LLAMA_INFERENCER = None
-
- # ChatGPT settings
- INITIAL_SYSTEM_PROMPT = "You are a helpful assistant."
- API_HOST = "api.openai.com"
- COMPLETION_URL = "https://api.openai.com/v1/chat/completions"
- BALANCE_API_URL = "https://api.openai.com/dashboard/billing/credit_grants"
- USAGE_API_URL = "https://api.openai.com/dashboard/billing/usage"
- HISTORY_DIR = Path("history")
- HISTORY_DIR = "history"
- TEMPLATES_DIR = "templates"
-
- # Error messages
- STANDARD_ERROR_MSG = i18n("☹️发生了错误:")  # standard prefix for error messages
- GENERAL_ERROR_MSG = i18n("获取对话时发生错误,请查看后台日志")
- ERROR_RETRIEVE_MSG = i18n("请检查网络连接,或者API-Key是否有效。")
- CONNECTION_TIMEOUT_MSG = i18n("连接超时,无法获取对话。")  # connection timed out
- READ_TIMEOUT_MSG = i18n("读取超时,无法获取对话。")  # read timed out
- PROXY_ERROR_MSG = i18n("代理错误,无法获取对话。")  # proxy error
- SSL_ERROR_PROMPT = i18n("SSL错误,无法获取对话。")  # SSL error
- NO_APIKEY_MSG = i18n("API key为空,请检查是否输入正确。")  # API key is shorter than 51 characters
- NO_INPUT_MSG = i18n("请输入对话内容。")  # no conversation content was entered
- BILLING_NOT_APPLICABLE_MSG = i18n("账单信息不适用")  # billing info returned by locally run models
-
- TIMEOUT_STREAMING = 60  # timeout for streaming conversations
- TIMEOUT_ALL = 200  # timeout for non-streaming conversations
- ENABLE_STREAMING_OPTION = True  # whether to show the checkbox that toggles streaming answers in real time
- HIDE_MY_KEY = False  # set this to True if you want to hide your API key in the UI
- CONCURRENT_COUNT = 100  # number of users allowed at the same time
-
- SIM_K = 5
- INDEX_QUERY_TEMPRATURE = 1.0
-
- CHUANHU_TITLE = i18n("川虎Chat 🚀")
-
- CHUANHU_DESCRIPTION = i18n("由Bilibili [土川虎虎虎](https://space.bilibili.com/29125536) 和 [明昭MZhao](https://space.bilibili.com/24807452)开发<br />访问川虎Chat的 [GitHub项目](https://github.com/GaiZhenbiao/ChuanhuChatGPT) 下载最新版脚本")
-
- FOOTER = """<div class="versions">{versions}</div>"""
-
- APPEARANCE_SWITCHER = """
- <div style="display: flex; justify-content: space-between;">
- <span style="margin-top: 4px !important;">""" + i18n("切换亮暗色主题") + """</span>
- <span><label class="apSwitch" for="checkbox">
- <input type="checkbox" id="checkbox">
- <div class="apSlider"></div>
- </label></span>
- </div>
- """
-
- SUMMARIZE_PROMPT = "你是谁?我们刚才聊了什么?"  # prompt used when summarizing the conversation
-
- ONLINE_MODELS = [
-     "gpt-3.5-turbo",
-     "gpt-3.5-turbo-0301",
-     "gpt-4",
-     "gpt-4-0314",
-     "gpt-4-32k",
-     "gpt-4-32k-0314",
-     "xmchat",
- ]
-
- LOCAL_MODELS = [
-     "chatglm-6b",
-     "chatglm-6b-int4",
-     "chatglm-6b-int4-qe",
-     "llama-7b-hf",
-     "llama-13b-hf",
-     "llama-30b-hf",
-     "llama-65b-hf"
- ]
-
- if os.environ.get('HIDE_LOCAL_MODELS', 'false') == 'true':
-     MODELS = ONLINE_MODELS
- else:
-     MODELS = ONLINE_MODELS + LOCAL_MODELS
-
- DEFAULT_MODEL = 0
-
- os.makedirs("models", exist_ok=True)
- os.makedirs("lora", exist_ok=True)
- os.makedirs("history", exist_ok=True)
- for dir_name in os.listdir("models"):
-     if os.path.isdir(os.path.join("models", dir_name)):
-         if dir_name not in MODELS:
-             MODELS.append(dir_name)
-
- MODEL_TOKEN_LIMIT = {
-     "gpt-3.5-turbo": 4096,
-     "gpt-3.5-turbo-0301": 4096,
-     "gpt-4": 8192,
-     "gpt-4-0314": 8192,
-     "gpt-4-32k": 32768,
-     "gpt-4-32k-0314": 32768
- }
-
- TOKEN_OFFSET = 1000  # the model's token limit minus this value gives the soft limit; once the soft limit is reached, token usage is reduced automatically.
- DEFAULT_TOKEN_LIMIT = 3000  # default token limit
- REDUCE_TOKEN_FACTOR = 0.5  # multiplied by the model's token limit to get the target token count; when reducing usage, tokens are cut below this target.
-
- REPLY_LANGUAGES = [
-     "简体中文",
-     "繁體中文",
-     "English",
-     "日本語",
-     "Español",
-     "Français",
-     "Deutsch",
-     "跟随问题语言(不稳定)"
- ]
-
-
- WEBSEARCH_PTOMPT_TEMPLATE = """\
- Web search results:
-
- {web_results}
- Current date: {current_date}
-
- Instructions: Using the provided web search results, write a comprehensive reply to the given query. Make sure to cite results using [[number](URL)] notation after the reference. If the provided search results refer to multiple subjects with the same name, write separate answers for each subject.
- Query: {query}
- Reply in {reply_language}
- """
-
- PROMPT_TEMPLATE = """\
- Context information is below.
- ---------------------
- {context_str}
- ---------------------
- Current date: {current_date}.
- Using the provided context information, write a comprehensive reply to the given query.
- Make sure to cite results using [number] notation after the reference.
- If the provided context information refer to multiple subjects with the same name, write separate answers for each subject.
- Use prior knowledge only if the given context didn't provide enough information.
- Answer the question: {query_str}
- Reply in {reply_language}
- """
-
- REFINE_TEMPLATE = """\
- The original question is as follows: {query_str}
- We have provided an existing answer: {existing_answer}
- We have the opportunity to refine the existing answer
- (only if needed) with some more context below.
- ------------
- {context_msg}
- ------------
- Given the new context, refine the original answer to better
- Reply in {reply_language}
- If the context isn't useful, return the original answer.
- """
-
- ALREADY_CONVERTED_MARK = "<!-- ALREADY CONVERTED BY PARSER. -->"
-
- small_and_beautiful_theme = gr.themes.Soft(
-     primary_hue=gr.themes.Color(
-         c50="#02C160",
-         c100="rgba(2, 193, 96, 0.2)",
-         c200="#02C160",
-         c300="rgba(2, 193, 96, 0.32)",
-         c400="rgba(2, 193, 96, 0.32)",
-         c500="rgba(2, 193, 96, 1.0)",
-         c600="rgba(2, 193, 96, 1.0)",
-         c700="rgba(2, 193, 96, 0.32)",
-         c800="rgba(2, 193, 96, 0.32)",
-         c900="#02C160",
-         c950="#02C160",
-     ),
-     secondary_hue=gr.themes.Color(
-         c50="#576b95",
-         c100="#576b95",
-         c200="#576b95",
-         c300="#576b95",
-         c400="#576b95",
-         c500="#576b95",
-         c600="#576b95",
-         c700="#576b95",
-         c800="#576b95",
-         c900="#576b95",
-         c950="#576b95",
-     ),
-     neutral_hue=gr.themes.Color(
-         name="gray",
-         c50="#f9fafb",
-         c100="#f3f4f6",
-         c200="#e5e7eb",
-         c300="#d1d5db",
-         c400="#B2B2B2",
-         c500="#808080",
-         c600="#636363",
-         c700="#515151",
-         c800="#393939",
-         c900="#272727",
-         c950="#171717",
-     ),
-     radius_size=gr.themes.sizes.radius_sm,
- ).set(
-     button_primary_background_fill="#06AE56",
-     button_primary_background_fill_dark="#06AE56",
-     button_primary_background_fill_hover="#07C863",
-     button_primary_border_color="#06AE56",
-     button_primary_border_color_dark="#06AE56",
-     button_primary_text_color="#FFFFFF",
-     button_primary_text_color_dark="#FFFFFF",
-     button_secondary_background_fill="#F2F2F2",
-     button_secondary_background_fill_dark="#2B2B2B",
-     button_secondary_text_color="#393939",
-     button_secondary_text_color_dark="#FFFFFF",
-     # background_fill_primary="#F7F7F7",
-     # background_fill_primary_dark="#1F1F1F",
-     block_title_text_color="*primary_500",
-     block_title_background_fill="*primary_100",
-     input_background_fill="#F6F6F6",
- )
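For the token-budget constants above: the soft limit is the model's hard limit minus `TOKEN_OFFSET`, and once it is hit, usage is trimmed below `limit * REDUCE_TOKEN_FACTOR`. A small sketch using the constants defined in the deleted file, with `gpt-3.5-turbo` as the example key:

```python
limit = MODEL_TOKEN_LIMIT.get("gpt-3.5-turbo", DEFAULT_TOKEN_LIMIT)  # 4096
soft_limit = limit - TOKEN_OFFSET          # 3096: start trimming here
target = int(limit * REDUCE_TOKEN_FACTOR)  # 2048: trim token usage below this
print(soft_limit, target)
```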